From eb45f2522281d4e3d095e712cb6209fd80f25fb0 Mon Sep 17 00:00:00 2001 From: maniranjan2023 Date: Fri, 25 Oct 2024 10:58:23 +0530 Subject: [PATCH 1/3] ats_resume_using gemini_pro --- ats_resume_using gemini_pro/app.py | 96 ++++++++++++++++++++ ats_resume_using gemini_pro/requirements.txt | 4 + 2 files changed, 100 insertions(+) create mode 100644 ats_resume_using gemini_pro/app.py create mode 100644 ats_resume_using gemini_pro/requirements.txt diff --git a/ats_resume_using gemini_pro/app.py b/ats_resume_using gemini_pro/app.py new file mode 100644 index 0000000..02263e4 --- /dev/null +++ b/ats_resume_using gemini_pro/app.py @@ -0,0 +1,96 @@ +from dotenv import load_dotenv + +load_dotenv() +import base64 +import streamlit as st +import os +import io +from PIL import Image +import pdf2image +import google.generativeai as genai + +genai.configure(api_key=os.getenv("GOOGLE_API_KEY")) + +def get_gemini_response(input,pdf_cotent,prompt): + model=genai.GenerativeModel('gemini-pro-vision') + response=model.generate_content([input,pdf_content[0],prompt]) + return response.text + +def input_pdf_setup(uploaded_file): + if uploaded_file is not None: + ## Convert the PDF to image + images=pdf2image.convert_from_bytes(uploaded_file.read()) + + first_page=images[0] + + # Convert to bytes + img_byte_arr = io.BytesIO() + first_page.save(img_byte_arr, format='JPEG') + img_byte_arr = img_byte_arr.getvalue() + + pdf_parts = [ + { + "mime_type": "image/jpeg", + "data": base64.b64encode(img_byte_arr).decode() # encode to base64 + } + ] + return pdf_parts + else: + raise FileNotFoundError("No file uploaded") + +## Streamlit App + +st.set_page_config(page_title="ATS Resume EXpert") +st.header("ATS Tracking System") +input_text=st.text_area("Job Description: ",key="input") +uploaded_file=st.file_uploader("Upload your resume(PDF)...",type=["pdf"]) + + +if uploaded_file is not None: + st.write("PDF Uploaded Successfully") + + +submit1 = st.button("Tell Me About the Resume") + +#submit2 = st.button("How Can I Improvise my Skills") + +submit3 = st.button("Percentage match") + +input_prompt1 = """ + You are an experienced Technical Human Resource Manager,your task is to review the provided resume against the job description. + Please share your professional evaluation on whether the candidate's profile aligns with the role. + Highlight the strengths and weaknesses of the applicant in relation to the specified job requirements. +""" + +input_prompt3 = """ +You are an skilled ATS (Applicant Tracking System) scanner with a deep understanding of data science and ATS functionality, +your task is to evaluate the resume against the provided job description. give me the percentage of match if the resume matches +the job description. First the output should come as percentage and then keywords missing and last final thoughts. 
+""" + +if submit1: + if uploaded_file is not None: + pdf_content=input_pdf_setup(uploaded_file) + response=get_gemini_response(input_prompt1,pdf_content,input_text) + st.subheader("The Repsonse is") + st.write(response) + else: + st.write("Please uplaod the resume") + +elif submit3: + if uploaded_file is not None: + pdf_content=input_pdf_setup(uploaded_file) + response=get_gemini_response(input_prompt3,pdf_content,input_text) + st.subheader("The Repsonse is") + st.write(response) + else: + st.write("Please uplaod the resume") + + + + + + + + + diff --git a/ats_resume_using gemini_pro/requirements.txt b/ats_resume_using gemini_pro/requirements.txt new file mode 100644 index 0000000..e750e40 --- /dev/null +++ b/ats_resume_using gemini_pro/requirements.txt @@ -0,0 +1,4 @@ +streamlit +google-generativeai +python-dotenv +pdf2image From 24f3288f444c57f798560b12cf07b35234a73aa7 Mon Sep 17 00:00:00 2001 From: maniranjan2023 Date: Mon, 28 Oct 2024 13:07:04 +0530 Subject: [PATCH 2/3] jarvis_voice assitance --- ats_resume_using gemini_pro/app.py | 96 ----- ats_resume_using gemini_pro/requirements.txt | 4 - .../jarvis-Lte-main/README.md | Bin 0 -> 30 bytes .../jarvis-Lte-main/front.py | 82 +++++ .../jarvis-Lte-main/jarvis_adv.py | 344 ++++++++++++++++++ .../jarvis-Lte-main/requirements.txt | 1 + 6 files changed, 427 insertions(+), 100 deletions(-) delete mode 100644 ats_resume_using gemini_pro/app.py delete mode 100644 ats_resume_using gemini_pro/requirements.txt create mode 100644 jarvis_voice assistance/jarvis-Lte-main/README.md create mode 100644 jarvis_voice assistance/jarvis-Lte-main/front.py create mode 100644 jarvis_voice assistance/jarvis-Lte-main/jarvis_adv.py create mode 100644 jarvis_voice assistance/jarvis-Lte-main/requirements.txt diff --git a/ats_resume_using gemini_pro/app.py b/ats_resume_using gemini_pro/app.py deleted file mode 100644 index 02263e4..0000000 --- a/ats_resume_using gemini_pro/app.py +++ /dev/null @@ -1,96 +0,0 @@ -from dotenv import load_dotenv - -load_dotenv() -import base64 -import streamlit as st -import os -import io -from PIL import Image -import pdf2image -import google.generativeai as genai - -genai.configure(api_key=os.getenv("GOOGLE_API_KEY")) - -def get_gemini_response(input,pdf_cotent,prompt): - model=genai.GenerativeModel('gemini-pro-vision') - response=model.generate_content([input,pdf_content[0],prompt]) - return response.text - -def input_pdf_setup(uploaded_file): - if uploaded_file is not None: - ## Convert the PDF to image - images=pdf2image.convert_from_bytes(uploaded_file.read()) - - first_page=images[0] - - # Convert to bytes - img_byte_arr = io.BytesIO() - first_page.save(img_byte_arr, format='JPEG') - img_byte_arr = img_byte_arr.getvalue() - - pdf_parts = [ - { - "mime_type": "image/jpeg", - "data": base64.b64encode(img_byte_arr).decode() # encode to base64 - } - ] - return pdf_parts - else: - raise FileNotFoundError("No file uploaded") - -## Streamlit App - -st.set_page_config(page_title="ATS Resume EXpert") -st.header("ATS Tracking System") -input_text=st.text_area("Job Description: ",key="input") -uploaded_file=st.file_uploader("Upload your resume(PDF)...",type=["pdf"]) - - -if uploaded_file is not None: - st.write("PDF Uploaded Successfully") - - -submit1 = st.button("Tell Me About the Resume") - -#submit2 = st.button("How Can I Improvise my Skills") - -submit3 = st.button("Percentage match") - -input_prompt1 = """ - You are an experienced Technical Human Resource Manager,your task is to review the provided resume against the job description. 
-    Please share your professional evaluation on whether the candidate's profile aligns with the role.
-    Highlight the strengths and weaknesses of the applicant in relation to the specified job requirements.
-"""
-
-input_prompt3 = """
-You are an skilled ATS (Applicant Tracking System) scanner with a deep understanding of data science and ATS functionality,
-your task is to evaluate the resume against the provided job description. give me the percentage of match if the resume matches
-the job description. First the output should come as percentage and then keywords missing and last final thoughts.
-"""
-
-if submit1:
-    if uploaded_file is not None:
-        pdf_content=input_pdf_setup(uploaded_file)
-        response=get_gemini_response(input_prompt1,pdf_content,input_text)
-        st.subheader("The Repsonse is")
-        st.write(response)
-    else:
-        st.write("Please uplaod the resume")
-
-elif submit3:
-    if uploaded_file is not None:
-        pdf_content=input_pdf_setup(uploaded_file)
-        response=get_gemini_response(input_prompt3,pdf_content,input_text)
-        st.subheader("The Repsonse is")
-        st.write(response)
-    else:
-        st.write("Please uplaod the resume")
-
-
-
-
-
-
-
-
-
diff --git a/ats_resume_using gemini_pro/requirements.txt b/ats_resume_using gemini_pro/requirements.txt
deleted file mode 100644
index e750e40..0000000
--- a/ats_resume_using gemini_pro/requirements.txt
+++ /dev/null
@@ -1,4 +0,0 @@
-streamlit
-google-generativeai
-python-dotenv
-pdf2image
diff --git a/jarvis_voice assistance/jarvis-Lte-main/README.md b/jarvis_voice assistance/jarvis-Lte-main/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..cc1cbdaaa47a1d4206d058b6bd85e2f60fce82fa
GIT binary patch
literal 30
lcmezWPnki1A&ViAp@^Z3A(Nq)L6^aYp@booftP`c0RVl71~~u#

literal 0
HcmV?d00001

diff --git a/jarvis_voice assistance/jarvis-Lte-main/front.py b/jarvis_voice assistance/jarvis-Lte-main/front.py
new file mode 100644
index 0000000..1aa5963
--- /dev/null
+++ b/jarvis_voice assistance/jarvis-Lte-main/front.py
@@ -0,0 +1,82 @@
+import tkinter as tk
+from tkinter import scrolledtext
+import threading
+import jarvis_adv
+import speech_recognition as sr
+import pyttsx3
+
+class JarvisGUI:
+    def __init__(self, root):
+        self.root = root
+        self.root.title("Jarvis AI Assistant")
+
+        self.output_text = scrolledtext.ScrolledText(root, width=80, height=20)
+        self.output_text.pack(padx=10, pady=10)
+
+        self.listen_button = tk.Button(root, text="Listen", command=self.listen_command)
+        self.listen_button.pack()
+
+        self.recognizer = sr.Recognizer()
+        self.engine = pyttsx3.init()
+
+        self.hologram_label = tk.Label(root, text="Hologram here", font=("Arial", 24))
+        self.hologram_label.pack()
+        self.hologram_moving = False
+
+    def listen_command(self):
+        self.output_text.insert(tk.END, "Listening...\n")
+        threading.Thread(target=self.process_audio).start()
+
+    def process_audio(self):
+        try:
+            with sr.Microphone() as source:
+                audio = self.recognizer.listen(source)
+                user_input = self.recognizer.recognize_google(audio)
+                self.output_text.insert(tk.END, f"You (Voice): {user_input}\n")
+                response = self.process_command(user_input)
+                self.output_text.insert(tk.END, f"Jarvis: {response}\n\n")
+                self.speak(response)
+                self.animate_hologram()  # Trigger hologram animation
+        except sr.UnknownValueError:
+            self.output_text.insert(tk.END, "Could not understand the audio.\n")
+            self.speak("Could not understand the audio.")
+        except sr.RequestError as e:
+            self.output_text.insert(tk.END, f"Error: {str(e)}\n")
+            self.speak("There was an error processing the audio.")
+
+    def process_command(self, command):
+        # Call your Jarvis script functions here based on the command
+        # For example:
+        if 'hello' in command.lower():
+            return "Hello! How can I assist you?"
+        elif 'play music' in command.lower():
+            threading.Thread(target=jarvis_adv.play_music, args=('C:\\path\\to\\music.mp3',)).start()
+            return "Playing music..."
+
+        # Add more commands and responses as needed
+
+        else:
+            return "Command not recognized."
+
+    def speak(self, text):
+        self.engine.say(text)
+        self.engine.runAndWait()
+
+    def animate_hologram(self):
+        if not self.hologram_moving:
+            self.hologram_moving = True
+            self.move_hologram_left(10)
+
+    def move_hologram_left(self, distance):
+        if distance > 0:
+            self.root.after(100, self.move_hologram_left, distance - 1)
+        else:
+            self.hologram_label.place(x=150, y=150)  # Reset the hologram position
+            self.hologram_moving = False
+
+    # You can add more complex animations or use libraries like Pygame for smoother animations
+
+if __name__ == "__main__":
+    root = tk.Tk()
+    app = JarvisGUI(root)
+    root.mainloop()
diff --git a/jarvis_voice assistance/jarvis-Lte-main/jarvis_adv.py b/jarvis_voice assistance/jarvis-Lte-main/jarvis_adv.py
new file mode 100644
index 0000000..c6c9de6
--- /dev/null
+++ b/jarvis_voice assistance/jarvis-Lte-main/jarvis_adv.py
@@ -0,0 +1,344 @@
+import pyttsx3
+import speech_recognition as sr
+import webbrowser
+import pywhatkit
+import datetime
+import wikipedia
+import os
+import smtplib
+import pygame
+import pyautogui
+import keyboard
+import pyjokes
+from PyDictionary import PyDictionary as diction
+
+Assistant = pyttsx3.init("sapi5")
+voices = Assistant.getProperty("voices")
+print(voices)
+Assistant.setProperty("voice", voices[0].id)
+Assistant.setProperty("rate", 150)
+def speak(audio):
+    print(" ")
+    Assistant.say(audio)
+    Assistant.runAndWait()
+
+
+# if "hello" in query:
+#     speak("hello sir")
+
+# else:
+#     speak("no command found")
+def sendEmail(to, content):
+    server = smtplib.SMTP('smtp.gmail.com', 587)
+    server.ehlo()
+    server.starttls()
+    server.login('shivangrustagi04@gmail.com', 'Laxminagar$92')
+    server.sendmail('shivangrustagi04@gmail.com', to, content)
+    server.close()
+
+
+def play_music(file_path):
+    pygame.mixer.init()
+    pygame.mixer.music.load(file_path)
+    pygame.mixer.music.play()
+
+
+def wishMe():
+    hour = datetime.datetime.now().hour
+    if 0 <= hour < 12:
+        speak("Good Morning!")
+    elif 12 <= hour < 18:
+        speak("Good Afternoon!")
+    else:
+        speak("Good Evening!")
+    speak("I am Jarvis Sir. Please tell me how may I help you")
+
+
+def takeCommand():
+    r = sr.Recognizer()
+    with sr.Microphone() as source:
+        print("Listening...")
+        r.pause_threshold = 1
+        audio = r.listen(source)
+
+    try:
+        print("Recognizing...")
+        query = r.recognize_google(audio, language='en-in')
+        print(f"User said: {query}\n")
+
+    except Exception as e:
+        print("Say that again please...")
+        return "None"
+    return query
+
+
+def sendEmail(to, content):
+    server = smtplib.SMTP('smtp.gmail.com', 587)
+    server.ehlo()
+    server.starttls()
+    server.login('shivangrustagi04@gmail.com', 'Laxminagar$92')
+    server.sendmail('shivangrustagi04@gmail.com', to, content)
+    server.close()
+
+
+def play_music(file_path):
+    pygame.mixer.init()
+    pygame.mixer.music.load(file_path)
+    pygame.mixer.music.play()
+
+
+def main():
+    def music():
+        speak("tell me your music name")
+        musicname = takeCommand()
+        pywhatkit.playonyt(musicname)
+        speak("your music is playing, enjoy it")
+    def whatsapp():
+        speak("tell me the name of the person")
+        name = takeCommand()
+
+        if "shlok" in name:
+            speak("tell me the message!!")
+            msg = takeCommand()
+            speak("tell me time in hour and minute")
+            hour = int(takeCommand())
+            minute = int(takeCommand())
+            pywhatkit.sendwhatmsg("+919899755982", msg, hour, minute, 20)
+            speak("ok sir, your message will be sent!")
+    def dict():
+        speak("activated dictionary")
+        speak("tell me problem")
+        prob1 = takeCommand()
+        if "meaning" in prob1:
+            prob1 = prob1.replace("what is the","")
+            prob1 = prob1.replace("jarvis","")
+            prob1 = prob1.replace("of","")
+            prob1 = prob1.replace("meaning","")
+            result = diction().meaning(prob1)
+            speak(f"the meaning for {prob1} is {result}")
+        elif "synonym" in prob1:
+            prob1 = prob1.replace("what is the","")
+            prob1 = prob1.replace("jarvis","")
+            prob1 = prob1.replace("of","")
+            prob1 = prob1.replace("synonym","")
+            result = diction().synonym(prob1)
+            speak(f"the synonym for {prob1} is {result}")
+        elif "antonym" in prob1:
+            prob1 = prob1.replace("what is the","")
+            prob1 = prob1.replace("jarvis","")
+            prob1 = prob1.replace("of","")
+            prob1 = prob1.replace("antonym","")
+            result = diction().antonym(prob1)
+            speak(f"the antonym for {prob1} is {result}")
+        speak("exited")
+    def screenshot():
+        speak("ok but tell me name of screenshot")
+        path = takeCommand()
+        path1name = path + '.png'
+        path1 = "C:\\Users\\shiva\\OneDrive\\Desktop\\screenshot\\" + path1name
+        kk = pyautogui.screenshot()
+        kk.save(path1)
+        os.startfile("C:\\Users\\shiva\\OneDrive\\Desktop\\screenshot")
+        speak("screenshot taken")
+
+
+    def youtubeAuto():
+        speak("what is your command")
+        comm = takeCommand()
+
+        if 'pause' in comm:
+            keyboard.press("space")
+
+        elif "restart" in comm:
+            keyboard.press("0")
+        elif "mute" in comm:
+            keyboard.press("m")
+        elif "back" in comm:
+            keyboard.press("j")
+        elif "skip" in comm:
+            keyboard.press("l")
+        elif "full screen" in comm:
+            keyboard.press("f")
+        elif "exit full screen" in comm:
+            keyboard.press("f")
+        elif "film mode" in comm:
+            keyboard.press("t")
+        speak("done sir")
+
+    def chromeAuto():
+        speak("chrome automation started successfully")
+
+        command = takeCommand()
+
+        if "close this tab" in command:
+            keyboard.press_and_release("ctrl + w")
+        elif "open new tab" in command:
+            keyboard.press_and_release("ctrl + t")
+        elif "open new window" in command:
+            keyboard.press_and_release("ctrl + n")
+        elif "open history" in command:
+            keyboard.press_and_release("ctrl + h")
+        elif "open previous tab" in command:
+            keyboard.press_and_release("ctrl + shift + t")
+
+
+
+    wishMe()
+    while True:
+        query = takeCommand().lower()
+
+        if 'wikipedia' in query:
+            speak('Searching Wikipedia...')
+            query = query.replace("wikipedia", "")
+            results = wikipedia.summary(query, sentences=2)
+            speak("According to Wikipedia")
+            print(results)
+            speak(results)
+
+        if "hello" in query:
+            speak("hello sir, i am jarvis")
+            speak("i am your ai assistant")
+            speak("how can i help you")
+        elif "how are you" in query:
+            speak("i am fine sir, thank you for asking")
+
+        elif "bye" in query:
+            speak("have a good day ahead")
+            break
+        elif "kya hal hai" in query:
+            speak("sab jhakaas hai")
+        elif "main achcha hun tum batao" in query:
+            speak("main bhi")
+        elif "you need break" in query:
+            speak("alright, you can call me anytime")
+            break
+        elif "youtube search" in query:
+            speak("ok sir, finding")
+            query = query.replace("jarvis","")
+            query = query.replace("youtube search","")
+            web = "https://www.youtube.com/results?search_query=" + query
+            webbrowser.open(web)
+            speak("this is what i found")
+        elif "screenshot" in query:
+            screenshot()
+        elif "google search" in query:
+            speak("okay sir, finding")
+            query = query.replace("jarvis","")
+            query = query.replace("google search","")
+            pywhatkit.search(query)
+            speak("done sir")
+        elif "music" in query:
+            music()
+        elif "whatsapp" in query:
+            whatsapp()
+        elif "website" in query:
+            speak("ok sir, launching....")
+            query = query.replace("jarvis","")
+            query = query.replace("website","")
+            query = query.replace(" ","")
+            web1 = query.replace("open","")
+            web2 = "https://www." + web1 + ".com"
+            webbrowser.open(web2)
+            speak("launched")
+        elif 'dictionary' in query:
+            dict()
+        elif "launch" in query:
+            speak("ok launching the website you said")
+            name = takeCommand()
+            web = "https://www." + name + ".com"
+            webbrowser.open(web)
+            speak("done sir")
+        elif 'wikipedia' in query:
+            speak('Searching Wikipedia...')
+            query = query.replace("wikipedia", "")
+            results = wikipedia.summary(query, sentences=2)
+            speak("According to Wikipedia")
+            print(results)
+            speak(results)
+        elif "joke" in query:
+            get = pyjokes.get_joke()
+            speak(get)
+
+        elif 'open youtube' in query:
+            webbrowser.open("youtube.com")
+        elif 'open facebook' in query:
+            webbrowser.open("facebook.com")
+        elif 'google search' in query:
+            import wikipedia as googleScrap
+            query = query.replace("jarvis","")
+            query = query.replace("google search","")
+            query = query.replace("google","")
+            speak("this is what i found on internet")
+            pywhatkit.search(query)
+
+
+            try:
+                result = googleScrap.summary(query,2)
+                speak(result)
+
+            except:
+                speak("no data found to tell")
+        elif 'open stackoverflow' in query:
+            webbrowser.open("stackoverflow.com")
+        elif 'play music' in query:
+            music_file = 'C:\\Users\\shiva\\Downloads\\music.mp3'  # Replace with the path to your music file
+            speak("Playing music now...")
+            play_music(music_file)
+            while pygame.mixer.music.get_busy():
+                continue
+            speak("Music has finished playing")
+
+        elif 'the time' in query:
+            strTime = datetime.datetime.now().strftime("%H:%M:%S")
+            speak(f"Sir, the time is {strTime}")
+        elif "restart" in query:
+            keyboard.press("0")
+        elif "mute" in query:
+            keyboard.press("m")
+        elif "back" in query:
+            keyboard.press("j")
+        elif "skip" in query:
+            keyboard.press("l")
+        elif "full screen" in query:
+            keyboard.press("f")
+        elif "exit full screen" in query:
+            keyboard.press("f")
+        elif "film mode" in query:
+            keyboard.press("t")
+        elif "pause" in query:
+            keyboard.press("k")
+        elif "youtube tool" in query:
+            youtubeAuto()
+        elif "close this tab" in query:
+            keyboard.press_and_release("ctrl + w")
+        elif "open new tab" in query:
+            keyboard.press_and_release("ctrl + t")
+        elif "open new window" in query:
+            keyboard.press_and_release("ctrl + n")
+        elif "open history" in query:
+            keyboard.press_and_release("ctrl + h")
+        elif "open previous tab" in query:
+            keyboard.press_and_release("ctrl + shift + t")
+        elif "chrome auto" in query:
+            chromeAuto()
+        elif "repeat my words" in query:
+            speak("speak sir")
+            jj = takeCommand()
+            speak(f"you said : {jj}")
+
+        elif 'gmail' in query:
+            try:
+                speak("What should I say?")
+                content = takeCommand()
+                to = "shivangrustagi004@gmail.com"
+                sendEmail(to, content)
+                speak("Email has been sent!")
+            except Exception as e:
+                print(e)
+                speak("Sorry, I am not able to send this email")
+        elif 'exit' in query:
+            speak("Goodbye!")
+            break
+if __name__ == "__main__":
+    main()
\ No newline at end of file
diff --git a/jarvis_voice assistance/jarvis-Lte-main/requirements.txt b/jarvis_voice assistance/jarvis-Lte-main/requirements.txt
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/jarvis_voice assistance/jarvis-Lte-main/requirements.txt
@@ -0,0 +1 @@
+

From 25d10908bdd1b2935bcf75afe63563d0762405d5 Mon Sep 17 00:00:00 2001
From: maniranjan2023
Date: Tue, 29 Oct 2024 11:45:02 +0530
Subject: [PATCH 3/3] voice control game

---
 voice_control_game/game.py         | 98 ++++++++++++++++++++++++++++++
 voice_control_game/requirement.txt |  3 +++
 2 files changed, 101 insertions(+)
 create mode 100644 voice_control_game/game.py
 create mode 100644 voice_control_game/requirement.txt

diff --git a/voice_control_game/game.py b/voice_control_game/game.py
new file mode 100644
index 0000000..38dce91
--- /dev/null
+++ b/voice_control_game/game.py
@@ -0,0 +1,98 @@
+import pygame
+import random
+import speech_recognition as sr
+
+# Initialize pygame
+pygame.init()
+
+# Screen dimensions
+WIDTH, HEIGHT = 600, 400
+screen = pygame.display.set_mode((WIDTH, HEIGHT))
+pygame.display.set_caption("Voice-Controlled Catch Game")
+
+# Colors
+WHITE = (255, 255, 255)
+BLACK = (0, 0, 0)
+RED = (255, 0, 0)
+GREEN = (0, 255, 0)
+
+# Basket parameters
+basket_width, basket_height = 100, 20
+basket_x = WIDTH // 2 - basket_width // 2
+basket_y = HEIGHT - basket_height - 10
+basket_speed = 20
+
+# Falling object parameters
+object_size = 20
+object_x = random.randint(0, WIDTH - object_size)
+object_y = 0
+object_speed = 5
+
+# Score
+score = 0
+font = pygame.font.Font(None, 36)
+
+# Voice recognition setup
+recognizer = sr.Recognizer()
+microphone = sr.Microphone()
+
+def get_voice_command():
+    try:
+        with microphone as source:
+            recognizer.adjust_for_ambient_noise(source)
+            print("Listening for command...")
+            audio = recognizer.listen(source)
+            command = recognizer.recognize_google(audio)
+            return command.lower()
+    except (sr.UnknownValueError, sr.RequestError):
+        return None
+
+# Game loop
+running = True
+while running:
+    screen.fill(WHITE)
+
+    # Event handling
+    for event in pygame.event.get():
+        if event.type == pygame.QUIT:
+            running = False
+
+    # Get the voice command to move the basket (this call blocks the frame until recognition finishes)
+    command = get_voice_command()
+    if command == "left":
+        basket_x -= basket_speed
+    elif command == "right":
+        basket_x += basket_speed
+
+    # Keep basket within screen bounds
+    basket_x = max(0, min(basket_x, WIDTH - basket_width))
+
+    # Update object position
+    object_y += object_speed
+
+    # Check for collision
+    if object_y + object_size >= basket_y and object_x + object_size > basket_x and object_x < basket_x + basket_width:
+        score += 1
+        object_x = random.randint(0, WIDTH - object_size)
+        object_y = 0
+        object_speed += 0.5  # Increase speed for difficulty
+
+    # Reset object if it falls off screen
+    if object_y > HEIGHT:
+        object_x = random.randint(0, WIDTH - object_size)
+        object_y = 0
+
+    # Draw basket
+    pygame.draw.rect(screen, BLACK, (basket_x, basket_y, basket_width, basket_height))
+
+    # Draw falling object
+    pygame.draw.rect(screen, RED, (object_x, object_y, object_size, object_size))
+
+    # Draw score
+    score_text = font.render(f"Score: {score}", True, GREEN)
+    screen.blit(score_text, (10, 10))
+
+    pygame.display.flip()
+    pygame.time.Clock().tick(30)
+
+pygame.quit()
diff --git a/voice_control_game/requirement.txt b/voice_control_game/requirement.txt
new file mode 100644
index 0000000..13939ec
--- /dev/null
+++ b/voice_control_game/requirement.txt
@@ -0,0 +1,3 @@
+pygame
+SpeechRecognition
+PyAudio
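
Note: the requirements.txt added for the Jarvis assistant in PATCH 2/3 contains only a blank line. A dependency list inferred purely from the imports in front.py and jarvis_adv.py might look like the sketch below; treat it as an assumption rather than a tested manifest (PyAudio is what speech_recognition needs for sr.Microphone, and PyDictionary may not install cleanly on newer Python versions).

pyttsx3
SpeechRecognition
PyAudio
pywhatkit
wikipedia
pygame
pyautogui
keyboard
pyjokes
PyDictionary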
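
Note: in game.py the main loop calls get_voice_command() once per frame, so rendering stalls while the recognizer waits for speech. A minimal sketch of one way around this is shown below, with a background listener thread feeding a queue that the pygame loop polls. The helper names (command_queue, listen_loop, start_listener) are illustrative and not part of the patch.

# Hypothetical helper, not part of the patches above: keeps the pygame loop responsive
# by doing speech recognition in a daemon thread and handing results over via a queue.
import queue
import threading

import speech_recognition as sr

command_queue = queue.Queue()

def listen_loop(recognizer, microphone):
    # Background thread: block on the microphone here, not in the game loop.
    while True:
        try:
            with microphone as source:
                audio = recognizer.listen(source, phrase_time_limit=2)
            command_queue.put(recognizer.recognize_google(audio).lower())
        except (sr.UnknownValueError, sr.RequestError):
            continue

def start_listener():
    recognizer = sr.Recognizer()
    microphone = sr.Microphone()
    with microphone as source:
        recognizer.adjust_for_ambient_noise(source)
    threading.Thread(target=listen_loop, args=(recognizer, microphone), daemon=True).start()

# In the game loop, poll instead of blocking:
#     try:
#         command = command_queue.get_nowait()
#     except queue.Empty:
#         command = None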