Final code
prathamTailor committed Apr 30, 2023
1 parent e2ef269 commit 880269e
Showing 15 changed files with 1,077 additions and 0 deletions.
168 changes: 168 additions & 0 deletions AiVirtualMouseProject.py
@@ -0,0 +1,168 @@
import sys
# User-specific workaround so Python finds locally installed packages; adjust or remove for your machine.
sys.path.append(r'Users\prath\AppData\Local\Programs\Python\Python38\Lib\site-packages')
import cv2
import numpy as np
from pynput.keyboard import Key, Listener
import HandTrackingModule as htm
import time
import autopy
import keyboard
import pyautogui as py

def show(key):
    # pynput Listener callback: reports whether Esc was pressed (not wired to a Listener in this file).
    if key == Key.esc:
        return True
    else:
        return False

def Gesture_Controller():
    gc_mode = 0
    flag = False  # True while a drag (mouseDown) is in progress

    ##########################
    wCam, hCam = 640, 480
    frameR = 100      # Frame reduction: margin inside the camera frame around the active region
    smoothening = 7   # Higher value = smoother but slower cursor movement
    #########################

    pTime = 0
    plocX, plocY = 0, 0  # previous cursor location
    clocX, clocY = 0, 0  # current cursor location

    # cv2.VideoCapture(video_path or device_index)
    # The device index selects the camera: 0 is the default camera, -1 the first available one.
    cap = cv2.VideoCapture(0)
    cap.set(3, wCam)  # set capture width
    cap.set(4, hCam)  # set capture height
    detector = htm.handDetector(maxHands=1)
    wScr, hScr = autopy.screen.size()  # screen size of the device the program runs on
    # print(wScr, hScr)


    while True:
        # 1. Find hand landmarks.
        # cap.read() returns a bool (True if the frame was read correctly) and the frame itself.
        success, img = cap.read()
        img = detector.findHands(img)
        lmList, bbox = detector.findPosition(img)
        # 2. Get the tips of the index and middle fingers
        if len(lmList) != 0:
            x1, y1 = lmList[8][1:]   # index fingertip
            x2, y2 = lmList[12][1:]  # middle fingertip
            # print(x1, y1, x2, y2)

            # 3. Check which fingers are up
            fingers = detector.fingersUp()

            # Scroll up: thumb down, all four fingers up
            if len(fingers) > 4 and fingers[0] == 0 and fingers[1] == 1 and fingers[2] == 1 and fingers[3] == 1 and fingers[4] == 1:
                length, img, lineInfo = detector.findDistance(4, 8, img)
                cv2.circle(img, (lineInfo[4], lineInfo[5]), 15, (0, 255, 0), cv2.FILLED)
                py.scroll(50)

            # Drag and drop: an open palm (all five fingers up) releases a held item
            if len(fingers) > 4 and fingers[0] == 1 and fingers[1] == 1 and fingers[2] == 1 and fingers[3] == 1 and fingers[4] == 1:
                flag = False
                py.mouseUp(button='left')

            # The remaining gestures all require the ring or pinky finger to be down
            if (len(fingers) > 3 and fingers[3] == 0) or (len(fingers) > 4 and fingers[4] == 0):

                # 4. Index and middle fingers up, ring and pinky down: moving mode
                if len(fingers) > 4 and fingers[1] == 1 and fingers[2] == 1 and fingers[3] == 0 and fingers[4] == 0:
                    length, img, lineInfo = detector.findDistance(8, 12, img)

                    # 5. Convert coordinates: map the active camera region onto the full screen
                    x3 = np.interp(x1, (frameR, wCam - frameR), (0, wScr))
                    y3 = np.interp(y1, (frameR, hCam - frameR), (0, hScr))
                    # 6. Smoothen values
                    clocX = plocX + (x3 - plocX) / smoothening
                    clocY = plocY + (y3 - plocY) / smoothening

                    # 7. Move the mouse; x is mirrored so the cursor tracks the flipped preview
                    if length > 40:
                        autopy.mouse.move(wScr - clocX, clocY)
                        cv2.circle(img, (x1, y1), 15, (255, 0, 255), cv2.FILLED)
                        cv2.circle(img, (x2, y2), 15, (255, 0, 255), cv2.FILLED)
                        plocX, plocY = clocX, clocY
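                    # The smoothing above is a first-order low-pass filter (an
                    # exponential moving average): each frame the cursor covers
                    # 1/smoothening of the remaining distance to the target.
                    # With smoothening = 7, a jump from plocX = 0 to x3 = 700
                    # moves the cursor to 100, then ~186, then ~259, converging
                    # on 700. Larger values damp jitter at the cost of lag.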

                # 8. Only the middle finger up: left-click mode
                if len(fingers) > 4 and fingers[0] == 0 and fingers[1] == 0 and fingers[2] == 1 and fingers[3] == 0 and fingers[4] == 0:
                    # 9. Find the distance between the index and middle fingertips
                    length, img, lineInfo = detector.findDistance(8, 12, img)
                    # 10. Click while the two tips stay apart
                    if length > 30:
                        cv2.circle(img, (lineInfo[4], lineInfo[5]), 15, (0, 255, 0), cv2.FILLED)
                        py.click(button='left')

                # Only the index finger up: right-click mode
                if len(fingers) > 4 and fingers[0] == 0 and fingers[1] == 1 and fingers[2] == 0 and fingers[3] == 0 and fingers[4] == 0:
                    length, img, lineInfo = detector.findDistance(8, 12, img)
                    if length > 30:
                        cv2.circle(img, (lineInfo[4], lineInfo[5]), 15, (0, 255, 0), cv2.FILLED)
                        py.click(button='right')

                # Index and middle fingers up, pinched together: double click
                if len(fingers) > 4 and fingers[0] == 0 and fingers[1] == 1 and fingers[2] == 1 and fingers[3] == 0 and fingers[4] == 0:
                    length, img, lineInfo = detector.findDistance(8, 12, img)
                    # Double-click when the two tips come close together
                    if length < 30:
                        cv2.circle(img, (lineInfo[4], lineInfo[5]), 15, (0, 255, 0), cv2.FILLED)
                        py.doubleClick()

                # Scroll down: index, middle and ring fingers up
                if len(fingers) > 4 and fingers[0] == 0 and fingers[1] == 1 and fingers[2] == 1 and fingers[3] == 1 and fingers[4] == 0:
                    length, img, lineInfo = detector.findDistance(4, 8, img)
                    cv2.circle(img, (lineInfo[4], lineInfo[5]), 15, (0, 255, 0), cv2.FILLED)
                    py.scroll(-50)

                # Drag and drop: a closed fist grabs the item and moves it
                if len(fingers) > 4 and fingers[0] == 0 and fingers[1] == 0 and fingers[2] == 0 and fingers[3] == 0 and fingers[4] == 0:
                    length, img, lineInfo = detector.findDistance(8, 12, img)

                    # Convert coordinates
                    x3 = np.interp(x1, (frameR, wCam - frameR), (0, wScr))
                    y3 = np.interp(y1, (frameR, hCam - frameR), (0, hScr))
                    # Smoothen values
                    clocX = plocX + (x3 - plocX) / smoothening
                    clocY = plocY + (y3 - plocY) / smoothening

                    # Press and hold the left button once; release happens in the open-palm gesture
                    if not flag:
                        flag = True
                        py.mouseDown(button='left')
                    autopy.mouse.move(wScr - clocX, clocY)
                    cv2.circle(img, (x1, y1), 15, (255, 0, 255), cv2.FILLED)
                    cv2.circle(img, (x2, y2), 15, (255, 0, 255), cv2.FILLED)
                    plocX, plocY = clocX, clocY

        # 11. Frame rate
        cTime = time.time()
        fps = 1 / (cTime - pTime)
        pTime = cTime
        img = cv2.flip(img, 1)
        cv2.putText(img, str(int(fps)), (20, 50), cv2.FONT_HERSHEY_PLAIN, 3,
                    (255, 0, 0), 3)
        # 12. Display
        cv2.imshow("Image", img)
        cv2.waitKey(1)
        if keyboard.is_pressed('esc'):
            cv2.destroyAllWindows()
            cap.release()
            break
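
# A minimal entry point, assuming the script is meant to run standalone: the
# diff never calls Gesture_Controller(), and open.py's exec() of this file
# would also trigger this guard, since it runs under __name__ == "__main__".
if __name__ == "__main__":
    Gesture_Controller()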
125 changes: 125 additions & 0 deletions HandTrackingModule.py
@@ -0,0 +1,125 @@
import mediapipe as mp
import time
import math
import numpy as np
import cv2

class handDetector():
    def __init__(self, mode=False, maxHands=2, modelComplexity=1, detectionCon=0.5, trackCon=0.5):
        self.mode = mode
        self.lmList = []
        self.maxHands = maxHands
        self.modelComplex = modelComplexity
        self.detectionCon = detectionCon
        self.trackCon = trackCon
        self.mpHands = mp.solutions.hands
        self.hands = self.mpHands.Hands(self.mode, self.maxHands, self.modelComplex,
                                        self.detectionCon, self.trackCon)
        self.mpDraw = mp.solutions.drawing_utils
        self.tipIds = [4, 8, 12, 16, 20]  # landmark ids of the five fingertips

    def findHands(self, img, draw=True):
        # Convert BGR (OpenCV's color order) to RGB (MediaPipe's) before processing.
        imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        self.results = self.hands.process(imgRGB)  # detect hands in the RGB frame
        # print(self.results.multi_hand_landmarks)

        if self.results.multi_hand_landmarks:
            for handLms in self.results.multi_hand_landmarks:
                if draw:
                    self.mpDraw.draw_landmarks(img, handLms, self.mpHands.HAND_CONNECTIONS)

        return img

    def findPosition(self, img, handNo=0, draw=True):
        xList = []
        yList = []
        bbox = []
        self.lmList = []
        if self.results.multi_hand_landmarks:
            myHand = self.results.multi_hand_landmarks[handNo]
            for id, lm in enumerate(myHand.landmark):
                h, w, c = img.shape  # height, width, channels
                # MediaPipe landmarks are normalised to [0, 1]; scale to pixel coordinates.
                cx, cy = int(lm.x * w), int(lm.y * h)
                xList.append(cx)
                yList.append(cy)
                self.lmList.append([id, cx, cy])
                if draw:
                    cv2.circle(img, (cx, cy), 5, (255, 0, 255), cv2.FILLED)

            xmin, xmax = min(xList), max(xList)
            ymin, ymax = min(yList), max(yList)
            bbox = xmin, ymin, xmax, ymax

            if draw:
                cv2.rectangle(img, (xmin - 20, ymin - 20), (xmax + 20, ymax + 20),
                              (0, 255, 0), 2)

        return self.lmList, bbox

    def fingersUp(self):
        fingers = []
        # Thumb: compare the x of the tip with the x of the joint below it,
        # since the thumb folds sideways rather than downwards.
        if len(self.lmList) > 1:
            if self.lmList[self.tipIds[0]][1] > self.lmList[self.tipIds[0] - 1][1]:
                fingers.append(1)
            else:
                fingers.append(0)

        # Other four fingers: a finger counts as up when its tip is above
        # its PIP joint (smaller y in image coordinates).
        for id in range(1, 5):
            if len(self.lmList) > 2:
                if self.lmList[self.tipIds[id]][2] < self.lmList[self.tipIds[id] - 2][2]:
                    fingers.append(1)
                else:
                    fingers.append(0)

        return fingers
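        # Example outputs: a closed fist gives [0, 0, 0, 0, 0], an open palm
        # [1, 1, 1, 1, 1], index + middle raised [0, 1, 1, 0, 0] (the thumb
        # value depends on which hand is shown and how it is oriented).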

    def findDistance(self, p1, p2, img, draw=True, r=15, t=3):
        x1, y1 = self.lmList[p1][1:]
        x2, y2 = self.lmList[p2][1:]
        cx, cy = (x1 + x2) // 2, (y1 + y2) // 2

        if draw:
            cv2.line(img, (x1, y1), (x2, y2), (255, 0, 255), t)
            cv2.circle(img, (x1, y1), r, (255, 0, 255), cv2.FILLED)
            cv2.circle(img, (x2, y2), r, (255, 0, 255), cv2.FILLED)
            cv2.circle(img, (cx, cy), r, (0, 0, 255), cv2.FILLED)
        length = math.hypot(x2 - x1, y2 - y1)

        return length, img, [x1, y1, x2, y2, cx, cy]


def main():
    pTime = 0
    cTime = 0
    cap = cv2.VideoCapture(0)
    detector = handDetector()
    while True:
        success, img = cap.read()
        img = detector.findHands(img)
        lmList, bbox = detector.findPosition(img)
        if len(lmList) != 0:
            print(lmList[4])

        cTime = time.time()
        fps = 1 / (cTime - pTime)
        pTime = cTime

        cv2.putText(img, str(int(fps)), (10, 70), cv2.FONT_HERSHEY_PLAIN, 3, (255, 0, 255), 3)

        cv2.imshow("Image", img)
        cv2.waitKey(1)


if __name__ == "__main__":
    main()
Binary file added __pycache__/AiVirtualMouseProject.cpython-38.pyc
Binary file added __pycache__/HandTrackingModule.cpython-311.pyc
Binary file added __pycache__/HandTrackingModule.cpython-38.pyc
Binary file added __pycache__/Utility.cpython-38.pyc
Binary file added __pycache__/app.cpython-38.pyc
56 changes: 56 additions & 0 deletions app.py
@@ -0,0 +1,56 @@
import eel
import os
from queue import Queue

class ChatBot:

    started = False
    userinputQueue = Queue()

    def isUserInput():
        return not ChatBot.userinputQueue.empty()

    def popUserInput():
        return ChatBot.userinputQueue.get()

    def close_callback(route, websockets):
        # Called by eel when the window closes; shut the whole app down.
        exit()

    @eel.expose
    def getUserInput(msg):
        # Exposed to JavaScript: the web UI pushes user messages onto the queue.
        ChatBot.userinputQueue.put(msg)
        print(msg)

    def close():
        ChatBot.started = False

    def addUserMsg(msg):
        # Forward a message to the JavaScript side of the UI.
        eel.addUserMsg(msg)

    def addAppMsg(msg):
        eel.addAppMsg(msg)

    def start():
        path = os.path.dirname(os.path.abspath(__file__))
        eel.init(path + r'\web', allowed_extensions=['.js', '.html'])  # backslash separator assumes Windows
        try:
            eel.start('index.html', mode='chrome',
                      host='localhost',
                      port=27005,
                      block=False,
                      size=(350, 480),
                      position=(10, 100),
                      disable_cache=True,
                      close_callback=ChatBot.close_callback)
            ChatBot.started = True
            while ChatBot.started:
                try:
                    eel.sleep(10.0)
                except:
                    # Main thread exited; stop the UI loop.
                    break

        except:
            pass
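
# A minimal way to launch the UI standalone, assuming a web/ folder with an
# index.html next to this file (this call is not part of the original diff):
if __name__ == "__main__":
    ChatBot.start()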
16 changes: 16 additions & 0 deletions open.py
@@ -0,0 +1,16 @@
import sys
import os
import tkinter as tk
from tkinter import *

window = Tk()

window.title("Running Python Script")
window.geometry('250x250')

def callback():
    # Read the virtual-mouse script and execute it in the current process.
    with open("AiVirtualMouseProject.py", "r", encoding="utf-8") as file:
        exec(file.read())

b = tk.Button(window, text="Run Virtual Mouse", command=callback)
b.pack()

window.mainloop()
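
# A sketch of a safer alternative, assuming the script lives in the current
# working directory: launching it as a separate process keeps its endless
# capture loop from blocking this Tk window.
#
#     import subprocess
#     subprocess.Popen([sys.executable, "AiVirtualMouseProject.py"])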