diff --git a/AiVirtualMouseProject.py b/AiVirtualMouseProject.py
new file mode 100644
index 0000000..2fd51ab
--- /dev/null
+++ b/AiVirtualMouseProject.py
@@ -0,0 +1,168 @@
+import sys
+# Hardcoded, machine-specific site-packages path; a raw string avoids the
+# invalid backslash escapes (\p, \A, ...) in the original literal.
+sys.path.append(r'Users\prath\AppData\Local\Programs\Python\Python38\Lib\site-packages')
+import cv2
+from pynput.keyboard import Key, Listener
+import numpy as np
+import HandTrackingModule as htm
+import time
+import autopy
+import keyboard
+import pyautogui as py
+
+# Module-level flag so other modules (see proton.py) can read and reset it
+# via AiVirtualMouseProject.gc_mode.
+gc_mode = 0
+
+def show(key):
+    # Listener callback: True only when Esc is pressed.
+    return key == Key.esc
+
+def Gesture_Controller():
+    global gc_mode
+    gc_mode = 1
+    flag = False  # True while a drag (left button held down) is in progress
+
+    ##########################
+    wCam, hCam = 640, 480
+    frameR = 100  # Frame Reduction
+    smoothening = 7
+    #########################
+
+    pTime = 0
+    plocX, plocY = 0, 0
+    clocX, clocY = 0, 0
+
+    # cv2.VideoCapture(video_path or device index)
+    # device index: the number identifying the camera; its possible values are
+    # usually 0 or -1.
+    cap = cv2.VideoCapture(0)
+    cap.set(3, wCam)  # set capture width
+    cap.set(4, hCam)  # set capture height
+    detector = htm.handDetector(maxHands=1)
+    wScr, hScr = autopy.screen.size()  # screen size of the device running the program
+    # print(wScr, hScr)
+
+    while gc_mode:
+        # 1. Find hand landmarks
+        success, img = cap.read()  # cap.read() returns a bool (True if the
+        # frame was read correctly) and the frame itself
+        img = detector.findHands(img)
+        lmList, bbox = detector.findPosition(img)
+
+        # 2. Get the tips of the index and middle fingers
+        if len(lmList) != 0:
+            x1, y1 = lmList[8][1:]
+            x2, y2 = lmList[12][1:]
+            # print(x1, y1, x2, y2)
+
+            # 3. Check which fingers are up
+            fingers = detector.fingersUp()
+
+            # Scroll up: four fingers up, thumb down
+            if len(fingers) > 4 and fingers[0] == 0 and fingers[1] == 1 and fingers[2] == 1 and fingers[3] == 1 and fingers[4] == 1:
+                length, img, lineInfo = detector.findDistance(4, 8, img)
+                cv2.circle(img, (lineInfo[4], lineInfo[5]), 15, (0, 255, 0), cv2.FILLED)
+                py.scroll(50)
+
+            # All five fingers up: release any drag in progress
+            if len(fingers) > 4 and fingers[0] == 1 and fingers[1] == 1 and fingers[2] == 1 and fingers[3] == 1 and fingers[4] == 1:
+                flag = False
+                py.mouseUp(button='left')
+
+            if (len(fingers) > 3 and fingers[3] == 0) or (len(fingers) > 4 and fingers[4] == 0):
+
+                # 4. Index and middle fingers up, ring and pinky down: moving mode
+                if len(fingers) > 4 and fingers[1] == 1 and fingers[2] == 1 and fingers[3] == 0 and fingers[4] == 0:
+                    length, img, lineInfo = detector.findDistance(8, 12, img)
+
+                    # 5. Convert coordinates from camera space to screen space
+                    x3 = np.interp(x1, (frameR, wCam - frameR), (0, wScr))
+                    y3 = np.interp(y1, (frameR, hCam - frameR), (0, hScr))
+                    # 6. Smoothen values
+                    clocX = plocX + (x3 - plocX) / smoothening
+                    clocY = plocY + (y3 - plocY) / smoothening
+
+                    # 7. Move the mouse while the two fingertips stay apart
+                    # (x is mirrored because the preview frame is flipped below)
+                    if length > 40:
+                        autopy.mouse.move(wScr - clocX, clocY)
+                        cv2.circle(img, (x1, y1), 15, (255, 0, 255), cv2.FILLED)
+                        cv2.circle(img, (x2, y2), 15, (255, 0, 255), cv2.FILLED)
+                        plocX, plocY = clocX, clocY
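+                    # Worked example of the np.interp mapping above, assuming a
+                    # 1920-px-wide screen: np.interp(370, (100, 540), (0, 1920))
+                    # = (370 - 100) / (540 - 100) * 1920 ≈ 1178, so a fingertip
+                    # 61% of the way across the reduced camera frame lands 61%
+                    # of the way across the screen. The smoothing step is an
+                    # exponential moving average with alpha = 1/smoothening.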
+
+            # 8. Only the middle finger up: left-click mode
+            if len(fingers) > 4 and fingers[0] == 0 and fingers[1] == 0 and fingers[2] == 1 and fingers[3] == 0 and fingers[4] == 0:
+                # 9. Find the distance between the index and middle fingertips
+                length, img, lineInfo = detector.findDistance(8, 12, img)
+                # 10. Click when the fingertips are more than 30 px apart
+                if length > 30:
+                    cv2.circle(img, (lineInfo[4], lineInfo[5]), 15, (0, 255, 0), cv2.FILLED)
+                    py.click(button='left')
+
+            # Only the index finger up: right-click mode
+            if len(fingers) > 4 and fingers[0] == 0 and fingers[1] == 1 and fingers[2] == 0 and fingers[3] == 0 and fingers[4] == 0:
+                length, img, lineInfo = detector.findDistance(8, 12, img)
+                if length > 30:
+                    cv2.circle(img, (lineInfo[4], lineInfo[5]), 15, (0, 255, 0), cv2.FILLED)
+                    py.click(button='right')
+
+            # Index and middle fingers up and pinched together: double-click
+            if len(fingers) > 4 and fingers[1] == 1 and fingers[2] == 1 and fingers[0] == 0 and fingers[3] == 0 and fingers[4] == 0:
+                length, img, lineInfo = detector.findDistance(8, 12, img)
+                # click when the fingertip distance is short
+                if length < 30:
+                    cv2.circle(img, (lineInfo[4], lineInfo[5]), 15, (0, 255, 0), cv2.FILLED)
+                    py.doubleClick()
+
+            # Scroll down: index, middle and ring up, thumb and pinky down
+            if len(fingers) > 4 and fingers[0] == 0 and fingers[1] == 1 and fingers[2] == 1 and fingers[3] == 1 and fingers[4] == 0:
+                length, img, lineInfo = detector.findDistance(4, 8, img)
+                cv2.circle(img, (lineInfo[4], lineInfo[5]), 15, (0, 255, 0), cv2.FILLED)
+                py.scroll(-50)
+
+            # Drag and drop: a closed fist holds the left button down and moves
+            if len(fingers) > 4 and fingers[0] == 0 and fingers[1] == 0 and fingers[2] == 0 and fingers[3] == 0 and fingers[4] == 0:
+                length, img, lineInfo = detector.findDistance(8, 12, img)
+
+                # 5. Convert coordinates
+                x3 = np.interp(x1, (frameR, wCam - frameR), (0, wScr))
+                y3 = np.interp(y1, (frameR, hCam - frameR), (0, hScr))
+                # 6. Smoothen values
+                clocX = plocX + (x3 - plocX) / smoothening
+                clocY = plocY + (y3 - plocY) / smoothening
+
+                # 7. Press once at the start of the drag, then keep moving
+                if not flag:
+                    flag = True
+                    py.mouseDown(button='left')
+                autopy.mouse.move(wScr - clocX, clocY)
+                cv2.circle(img, (x1, y1), 15, (255, 0, 255), cv2.FILLED)
+                cv2.circle(img, (x2, y2), 15, (255, 0, 255), cv2.FILLED)
+                plocX, plocY = clocX, clocY
+
+        # 11. Frame rate
+        cTime = time.time()
+        fps = 1 / (cTime - pTime)
+        pTime = cTime
+        img = cv2.flip(img, 1)
+        cv2.putText(img, str(int(fps)), (20, 50), cv2.FONT_HERSHEY_PLAIN, 3,
+                    (255, 0, 0), 3)
+        # 12. Display
+        cv2.imshow("Image", img)
+        cv2.waitKey(1)
+        if keyboard.is_pressed('esc'):
+            break
+
+    gc_mode = 0
+    cv2.destroyAllWindows()
+    cap.release()
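+
+# A minimal, self-contained sketch of the pointer-smoothing filter used in
+# Gesture_Controller(). Call _smooth_demo() by hand to watch it converge; the
+# sample values are illustrative only.
+def _smooth_demo(target=100.0, smoothening=7, frames=5):
+    ploc = 0.0
+    for frame in range(1, frames + 1):
+        # each frame moves 1/smoothening of the remaining distance toward the
+        # target: an exponential moving average with alpha = 1/smoothening
+        ploc = ploc + (target - ploc) / smoothening
+        print(f"frame {frame}: {ploc:.1f}")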
diff --git a/HandTrackingModule.py b/HandTrackingModule.py
new file mode 100644
index 0000000..4bd0b15
--- /dev/null
+++ b/HandTrackingModule.py
@@ -0,0 +1,125 @@
+import mediapipe as mp
+import time
+import math
+import numpy as np
+import cv2
+
+class handDetector():
+    def __init__(self, mode=False, maxHands=2, modelComplexity=1, detectionCon=0.5, trackCon=0.5):
+        self.mode = mode
+        self.lmList = []
+        self.maxHands = maxHands
+        self.modelComplex = modelComplexity
+        self.detectionCon = detectionCon
+        self.trackCon = trackCon
+        self.mpHands = mp.solutions.hands
+        self.hands = self.mpHands.Hands(static_image_mode=self.mode,
+                                        max_num_hands=self.maxHands,
+                                        model_complexity=self.modelComplex,
+                                        min_detection_confidence=self.detectionCon,
+                                        min_tracking_confidence=self.trackCon)
+        self.mpDraw = mp.solutions.drawing_utils
+        self.tipIds = [4, 8, 12, 16, 20]  # fingertip landmark ids: thumb..pinky
+
+    def findHands(self, img, draw=True):
+        # MediaPipe expects RGB; OpenCV delivers BGR, so convert first.
+        imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
+        # process the RGB image to identify the hands in it
+        self.results = self.hands.process(imgRGB)
+
+        if self.results.multi_hand_landmarks:
+            for handLms in self.results.multi_hand_landmarks:
+                if draw:
+                    self.mpDraw.draw_landmarks(img, handLms, self.mpHands.HAND_CONNECTIONS)
+
+        return img
+
+    def findPosition(self, img, handNo=0, draw=True):
+        xList = []
+        yList = []
+        bbox = []
+        self.lmList = []
+        if self.results.multi_hand_landmarks:
+            myHand = self.results.multi_hand_landmarks[handNo]
+            for id, lm in enumerate(myHand.landmark):
+                h, w, c = img.shape  # height, width, channels
+                # landmark coordinates are normalised; scale them to pixels
+                cx, cy = int(lm.x * w), int(lm.y * h)
+                xList.append(cx)
+                yList.append(cy)
+                self.lmList.append([id, cx, cy])
+                if draw:
+                    cv2.circle(img, (cx, cy), 5, (255, 0, 255), cv2.FILLED)
+
+            xmin, xmax = min(xList), max(xList)
+            ymin, ymax = min(yList), max(yList)
+            bbox = xmin, ymin, xmax, ymax
+
+            if draw:
+                cv2.rectangle(img, (xmin - 20, ymin - 20), (xmax + 20, ymax + 20),
+                              (0, 255, 0), 2)
+
+        return self.lmList, bbox
+
+    def fingersUp(self):
+        fingers = []
+        if len(self.lmList) < 21:
+            return fingers  # no hand detected
+
+        # Thumb: compare the tip's x with the joint below it (assumes a single
+        # hand facing the camera, so the comparison is horizontal)
+        if self.lmList[self.tipIds[0]][1] > self.lmList[self.tipIds[0] - 1][1]:
+            fingers.append(1)
+        else:
+            fingers.append(0)
+
+        # Other four fingers: the tip sits above (smaller y than) the PIP joint
+        # two landmarks below it when the finger is extended
+        for id in range(1, 5):
+            if self.lmList[self.tipIds[id]][2] < self.lmList[self.tipIds[id] - 2][2]:
+                fingers.append(1)
+            else:
+                fingers.append(0)
+
+        return fingers
+
+    def findDistance(self, p1, p2, img, draw=True, r=15, t=3):
+        # Euclidean distance between two landmarks, plus their midpoint
+        x1, y1 = self.lmList[p1][1:]
+        x2, y2 = self.lmList[p2][1:]
+        cx, cy = (x1 + x2) // 2, (y1 + y2) // 2
+
+        if draw:
+            cv2.line(img, (x1, y1), (x2, y2), (255, 0, 255), t)
+            cv2.circle(img, (x1, y1), r, (255, 0, 255), cv2.FILLED)
+            cv2.circle(img, (x2, y2), r, (255, 0, 255), cv2.FILLED)
+            cv2.circle(img, (cx, cy), r, (0, 0, 255), cv2.FILLED)
+        length = math.hypot(x2 - x1, y2 - y1)
+
+        return length, img, [x1, y1, x2, y2, cx, cy]
+
+
+def main():
+    pTime = 0
+    cap = cv2.VideoCapture(0)
+    detector = handDetector()
+    while True:
+        success, img = cap.read()
+        img = detector.findHands(img)
+        lmList, bbox = detector.findPosition(img)
+        if len(lmList) != 0:
+            print(lmList[4])  # thumb-tip landmark: [id, x, y]
+
+        cTime = time.time()
+        fps = 1 / (cTime - pTime)
+        pTime = cTime
+
+        cv2.putText(img, str(int(fps)), (10, 70), cv2.FONT_HERSHEY_PLAIN, 3, (255, 0, 255), 3)
+
+        cv2.imshow("Image", img)
+        cv2.waitKey(1)
+
+
+if __name__ == "__main__":
+    main()
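+
+# Reference for the landmark indices used above (MediaPipe Hands model):
+#   0 = wrist, 4 = thumb tip, 8 = index tip, 12 = middle tip,
+#   16 = ring tip, 20 = pinky tip; tip - 2 is that finger's PIP joint.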
diff --git a/__pycache__/AiVirtualMouseProject.cpython-38.pyc b/__pycache__/AiVirtualMouseProject.cpython-38.pyc
new file mode 100644
index 0000000..280a615
Binary files /dev/null and b/__pycache__/AiVirtualMouseProject.cpython-38.pyc differ
diff --git a/__pycache__/HandTrackingModule.cpython-311.pyc b/__pycache__/HandTrackingModule.cpython-311.pyc
new file mode 100644
index 0000000..5b76842
Binary files /dev/null and b/__pycache__/HandTrackingModule.cpython-311.pyc differ
diff --git a/__pycache__/HandTrackingModule.cpython-38.pyc b/__pycache__/HandTrackingModule.cpython-38.pyc
new file mode 100644
index 0000000..cfac991
Binary files /dev/null and b/__pycache__/HandTrackingModule.cpython-38.pyc differ
diff --git a/__pycache__/Utility.cpython-38.pyc b/__pycache__/Utility.cpython-38.pyc
new file mode 100644
index 0000000..2cfbe8b
Binary files /dev/null and b/__pycache__/Utility.cpython-38.pyc differ
diff --git a/__pycache__/app.cpython-38.pyc b/__pycache__/app.cpython-38.pyc
new file mode 100644
index 0000000..66e23b8
Binary files /dev/null and b/__pycache__/app.cpython-38.pyc differ
diff --git a/app.py b/app.py
new file mode 100644
index 0000000..529b810
--- /dev/null
+++ b/app.py
@@ -0,0 +1,56 @@
+import eel
+import os
+from queue import Queue
+
+class ChatBot:
+    # Used as a namespace: these functions are called on the class itself
+    # (ChatBot.start(), ChatBot.popUserInput(), ...), never on an instance.
+
+    started = False
+    userinputQueue = Queue()
+
+    def isUserInput():
+        return not ChatBot.userinputQueue.empty()
+
+    def popUserInput():
+        return ChatBot.userinputQueue.get()
+
+    def close_callback(route, websockets):
+        # called by eel when the browser window closes
+        exit()
+
+    @eel.expose
+    def getUserInput(msg):
+        # called from JavaScript (see web/js/main.js)
+        ChatBot.userinputQueue.put(msg)
+        print(msg)
+
+    def close():
+        ChatBot.started = False
+
+    def addUserMsg(msg):
+        eel.addUserMsg(msg)
+
+    def addAppMsg(msg):
+        eel.addAppMsg(msg)
+
+    def start():
+        path = os.path.dirname(os.path.abspath(__file__))
+        eel.init(os.path.join(path, 'web'), allowed_extensions=['.js', '.html'])
+        try:
+            eel.start('index.html', mode='chrome',
+                      host='localhost',
+                      port=27005,
+                      block=False,
+                      size=(350, 480),
+                      position=(10, 100),
+                      disable_cache=True,
+                      close_callback=ChatBot.close_callback)
+            ChatBot.started = True
+            while ChatBot.started:
+                try:
+                    eel.sleep(10.0)
+                except:
+                    # main thread exited
+                    break
+        except:
+            pass
\ No newline at end of file
diff --git a/open.py b/open.py
new file mode 100644
index 0000000..e4eb705
--- /dev/null
+++ b/open.py
@@ -0,0 +1,16 @@
+import tkinter as tk
+
+window = tk.Tk()
+window.title("Running Python Script")
+window.geometry('250x250')
+
+def callback():
+    # run the virtual-mouse script inside this interpreter
+    with open("AiVirtualMouseProject.py", "r", encoding="utf-8") as file:
+        exec(file.read())
+
+b = tk.Button(window, text="Run Virtual Mouse", command=callback)
+b.pack()
+
+window.mainloop()
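+
+# exec() runs the script in this process and shares its Tk event loop; an
+# alternative sketch (assuming the script sits in the working directory) is
+# to launch it in its own interpreter instead:
+#
+#   import subprocess, sys
+#   subprocess.Popen([sys.executable, "AiVirtualMouseProject.py"])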
diff --git a/proton.py b/proton.py
new file mode 100644
index 0000000..aa3e542
--- /dev/null
+++ b/proton.py
@@ -0,0 +1,257 @@
+import pyttsx3
+import speech_recognition as sr
+from datetime import date
+import time
+import webbrowser
+import datetime
+from pynput.keyboard import Key, Controller
+import pyautogui
+import sys
+import os
+from os import listdir
+from os.path import isfile, join
+import smtplib
+import wikipedia
+import AiVirtualMouseProject
+import app
+from threading import Thread
+
+
+# -------------Object Initialization---------------
+today = date.today()
+r = sr.Recognizer()
+keyboard = Controller()
+engine = pyttsx3.init('sapi5')  # Windows SAPI5 voices
+voices = engine.getProperty('voices')
+engine.setProperty('voice', voices[0].id)
+
+# ----------------Variables------------------------
+file_exp_status = False
+files = []
+path = ''
+is_awake = True  # bot status
+
+# ------------------Functions----------------------
+def reply(audio):
+    app.ChatBot.addAppMsg(audio)
+
+    print(audio)
+    engine.say(audio)
+    engine.runAndWait()
+
+
+def wish():
+    hour = int(datetime.datetime.now().hour)
+
+    if hour >= 0 and hour < 12:
+        reply("Good Morning!")
+    elif hour >= 12 and hour < 18:
+        reply("Good Afternoon!")
+    else:
+        reply("Good Evening!")
+
+    reply("I am Proton, how may I help you?")
+
+# Set microphone parameters
+with sr.Microphone() as source:
+    r.energy_threshold = 500
+    r.dynamic_energy_threshold = False
+
+# Audio to string
+def record_audio():
+    with sr.Microphone() as source:
+        r.pause_threshold = 0.8
+        voice_data = ''
+        audio = r.listen(source, phrase_time_limit=5)
+
+        try:
+            voice_data = r.recognize_google(audio)
+        except sr.RequestError:
+            reply('Sorry, my service is down. Please check your Internet connection')
+        except sr.UnknownValueError:
+            print('could not recognize')
+        print(voice_data.lower())
+        return voice_data.lower()
+
+
+# Executes commands (input: string)
+def respond(voice_data):
+    global file_exp_status, files, is_awake, path
+    print(voice_data)
+    voice_data = voice_data.replace('proton', '')  # strip the wake word
+    app.eel.addUserMsg(voice_data)
+
+    if not is_awake:
+        if 'wake up' in voice_data:
+            is_awake = True
+            wish()
+
+    # STATIC CONTROLS
+    elif 'hello' in voice_data:
+        wish()
+
+    elif 'what is your name' in voice_data:
+        reply('My name is Proton!')
+
+    elif 'date' in voice_data:
+        reply(today.strftime("%B %d, %Y"))
+
+    elif 'time' in voice_data:
+        reply(str(datetime.datetime.now()).split(" ")[1].split('.')[0])
+
+    elif 'search' in voice_data:
+        reply('Searching for ' + voice_data.split('search')[1])
+        url = 'https://google.com/search?q=' + voice_data.split('search')[1]
+        try:
+            webbrowser.get().open(url)
+            reply('This is what I found, Sir')
+        except:
+            reply('Please check your Internet')
+
+    elif 'location' in voice_data:
+        reply('Which place are you looking for?')
+        temp_audio = record_audio()
+        app.eel.addUserMsg(temp_audio)
+        reply('Locating...')
+        url = 'https://google.nl/maps/place/' + temp_audio + '/&'
+        try:
+            webbrowser.get().open(url)
+            reply('This is what I found, Sir')
+        except:
+            reply('Please check your Internet')
+
+    elif ('bye' in voice_data) or ('by' in voice_data) or ('bhai' in voice_data):
+        reply("Good bye Sir! Have a nice day.")
+        is_awake = False
+
+    elif ('exit' in voice_data) or ('terminate' in voice_data):
+        app.ChatBot.close()
+        # sys.exit() always raises SystemExit; it is handled in the main loop
+        sys.exit()
+
+
+    # DYNAMIC CONTROLS
+    elif 'launch app' in voice_data:
+        # Note: Gesture_Controller() blocks this thread until gesture
+        # recognition is stopped.
+        reply('Launched successfully')
+        AiVirtualMouseProject.Gesture_Controller()
+
+    elif ('stop app' in voice_data) or ('top app' in voice_data):
+        if AiVirtualMouseProject.gc_mode:
+            AiVirtualMouseProject.gc_mode = 0
+            reply('Gesture recognition stopped')
+        else:
+            reply('Gesture recognition is already inactive')
+
+    elif 'copy' in voice_data:
+        with keyboard.pressed(Key.ctrl):
+            keyboard.press('c')
+            keyboard.release('c')
+        reply('Copied')
+
+    elif 'page' in voice_data or 'pest' in voice_data or 'paste' in voice_data:
+        with keyboard.pressed(Key.ctrl):
+            keyboard.press('v')
+            keyboard.release('v')
+        reply('Pasted')
+
+    # File navigation (default folder set to C://)
+    elif 'list' in voice_data:
+        # list the entries of the root directory; some may be permission-restricted
+        counter = 0
+        path = 'C://'
+        files = listdir(path)
+        filestr = ""
+        for f in files:
+            counter += 1
+            print(str(counter) + ': ' + f)
+            filestr += str(counter) + ': ' + f + '<br>'  # <br> so the chat window shows one entry per line
+        file_exp_status = True
+        reply('These are the files in your root directory')
+        app.ChatBot.addAppMsg(filestr)
+
+    elif file_exp_status:
+        counter = 0
+        if 'open' in voice_data:
+            if isfile(join(path, files[int(voice_data.split(' ')[-1]) - 1])):
+                os.startfile(path + files[int(voice_data.split(' ')[-1]) - 1])
+                file_exp_status = False
+            else:
+                try:
+                    path = path + files[int(voice_data.split(' ')[-1]) - 1] + '//'
+                    print(path)
+                    files = listdir(path)
+                    filestr = ""
+                    print(files)
+                    for f in files:
+                        counter += 1
+                        filestr += str(counter) + ': ' + f + '<br>'
+                        print(str(counter) + ': ' + f)
+                    reply('Opened successfully')
+                    app.ChatBot.addAppMsg(filestr)
+
+                except:
+                    reply('You do not have permission to access this folder')
+
+        if 'back' in voice_data:
+            filestr = ""
+            if path == 'C://':
+                reply('Sorry, this is the root directory')
+            else:
+                a = path.split('//')[:-2]
+                path = '//'.join(a)
+                path += '//'
+                files = listdir(path)
+                for f in files:
+                    counter += 1
+                    filestr += str(counter) + ': ' + f + '<br>'
+                    print(str(counter) + ': ' + f)
+                reply('ok')
+                app.ChatBot.addAppMsg(filestr)
+
+    else:
+        reply('I am not programmed to do this!')
+
+# ------------------Driver Code--------------------
+
+t1 = Thread(target=app.ChatBot.start)
+t1.start()
+
+# Block the main thread until the chatbot has started
+while not app.ChatBot.started:
+    time.sleep(0.5)
+
+wish()
+voice_data = None
+while True:
+    if app.ChatBot.isUserInput():
+        # take input from the GUI
+        voice_data = app.ChatBot.popUserInput()
+    else:
+        # take input from voice
+        voice_data = record_audio()
+
+    # process voice_data
+    if 'proton' in voice_data:
+        try:
+            # handle the sys.exit() raised by the 'exit' command
+            respond(voice_data)
+        except SystemExit:
+            reply("Exit successful")
+            break
+        except:
+            # some other exception got raised
+            print("EXCEPTION raised while closing.")
+            break
\ No newline at end of file
diff --git a/web/css/back.png b/web/css/back.png
new file mode 100644
index 0000000..4848dda
Binary files /dev/null and b/web/css/back.png differ
diff --git a/web/css/jquery.convform.css b/web/css/jquery.convform.css
new file mode 100644
index 0000000..13ffadd
--- /dev/null
+++ b/web/css/jquery.convform.css
@@ -0,0 +1,363 @@ +* { + margin: 0px; + padding: 0px; + /* font-family: sans-serif; */ + font-family: "Comic Sans MS"; +} + +div.convFormDynamic { + width: calc(100% - 33px); + margin: 10px auto 15px; + padding: 0 !important; + position: relative; + /* box-shadow: 0 0 5px 5px rgba(222, 222, 222, 0.4); */ +} + +div.convFormDynamic input.userInputDynamic { + border: none; + padding: 6px 6px; + outline: none; + font-size: 0.905rem; + float: left; + width: 72%; + height: 25%; + line-height: 1.3em; + min-height: 1.7em; + max-height: 10rem; + display: block; + max-width: 72%; + margin-right: 2.5%; + + background: rgba(24, 24, 16, .2); + border-radius: 2em; + backdrop-filter: blur(15px); + border: 2px solid rgba(255, 255, 255, 0.05); + background-clip: padding-box; + box-shadow: 10px 10px 10px rgba(46, 54, 68, 0.03); +} +div.conv-form-wrapper:after { + content: ''; + display: block; + clear: both; + width: 100%; + height: 1px; +} +div.conv-form-wrapper div#messages { + max-height: 71vh; + padding-left: 10px; + padding-right: 10px; + height: auto !important; + overflow-y: scroll; + scrollbar-width: none; + -ms-overflow-style: none; /* IE 11 */ +} +div.conv-form-wrapper * { + scrollbar-width: none; + -ms-overflow-style: none; +} +div.conv-form-wrapper div#messages:after { + content: ''; + display: table; + clear: both; +} +div.conv-form-wrapper { + position: relative; +} +div.conv-form-wrapper div.wrapper-messages { + position: relative; + height: 600px; + overflow-y: scroll; + transition: margin 0.1s; +} +div.conv-form-wrapper:before { + content: ''; + position: absolute; + width: 100%; + display: block; + height: 10px; + top: 0; + left: 0; + z-index: 2; + background: linear-gradient(#000000, transparent); +} +@media (max-width: 767px) { + div.conv-form-wrapper div.wrapper-messages, div.conv-form-wrapper div#messages { + max-height: 71vh; + } +} + +div.conv-form-wrapper div.wrapper-messages::-webkit-scrollbar, div#feed ul::-webkit-scrollbar, div.conv-form-wrapper div.options::-webkit-scrollbar { + width: 0px; + height: 0px; + /* remove scrollbar space */ + background: transparent; + /* optional: just make scrollbar invisible */ +} + +input.userInputDynamic.error { + color: #ac0000 !important; +} +input.userInputDynamic { + border-radius: 3px; + margin: 7px 10px; +} + +div.conv-form-wrapper div#messages { + position: relative; + bottom: 0; + margin-left: -5px; + height: auto
!important; + width: 97%; + padding-bottom: 20px; +} +div.conv-form-wrapper div.options { + word-wrap: normal; + white-space: nowrap; + overflow-x: scroll; + position: absolute; + bottom: 100%; + width: 100%; + transform: translateY(-5px); +} + +div.conv-form-wrapper div.message:after { + content: ''; + display: table; + clear: both; +} +div.conv-form-wrapper div.message.ready.rtol { + animation: slideRtoLIn 0.5s ease; + transform-origin: 0 0 0; +} +div.conv-form-wrapper div.message.ready.ltor { + animation: slideLtoRIn 0.5s ease; + transform-origin: 0 0 0; +} +div.conv-form-wrapper div#messages div.message { + border-radius: 20px; + padding: 12px 22px; + font-size: 0.905rem; + color: #333; + display: inline-block; + padding: 10px 15px 8px; + border-radius: 20px; + margin-bottom: 5px; + float: right; + clear: both; + max-width: 65%; + word-wrap: break-word; +} +/* div.conv-form-wrapper div#messages { + background: linear-gradient(to right, #388eff 29%, #70aeff 81%); + color: #fff; + float: left; + border-top-left-radius: 0; +} */ +div.conv-form-wrapper div#messages div.message.to{ + float:left; + border-top-left-radius: 0; +} +div.message.to { + background: rgba(24, 24, 16, .2); + border-radius: 2em; + backdrop-filter: blur(15px); + border: 2px solid rgba(255, 255, 255, 0.05); + background-clip: padding-box; + box-shadow: 10px 10px 10px rgba(46, 54, 68, 0.03); + color: white; +} + +/* div.conv-form-wrapper div#messages div.message.from { + background: linear-gradient(to left, #38ff84 0%, #98fbbe 100%); + color: #000000; + border-top-right-radius: 0; +} */ +div.conv-form-wrapper div#messages div.message.from{ + float:right; + border-top-left-radius: 0; +} +div.message.from { + background: rgba(24, 24, 16, .2); + border-radius: 2em; + backdrop-filter: blur(15px); + border: 2px solid rgba(255, 255, 255, 0.05); + background-clip: padding-box; + box-shadow: 10px 10px 10px rgba(46, 54, 68, 0.03); + color: white; +} +.message.to+.message.from, .message.from+.message.to { + margin-top: 15px; +} +@keyframes slideRtoLIn { + 0% { + margin-right: -50px; + } + + 100% { + margin-right: 0px; + } +} +@keyframes slideLtoRIn { + 0% { + margin-left: -50px; + } + + 100% { + margin-left: 0; + } +} + +div.convFormDynamic button.submit { + padding: 3px; + border: none; + float: left; + margin: 5px; + color: black; + cursor: pointer; + border-radius: 8px; + font-size: 1.1rem; + width: 36px; + height: 35px; + margin-top: 8px; + /* background: #fff; */ + outline: none !important; + + background: rgba(24, 24, 16, .2); + border-radius: 10px; + backdrop-filter: blur(15px); + border: 2px solid rgba(255, 255, 255, 0.05); + background-clip: padding-box; + box-shadow: 10px 10px 10px rgba(46, 54, 68, 0.03); +} + + +div.convFormDynamic button.submit:hover { + background: rgba(24, 24, 16, 0.5); + border-radius: 10px; + backdrop-filter: blur(15px); + border: 2px solid rgba(255, 255, 255, 0.05); + background-clip: padding-box; + box-shadow: 10px 10px 10px rgba(46, 54, 68, 0.03); +} + +button.submit.glow { + box-shadow: 0 0 10px 5px rgba(6, 197, 166, 0.4); +} +.no-border { + border: none !important; +} +.dragscroll { + cursor: grab; +} +div.conv-form-wrapper div#messages::-webkit-scrollbar, div#feed ul::-webkit-scrollbar { + width: 0px; + /* remove scrollbar space */ + background: transparent; + /* optional: just make scrollbar invisible */ +} +span.clear { + display: block; + clear: both; +} + + +.header-wrapper { + background-color: #000000; + color: white; + width: auto; + height: 20px; + top: 0px; + left: 0px; + 
padding-left: 10px; + padding-top: 5px; + padding-bottom: 25px; + padding-right: 20px; + text-align: right; +} +.logo{ + position: fixed; + top: 0px; + left: 5%; + padding: 10px; + padding-bottom: 20px; +} + +.chatBody { + overflow:hidden; + + width: 100vw; + min-height: 100vh; + background: linear-gradient(to bottom right, rgba(252, 203, 144, 1), rgba(213, 126, 235, 1)); +} + + +.awesome { + font-family: "Comic Sans MS"; + + width:100%; + margin-top: 12px; + margin-left: 12px; + text-align: center; + + color:#313131; + font-size:16px; + font-weight: bold; + position: absolute; + -webkit-animation:colorchange 20s infinite alternate; +} + +@-webkit-keyframes colorchange { + 0% { + + color: lightblue; + } + + 10% { + + color: #8e44ad; + } + + 20% { + + color: #1abc9c; + } + + 30% { + + color: #7afad8; + } + + 40% { + + color: lightblue; + } + + 50% { + + color: #70b8ff; + } + + 60% { + + color: lightblue; + } + + 70% { + + color: #2980b9; + } + 80% { + + color: #67d962; + } + + 90% { + + color: #62c7d9; + } + + 100% { + + color: #9ee84f; + } +}
diff --git a/web/images/icon.png b/web/images/icon.png
new file mode 100644
index 0000000..0a6c48e
Binary files /dev/null and b/web/images/icon.png differ
diff --git a/web/index.html b/web/index.html
new file mode 100644
index 0000000..48a96d2
--- /dev/null
+++ b/web/index.html
@@ -0,0 +1,43 @@
+<!-- The original markup of this file was lost when the patch was captured
+     (only the text nodes survived). The structure below is a reconstruction
+     inferred from the element ids used in web/js/main.js and the class names
+     in web/css/jquery.convform.css. -->
+<!DOCTYPE html>
+<html>
+<head>
+    <meta charset="utf-8">
+    <title>Proton</title>
+    <link rel="stylesheet" type="text/css" href="css/jquery.convform.css">
+    <script type="text/javascript" src="/eel.js"></script>
+</head>
+<body class="chatBody">
+    <div class="header-wrapper">
+        <img class="logo" src="images/icon.png" alt="Proton">
+    </div>
+    <div class="awesome">PROTON Welcomes you!</div>
+    <div class="conv-form-wrapper">
+        <div class="wrapper-messages">
+            <div id="messages"></div>
+        </div>
+    </div>
+    <div class="convFormDynamic">
+        <input type="text" id="userInput" class="userInputDynamic" placeholder="Type your message...">
+        <button id="userInputButton" class="submit">&#10148;</button>
+    </div>
+    <script type="text/javascript" src="js/main.js"></script>
+</body>
+</html>
diff --git a/web/js/main.js b/web/js/main.js
new file mode 100644
index 0000000..ea111bf
--- /dev/null
+++ b/web/js/main.js
@@ -0,0 +1,49 @@
+// user clicked the send button
+document.getElementById("userInputButton").addEventListener("click", getUserInput, false);
+// user pressed Enter
+document.getElementById("userInput").addEventListener("keyup", function (event) {
+    if (event.key === "Enter") {
+        // cancel the default action
+        event.preventDefault();
+        // process event
+        getUserInput();
+    }
+});
+
+// make these callable from Python as eel.addUserMsg(...) / eel.addAppMsg(...)
+eel.expose(addUserMsg);
+eel.expose(addAppMsg);
+
+// The <div> wrappers in the two innerHTML strings below were stripped from the
+// captured patch; they are reconstructed from changeClass(), which later
+// settles the class to "message from" / "message to", and from the
+// .message.ready.rtol / .message.ready.ltor slide-in animations in the stylesheet.
+function addUserMsg(msg) {
+    var element = document.getElementById("messages");
+    element.innerHTML += '<div class="message ready rtol">' + msg + '</div>';
+    element.scrollTop = element.scrollHeight - element.clientHeight - 15;
+    // delay for the animation to complete, then modify the class to "message from"
+    var index = element.childElementCount - 1;
+    setTimeout(changeClass.bind(null, element, index, "message from"), 500);
+}
+
+function addAppMsg(msg) {
+    var element = document.getElementById("messages");
+    element.innerHTML += '<div class="message ready ltor">' + msg + '</div>';
+    element.scrollTop = element.scrollHeight - element.clientHeight - 15;
+    // delay for the animation to complete, then modify the class to "message to"
+    var index = element.childElementCount - 1;
+    setTimeout(changeClass.bind(null, element, index, "message to"), 500);
+}
+
+function changeClass(element, index, newClass) {
+    console.log(newClass + ' ' + index);
+    element.children[index].className = newClass;
+}
+
+function getUserInput() {
+    var element = document.getElementById("userInput");
+    var msg = element.value;
+    if (msg.length != 0) {
+        element.value = "";
+        eel.getUserInput(msg);
+    }
+}
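+
+// Round trip: Python pushes text to the page with eel.addUserMsg(msg) /
+// eel.addAppMsg(msg) (see app.py), while getUserInput() above sends the typed
+// message back to the Python function exposed as getUserInput in app.py.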