diff --git a/AiVirtualMouseProject.py b/AiVirtualMouseProject.py
deleted file mode 100644
index 4376b7a..0000000
--- a/AiVirtualMouseProject.py
+++ /dev/null
@@ -1,169 +0,0 @@
-import sys
-# Machine-specific site-packages path left from development; the raw string
-# avoids backslash-escape problems in the Windows path.
-sys.path.append(r'Users\prath\AppData\Local\Programs\Python\Python38\Lib\site-packages')
-import cv2
-from pynput.keyboard import Key, Listener
-import numpy as np
-import HandTrackingModule as htm
-import time
-import autopy
-import keyboard
-import pyautogui as py
-
-
-def show(key):
-    # True once Esc is pressed (intended for use with pynput's Listener).
-    return key == Key.esc
-
-# Module-level so proton.py can read and reset it as AiVirtualMouseProject.gc_mode.
-gc_mode = 0
-
-
-def Gesture_Controller():
-    global gc_mode
-    gc_mode = 1  # mark gesture recognition as active
-    flag = False
-
- ##########################
- wCam, hCam = 640, 480
- frameR = 100 # Frame Reduction
- smoothening = 7
- #########################
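-    # frameR insets a border inside the camera frame so the fingertip can
-    # reach every screen edge without leaving the camera's view; a larger
-    # smoothening value gives a steadier but laggier cursor.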
-
-    pTime = 0
-    plocX, plocY = 0, 0
-    clocX, clocY = 0, 0
-    fingers = []            # last finger states; empty until a hand is seen
-    x1 = y1 = x2 = y2 = 0   # last fingertip coordinates
-    length = 0              # last fingertip distance
-
-    # cv2.VideoCapture(video_path or device_index)
-    # The device index selects the camera: 0 is the default device, and -1
-    # lets OpenCV pick the first available one.
- cap = cv2.VideoCapture(0)
-    cap.set(cv2.CAP_PROP_FRAME_WIDTH, wCam)   # property id 3: frame width
-    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, hCam)  # property id 4: frame height
- detector = htm.handDetector(maxHands=1)
-    wScr, hScr = autopy.screen.size()  # screen size of the display the program runs on
- # print(wScr, hScr)
-
-
-    while gc_mode:  # proton.py can stop the loop by clearing gc_mode
- # 1. Find hand Landmarks
-        success, img = cap.read()  # cap.read() returns (bool, frame): success is True when
-                                   # a frame was grabbed, and img holds that frame
- img = detector.findHands(img)
- lmList, bbox = detector.findPosition(img)
- # 2. Get the tip of the index and middle fingers
- if len(lmList) != 0:
- x1, y1 = lmList[8][1:]
- x2, y2 = lmList[12][1:]
- # print(x1, y1, x2, y2)
-
-
- # 3. Check which fingers are up
- fingers = detector.fingersUp()
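-            # fingers is [thumb, index, middle, ring, pinky] with 1 = raised,
-            # 0 = folded; each gesture below matches one pattern, e.g. index
-            # and middle up moves the cursor, a closed fist drags.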
-
-
- # Scroll up
- if len(fingers) > 4 and fingers[0] == 0 and fingers[1]==1 and fingers[2] == 1 and fingers[3] == 1 and fingers[4] == 1:
- length, img, lineInfo = detector.findDistance(4, 8, img)
- # print("IN FUN2")
- cv2.circle(img, (lineInfo[4], lineInfo[5]),15, (0, 255, 0), cv2.FILLED)
- py.scroll(50)
-
-            # All five fingers up : release the held button (drop)
-            if len(fingers) > 4 and fingers[0] == 1 and fingers[1] == 1 and fingers[2] == 1 and fingers[3] == 1 and fingers[4] == 1:
-                flag = False
-                py.mouseUp(button='left')
- if (len(fingers)>3 and fingers[3] == 0) or (len(fingers)>4 and fingers[4] == 0):
-
-            # 4. Index and middle fingers up, ring and pinky folded : moving mode
-            if len(fingers) > 4 and fingers[1] == 1 and fingers[2] == 1 and fingers[3] == 0 and fingers[4] == 0:
- length, img, lineInfo = detector.findDistance(8, 12, img)
-
- # 5. Convert Coordinates
- x3 = np.interp(x1, (frameR, wCam - frameR), (0, wScr))
- y3 = np.interp(y1, (frameR, hCam - frameR), (0, hScr))
- # 6. Smoothen Values
- clocX = plocX + (x3 - plocX) / smoothening
- clocY = plocY + (y3 - plocY) / smoothening
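-            # np.interp linearly maps the fingertip from the active box
-            # [frameR, wCam - frameR] onto the screen range [0, wScr]; e.g.
-            # with frameR=100, wCam=640, wScr=1920, x1=370 lands at
-            # (370 - 100) / 440 * 1920 = 1178.
-            # The update cloc = ploc + (target - ploc) / smoothening is an
-            # exponential moving average: each frame the cursor covers 1/7 of
-            # the remaining distance, trading slight lag for far less jitter.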
-
- # 7. Move Mouse
- if length > 40:
- autopy.mouse.move(wScr - clocX, clocY)
- cv2.circle(img, (x1, y1), 15, (255, 0, 255), cv2.FILLED)
- cv2.circle(img, (x2, y2), 15, (255, 0, 255), cv2.FILLED)
- plocX, plocY = clocX, clocY
-
-            # 8. Only the middle finger up : left-click mode
-            if len(fingers) > 4 and fingers[0] == 0 and fingers[1] == 0 and fingers[2] == 1 and fingers[3] == 0 and fingers[4] == 0:
- # 9. Find distance between fingers
- length, img, lineInfo = detector.findDistance(8, 12, img)
- # print(length)
-                # 10. Click once the fingertips are apart
- if length > 30:
- cv2.circle(img, (lineInfo[4], lineInfo[5]),15, (0, 255, 0), cv2.FILLED)
- py.click(button = 'left')
-
-            # Only the index finger up : right-click mode
-            if len(fingers) > 4 and fingers[0] == 0 and fingers[1] == 1 and fingers[2] == 0 and fingers[3] == 0 and fingers[4] == 0:
- # 9. Find distance between fingers
- length, img, lineInfo = detector.findDistance(8, 12, img)
- # print(length)
-                # 10. Click once the fingertips are apart
- if length > 30:
- cv2.circle(img, (lineInfo[4], lineInfo[5]),15, (0, 255, 0), cv2.FILLED)
- py.click(button = 'right')
-
- # Double Click
- if len(fingers) > 4 and fingers[1] == 1 and fingers[2] == 1 and fingers[0]==0 and fingers[3]==0 and fingers[4]==0:
- # 9. Find distance between fingers
- length, img, lineInfo = detector.findDistance(8, 12, img)
- # print(length)
- # 10. Click mouse if distance short
- if length < 30:
- cv2.circle(img, (lineInfo[4], lineInfo[5]),15, (0, 255, 0), cv2.FILLED)
- py.doubleClick()
-
- # Scroll Down
- if len(fingers) > 4 and fingers[0] == 0 and fingers[1]==1 and fingers[2] == 1 and fingers[3] == 1 and fingers[4] == 0:
- length, img, lineInfo = detector.findDistance(4, 8, img)
- # print("IN FUN")
- cv2.circle(img, (lineInfo[4], lineInfo[5]),15, (0, 255, 0), cv2.FILLED)
- py.scroll(-50)
-
- # Drag and Drop
- if len(fingers) > 4 and fingers[0] == 0 and fingers[1] == 0 and fingers[2] == 0 and fingers[3] == 0 and fingers[4] == 0:
- length, img, lineInfo = detector.findDistance(8, 12, img)
-
- # 5. Convert Coordinates
- x3 = np.interp(x1, (frameR, wCam - frameR), (0, wScr))
- y3 = np.interp(y1, (frameR, hCam - frameR), (0, hScr))
- # 6. Smoothen Values
- clocX = plocX + (x3 - plocX) / smoothening
- clocY = plocY + (y3 - plocY) / smoothening
-
-            # 7. Press and hold once, then keep moving the fist to drag
-            if not flag:
-                flag = True
-                py.mouseDown(button='left')
- autopy.mouse.move(wScr - clocX, clocY)
- cv2.circle(img, (x1, y1), 15, (255, 0, 255), cv2.FILLED)
- cv2.circle(img, (x2, y2), 15, (255, 0, 255), cv2.FILLED)
- plocX, plocY = clocX, clocY
-
- # 11. Frame Rate
- cTime = time.time()
- fps = 1 / (cTime - pTime)
- pTime = cTime
- img = cv2.flip(img,1)
- cv2.putText(img, str(int(fps)), (20, 50), cv2.FONT_HERSHEY_PLAIN, 3,
- (255, 0, 0), 3)
- # 12. Display
- cv2.imshow("Image", img)
- cv2.waitKey(1)
-        if keyboard.is_pressed('esc'):
-            gc_mode = 0
-            break
-
-    cap.release()
-    cv2.destroyAllWindows()
\ No newline at end of file
diff --git a/HandTrackingModule.py b/HandTrackingModule.py
deleted file mode 100644
index 4bd0b15..0000000
--- a/HandTrackingModule.py
+++ /dev/null
@@ -1,125 +0,0 @@
-import mediapipe as mp
-import time
-import math
-import numpy as np
-import cv2
-
-class handDetector():
- def __init__(self, mode=False, maxHands=2, modelComplexity=1,detectionCon=0.5, trackCon=0.5):
- self.mode = mode
- self.lmList = []
- self.maxHands = maxHands
- self.modelComplex = modelComplexity
- self.detectionCon = detectionCon
- self.trackCon = trackCon
- self.mpHands = mp.solutions.hands
-        self.hands = self.mpHands.Hands(self.mode, self.maxHands, self.modelComplex, self.detectionCon, self.trackCon)
- self.mpDraw = mp.solutions.drawing_utils
- self.tipIds = [4, 8, 12, 16, 20]
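-        # 4, 8, 12, 16, 20 are MediaPipe's landmark ids for the thumb, index,
-        # middle, ring and pinky fingertips respectively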
-
- def findHands(self, img, draw=True):
-        imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # MediaPipe expects RGB; OpenCV frames are BGR
-        self.results = self.hands.process(imgRGB)      # detect hands in the RGB frame
- # print(self.results.multi_hand_landmarks)
-
- if self.results.multi_hand_landmarks:
- for handLms in self.results.multi_hand_landmarks:
- if draw:
- self.mpDraw.draw_landmarks(img, handLms,self.mpHands.HAND_CONNECTIONS)
-
- return img
-
- def findPosition(self, img, handNo=0, draw=True):
- xList = []
- yList = []
- bbox = []
-        self.lmList = []
- # print(type(self.results.multi_hand_landmarks))
- if self.results.multi_hand_landmarks:
- myHand = self.results.multi_hand_landmarks[handNo]
- for id, lm in enumerate(myHand.landmark):
- # print(id, lm)
- h, w, c = img.shape # height, width, channel
- cx, cy = int(lm.x * w), int(lm.y * h)
- xList.append(cx)
- yList.append(cy)
- # print(id, cx, cy)
- self.lmList.append([id, cx, cy])
- if draw:
- cv2.circle(img, (cx, cy), 5, (255, 0, 255), cv2.FILLED)
-
- xmin, xmax = min(xList), max(xList)
- ymin, ymax = min(yList), max(yList)
- bbox = xmin, ymin, xmax, ymax
-
- if draw:
- cv2.rectangle(img, (xmin - 20, ymin - 20), (xmax + 20, ymax + 20),
- (0, 255, 0), 2)
-
- return self.lmList, bbox
-
- def fingersUp(self):
- fingers = []
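-        # Heuristic: the thumb counts as up when its tip (id 4) lies to the
-        # right of the joint below it on the x-axis (assumes a right hand
-        # facing the camera); the other fingers count as up when the tip is
-        # above the PIP joint two ids below it (smaller y, since image y
-        # grows downward).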
- # Thumb
- # print(len(self.lmList))
- # print(self.lmList[self.tipIds[0]][1])
- # print(self.lmList[self.tipIds[0] - 1][1])
- if len(self.lmList) > 1:
- if self.lmList[self.tipIds[0]][1] > self.lmList[self.tipIds[0] - 1][1]:
- fingers.append(1)
- else:
- fingers.append(0)
-
- # Fingers
- for id in range(1, 5):
- if len(self.lmList) > 2:
- if self.lmList[self.tipIds[id]][2] < self.lmList[self.tipIds[id] - 2][2]:
- fingers.append(1)
- else:
- fingers.append(0)
-
- # totalFingers = fingers.count(1)
-
- return fingers
-
- def findDistance(self, p1, p2, img, draw=True, r=15, t=3):
- x1, y1 = self.lmList[p1][1:]
- x2, y2 = self.lmList[p2][1:]
- cx, cy = (x1 + x2) // 2, (y1 + y2) // 2
-
- if draw:
- cv2.line(img, (x1, y1), (x2, y2), (255, 0, 255), t)
- cv2.circle(img, (x1, y1), r, (255, 0, 255), cv2.FILLED)
- cv2.circle(img, (x2, y2), r, (255, 0, 255), cv2.FILLED)
- cv2.circle(img, (cx, cy), r, (0, 0, 255), cv2.FILLED)
- length = math.hypot(x2 - x1, y2 - y1)
-
- return length, img, [x1, y1, x2, y2, cx, cy]
-
-
-def main():
- pTime = 0
- cTime = 0
- cap = cv2.VideoCapture(0)
- detector = handDetector()
- while True:
- success, img = cap.read()
- img = detector.findHands(img)
- lmList, bbox = detector.findPosition(img)
- if len(lmList) != 0:
- print(lmList[4])
-
- cTime = time.time()
- fps = 1 / (cTime - pTime)
- pTime = cTime
-
-        cv2.putText(img, str(int(fps)), (10, 70), cv2.FONT_HERSHEY_PLAIN, 3, (255, 0, 255), 3)
-
- cv2.imshow("Image", img)
- cv2.waitKey(1)
-
-
-if __name__ == "__main__":
- main()
diff --git a/RegisterUi.py b/RegisterUi.py
deleted file mode 100644
index 19b8631..0000000
--- a/RegisterUi.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Form implementation generated from reading ui file 'RegisterUi.ui'
-#
-# Created by: PyQt5 UI code generator 5.15.4
-#
-# WARNING: Any manual changes made to this file will be lost when pyuic5 is
-# run again. Do not edit this file unless you know what you are doing.
-
-
diff --git a/app.py b/app.py
deleted file mode 100644
index 529b810..0000000
--- a/app.py
+++ /dev/null
@@ -1,56 +0,0 @@
-import eel
-import os
-from queue import Queue
-
-class ChatBot:
-
- started = False
- userinputQueue = Queue()
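-    # Class-level attributes act as shared state between eel's websocket
-    # thread (which calls getUserInput) and the voice loop in proton.py;
-    # the Queue makes that hand-off thread-safe.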
-
- def isUserInput():
- return not ChatBot.userinputQueue.empty()
-
- def popUserInput():
- return ChatBot.userinputQueue.get()
-
-    def close_callback(route, websockets):
-        # The Chrome window closed; shut the whole assistant down.
-        exit()
-
- @eel.expose
- def getUserInput(msg):
- ChatBot.userinputQueue.put(msg)
- print(msg)
-
- def close():
- ChatBot.started = False
-
- def addUserMsg(msg):
- eel.addUserMsg(msg)
-
- def addAppMsg(msg):
- eel.addAppMsg(msg)
-
- def start():
-        path = os.path.dirname(os.path.abspath(__file__))
-        eel.init(os.path.join(path, 'web'), allowed_extensions=['.js', '.html'])
- try:
- eel.start('index.html', mode='chrome',
- host='localhost',
- port=27005,
- block=False,
- size=(350, 480),
- position=(10,100),
- disable_cache=True,
- close_callback=ChatBot.close_callback)
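-        # block=False makes eel.start return immediately, so this thread can
-        # poll ChatBot.started while eel.sleep keeps the websocket serviced.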
- ChatBot.started = True
- while ChatBot.started:
- try:
- eel.sleep(10.0)
- except:
-                # main thread exited
- break
-
-    except:
-        # eel.start raises if Chrome cannot be launched; fail quietly
-        pass
\ No newline at end of file
diff --git a/open.py b/open.py
deleted file mode 100644
index e4eb705..0000000
--- a/open.py
+++ /dev/null
@@ -1,16 +0,0 @@
-import tkinter as tk
-
-window = tk.Tk()
-
-window.title("Running Python Script")
-window.geometry('250x250')
-def callback():
- with open("AiVirtualMouseProject.py", "r", encoding="utf-8") as file:
- exec(file.read())
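-    # Note: exec() runs the mouse script inside this callback, so the Tk
-    # window freezes until the gesture loop exits (Esc); launching it via
-    # subprocess.Popen([sys.executable, "AiVirtualMouseProject.py"]) is a
-    # sketch of a non-blocking alternative, not part of the original code.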
-b = tk.Button(window, text="Run Virtual Mouse", command=callback)
-b.pack()
-
-window.mainloop()
\ No newline at end of file
diff --git a/proton.py b/proton.py
deleted file mode 100644
index 2e9e6e0..0000000
--- a/proton.py
+++ /dev/null
@@ -1,253 +0,0 @@
-import pyttsx3
-import speech_recognition as sr
-from datetime import date
-import time
-import webbrowser
-import datetime
-from pynput.keyboard import Key, Controller
-import pyautogui
-import sys
-import os
-from os import listdir
-from os.path import isfile, join
-import smtplib
-import wikipedia
-# import Gesture_Controller
-import AiVirtualMouseProject
-#import Gesture_Controller_Gloved as Gesture_Controller
-import app
-from threading import Thread
-
-
-# -------------Object Initialization---------------
-today = date.today()
-r = sr.Recognizer()
-keyboard = Controller()
-engine = pyttsx3.init('sapi5')  # SAPI5 is the Windows text-to-speech driver
-voices = engine.getProperty('voices')
-engine.setProperty('voice', voices[0].id)
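-# getProperty('voices') lists the TTS voices installed on this machine;
-# index 0 selects the first one (change the index to switch voices)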
-
-# ----------------Variables------------------------
-file_exp_status = False
-files = []
-path = ''
-is_awake = True  # bot status
-
-# ------------------Functions----------------------
-def reply(audio):
- app.ChatBot.addAppMsg(audio)
-
- print(audio)
- engine.say(audio)
- engine.runAndWait()
-
-
-def wish():
- hour = int(datetime.datetime.now().hour)
-
- if hour>=0 and hour<12:
- reply("Good Morning!")
- elif hour>=12 and hour<18:
- reply("Good Afternoon!")
- else:
- reply("Good Evening!")
-
- reply("I am Proton, how may I help you?")
-
-# Set Microphone parameters
-with sr.Microphone() as source:
- r.energy_threshold = 500
- r.dynamic_energy_threshold = False
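-    # A fixed energy_threshold disables ambient-noise auto-adjustment; raise
-    # it in noisy rooms so background sound is not mistaken for speech.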
-
-# Audio to String
-def record_audio():
- with sr.Microphone() as source:
- r.pause_threshold = 0.8
- voice_data = ''
- audio = r.listen(source, phrase_time_limit=5)
-
- try:
- voice_data = r.recognize_google(audio)
- except sr.RequestError:
-            reply('Sorry, my service is down. Please check your Internet connection')
- except sr.UnknownValueError:
-            print("couldn't recognize the audio")
- return voice_data.lower()
-
-
-# Executes Commands (input: string)
-def respond(voice_data):
- global file_exp_status, files, is_awake, path
- print(voice_data)
-    voice_data = voice_data.replace('proton', '')  # strip the hotword before matching commands
- app.eel.addUserMsg(voice_data)
-
-    if not is_awake:
- if 'wake up' in voice_data:
- is_awake = True
- wish()
-
- # STATIC CONTROLS
- elif 'hello' in voice_data:
- wish()
-
- elif 'what is your name' in voice_data:
- reply('My name is Proton!')
-
- elif 'date' in voice_data:
- reply(today.strftime("%B %d, %Y"))
-
- elif 'time' in voice_data:
- reply(str(datetime.datetime.now()).split(" ")[1].split('.')[0])
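-        # str(datetime.now()) looks like "2024-01-31 14:05:09.123456";
-        # splitting on the space and then the dot leaves just "14:05:09"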
-
- elif 'search' in voice_data:
- reply('Searching for ' + voice_data.split('search')[1])
- url = 'https://google.com/search?q=' + voice_data.split('search')[1]
- try:
- webbrowser.get().open(url)
- reply('This is what I found Sir')
- except:
- reply('Please check your Internet')
-
- elif 'location' in voice_data:
- reply('Which place are you looking for ?')
- temp_audio = record_audio()
- app.eel.addUserMsg(temp_audio)
- reply('Locating...')
- url = 'https://google.nl/maps/place/' + temp_audio + '/&'
- try:
- webbrowser.get().open(url)
- reply('This is what I found Sir')
- except:
- reply('Please check your Internet')
-
- elif ('bye' in voice_data) or ('by' in voice_data):
- reply("Good bye Sir! Have a nice day.")
- is_awake = False
-
- elif ('exit' in voice_data) or ('terminate' in voice_data):
- if AiVirtualMouseProject.gc_mode:
- AiVirtualMouseProject.gc_mode = 0
- app.ChatBot.close()
-        # sys.exit() always raises SystemExit; it is handled in the main loop
- sys.exit()
-
-
- # DYNAMIC CONTROLS
-    elif 'launch app' in voice_data:
-        reply('Launched successfully')
-        # Runs the gesture loop synchronously; voice commands resume only
-        # after the user exits gesture mode with Esc.
-        AiVirtualMouseProject.Gesture_Controller()
-
-
- elif ('stop gesture recognition' in voice_data) or ('top gesture recognition' in voice_data):
- if AiVirtualMouseProject.gc_mode:
- AiVirtualMouseProject.gc_mode = 0
- reply('Gesture recognition stopped')
- else:
- reply('Gesture recognition is already inactive')
-
- elif 'copy' in voice_data:
- with keyboard.pressed(Key.ctrl):
- keyboard.press('c')
- keyboard.release('c')
- reply('Copied')
-
- elif 'page' in voice_data or 'pest' in voice_data or 'paste' in voice_data:
- with keyboard.pressed(Key.ctrl):
- keyboard.press('v')
- keyboard.release('v')
- reply('Pasted')
-
- # File Navigation (Default Folder set to C://)
- elif 'list' in voice_data:
- counter = 0
- path = 'C://'
- files = listdir(path)
- filestr = ""
- for f in files:
- counter+=1
- print(str(counter) + ': ' + f)
-            filestr += str(counter) + ': ' + f + '<br>'
- file_exp_status = True
- reply('These are the files in your root directory')
- app.ChatBot.addAppMsg(filestr)
-
- elif file_exp_status == True:
- counter = 0
- if 'open' in voice_data:
- if isfile(join(path,files[int(voice_data.split(' ')[-1])-1])):
- os.startfile(path + files[int(voice_data.split(' ')[-1])-1])
- file_exp_status = False
- else:
- try:
- path = path + files[int(voice_data.split(' ')[-1])-1] + '//'
- files = listdir(path)
- filestr = ""
- for f in files:
- counter+=1
-                        filestr += str(counter) + ': ' + f + '<br>'
- print(str(counter) + ': ' + f)
- reply('Opened Successfully')
- app.ChatBot.addAppMsg(filestr)
-
- except:
- reply('You do not have permission to access this folder')
-
- if 'back' in voice_data:
- filestr = ""
- if path == 'C://':
- reply('Sorry, this is the root directory')
- else:
- a = path.split('//')[:-2]
- path = '//'.join(a)
- path += '//'
- files = listdir(path)
- for f in files:
- counter+=1
-                    filestr += str(counter) + ': ' + f + '<br>'
- print(str(counter) + ': ' + f)
- reply('ok')
- app.ChatBot.addAppMsg(filestr)
-
- else:
-        reply('I am not programmed to do this!')
-
-# ------------------Driver Code--------------------
-
-t1 = Thread(target = app.ChatBot.start)
-t1.start()
-
-# Lock main thread until Chatbot has started
-while not app.ChatBot.started:
- time.sleep(0.5)
-
-wish()
-voice_data = None
-while True:
-    if app.ChatBot.isUserInput():
-        # take input from the GUI
-        voice_data = app.ChatBot.popUserInput()
-    else:
-        # take input from the microphone
-        voice_data = record_audio()
-
-    # process voice_data only when the hotword "proton" is heard
-    if 'proton' in voice_data:
-        try:
-            # handle the sys.exit() raised by respond() on 'exit'/'terminate'
- respond(voice_data)
- except SystemExit:
-            reply("Exited successfully")
- break
- except:
-            # some other exception was raised while closing
- print("EXCEPTION raised while closing.")
- break
\ No newline at end of file
diff --git a/python.py b/python.py
deleted file mode 100644
index b089568..0000000
--- a/python.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Form implementation generated from reading ui file 'project.ui'
-#
-# Created by: PyQt5 UI code generator 5.15.4
-#
-# WARNING: Any manual changes made to this file will be lost when pyuic5 is
-# run again. Do not edit this file unless you know what you are doing.
-
-
diff --git a/web/css/back.png b/web/css/back.png
deleted file mode 100644
index 4848dda..0000000
Binary files a/web/css/back.png and /dev/null differ
diff --git a/web/css/jquery.convform.css b/web/css/jquery.convform.css
deleted file mode 100644
index 2def61b..0000000
--- a/web/css/jquery.convform.css
+++ /dev/null
@@ -1,367 +0,0 @@
-* {
- margin: 0px;
- padding: 0px;
- /* font-family: sans-serif; */
- font-family: "Comic Sans MS";
-}
-
-div.convFormDynamic {
- width: calc(100% - 33px);
- margin: 10px auto 15px;
- padding: 0 !important;
- position: relative;
- /* box-shadow: 0 0 5px 5px rgba(222, 222, 222, 0.4); */
-}
-
-div.convFormDynamic input.userInputDynamic {
- border: none;
- padding: 6px 6px;
- outline: none;
- font-size: 0.905rem;
- float: left;
- width: 72%;
- height: 25%;
- line-height: 1.3em;
- min-height: 1.7em;
- max-height: 10rem;
- display: block;
- max-width: 72%;
- margin-right: 2.5%;
-}
-div.conv-form-wrapper:after {
- content: '';
- display: block;
- clear: both;
- width: 100%;
- height: 1px;
-}
-div.conv-form-wrapper div#messages {
- max-height: 71vh;
- padding-left: 10px;
- padding-right: 10px;
- height: auto !important;
- overflow-y: scroll;
- scrollbar-width: none;
- -ms-overflow-style: none; /* IE 11 */
-}
-div.conv-form-wrapper * {
- scrollbar-width: none;
- -ms-overflow-style: none;
-}
-div.conv-form-wrapper div#messages:after {
- content: '';
- display: table;
- clear: both;
-}
-div.conv-form-wrapper {
- position: relative;
-}
-div.conv-form-wrapper div.wrapper-messages {
- position: relative;
- height: 600px;
- overflow-y: scroll;
- transition: margin 0.1s;
-}
-div.conv-form-wrapper:before {
- content: '';
- position: absolute;
- width: 100%;
- display: block;
- height: 10px;
- top: 0;
- left: 0;
- z-index: 2;
- background: linear-gradient(#000000, transparent);
-}
-@media (max-width: 767px) {
- div.conv-form-wrapper div.wrapper-messages, div.conv-form-wrapper div#messages {
- max-height: 71vh;
- }
-}
-
-div.conv-form-wrapper div.wrapper-messages::-webkit-scrollbar, div#feed ul::-webkit-scrollbar, div.conv-form-wrapper div.options::-webkit-scrollbar {
- width: 0px;
- height: 0px;
- /* remove scrollbar space */
- background: transparent;
- /* optional: just make scrollbar invisible */
-}
-
-input.userInputDynamic.error {
- color: #ac0000 !important;
-}
-input.userInputDynamic {
- border-radius: 3px;
- margin: 7px 10px;
-}
-
-div.conv-form-wrapper div#messages {
- position: relative;
- bottom: 0;
- margin-left: -5px;
- height: auto !important;
- width: 97%;
- padding-bottom: 20px;
-}
-div.conv-form-wrapper div.options {
- word-wrap: normal;
- white-space: nowrap;
- overflow-x: scroll;
- position: absolute;
- bottom: 100%;
- width: 100%;
- transform: translateY(-5px);
-}
-
-div.conv-form-wrapper div.message:after {
- content: '';
- display: table;
- clear: both;
-}
-div.conv-form-wrapper div.message.ready.rtol {
- animation: slideRtoLIn 0.5s ease;
- transform-origin: 0 0 0;
-}
-div.conv-form-wrapper div.message.ready.ltor {
- animation: slideLtoRIn 0.5s ease;
- transform-origin: 0 0 0;
-}
-div.conv-form-wrapper div#messages div.message {
- border-radius: 20px;
- padding: 12px 22px;
- font-size: 0.905rem;
- color: #333;
- display: inline-block;
- padding: 10px 15px 8px;
- border-radius: 20px;
- margin-bottom: 5px;
- float: right;
- clear: both;
- max-width: 65%;
- word-wrap: break-word;
-}
-/* div.conv-form-wrapper div#messages {
- background: linear-gradient(to right, #388eff 29%, #70aeff 81%);
- color: #fff;
- float: left;
- border-top-left-radius: 0;
-} */
-div.conv-form-wrapper div#messages div.message.to{
- float:left;
- border-top-left-radius: 0;
-}
-div.message.to {
- background: rgba(24, 24, 16, .2);
- border-radius: 2em;
- backdrop-filter: blur(15px);
- border: 2px solid rgba(255, 255, 255, 0.05);
- background-clip: padding-box;
- box-shadow: 10px 10px 10px rgba(46, 54, 68, 0.03);
- color: white;
-}
-
-/* div.conv-form-wrapper div#messages div.message.from {
- background: linear-gradient(to left, #38ff84 0%, #98fbbe 100%);
- color: #000000;
- border-top-right-radius: 0;
-} */
-div.conv-form-wrapper div#messages div.message.from{
- float:right;
- border-top-left-radius: 0;
-}
-div.message.from {
- background: rgba(24, 24, 16, .2);
- border-radius: 2em;
- backdrop-filter: blur(15px);
- border: 2px solid rgba(255, 255, 255, 0.05);
- background-clip: padding-box;
- box-shadow: 10px 10px 10px rgba(46, 54, 68, 0.03);
- color: white;
-}
-.message.to+.message.from, .message.from+.message.to {
- margin-top: 15px;
-}
-@keyframes slideRtoLIn {
- 0% {
- margin-right: -50px;
- }
-
- 100% {
- margin-right: 0px;
- }
-}
-@keyframes slideLtoRIn {
- 0% {
- margin-left: -50px;
- }
-
- 100% {
- margin-left: 0;
- }
-}
-
-div.convFormDynamic button.submit {
- padding: 3px;
- border: none;
- float: left;
- margin: 5px;
- color: #06c5a6;
- cursor: pointer;
- border-radius: 8px;
- font-size: 1.1rem;
- width: 36px;
- height: 35px;
- margin-top: 8px;
- background: #fff;
- outline: none !important;
-}
-
-
-div.convFormDynamic button.submit:hover {
- background: #06b79a;
- color: #fff;
-}
-
-button.submit.glow {
- box-shadow: 0 0 10px 5px rgba(6, 197, 166, 0.4);
-}
-.no-border {
- border: none !important;
-}
-.dragscroll {
- cursor: grab;
-}
-div.conv-form-wrapper div#messages::-webkit-scrollbar, div#feed ul::-webkit-scrollbar {
- width: 0px;
- /* remove scrollbar space */
- background: transparent;
- /* optional: just make scrollbar invisible */
-}
-span.clear {
- display: block;
- clear: both;
-}
-
-
-.header-wrapper {
- background-color: #000000;
- color: white;
- width: auto;
- height: 20px;
- top: 0px;
- left: 0px;
- padding-left: 10px;
- padding-top: 5px;
- padding-bottom: 25px;
- padding-right: 20px;
- text-align: right;
-}
-.logo{
- position: fixed;
- top: 0px;
- left: 5%;
- padding: 10px;
- padding-bottom: 20px;
-}
-
-.chatBody {
- overflow:hidden;
- /* background: #1a1a2e; */
- /* background: url("back.png"); */
- /* background-image: linear-gradient(-20deg, #00cdac 0%, #8ddad5 100%); */
-
- /* background: linear-gradient(to bottom right, rgba(252, 203, 144, 1), rgba(213, 126, 235, 1));
- background-size: cover;
- background-repeat: no-repeat; */
-
- /* background-image: url("back.png");
- background-repeat: no-repeat;
- background-position: 0% 0%;
- background-size: 100% 100%; */
-
- width: 100vw;
- min-height: 100vh;
- background: linear-gradient(to bottom right, rgba(252, 203, 144, 1), rgba(213, 126, 235, 1));
-}
-
-
-.awesome {
- font-family: "Comic Sans MS";
-
- width:100%;
- margin-top: 12px;
- margin-left: 12px;
- text-align: center;
-
- color:#313131;
- font-size:16px;
- font-weight: bold;
- position: absolute;
- -webkit-animation:colorchange 20s infinite alternate;
-}
-
-@-webkit-keyframes colorchange {
- 0% {
-
- color: lightblue;
- }
-
- 10% {
-
- color: #8e44ad;
- }
-
- 20% {
-
- color: #1abc9c;
- }
-
- 30% {
-
- color: #7afad8;
- }
-
- 40% {
-
- color: lightblue;
- }
-
- 50% {
-
- color: #70b8ff;
- }
-
- 60% {
-
- color: lightblue;
- }
-
- 70% {
-
- color: #2980b9;
- }
- 80% {
-
- color: #67d962;
- }
-
- 90% {
-
- color: #62c7d9;
- }
-
- 100% {
-
- color: #9ee84f;
- }
-}
-
-
-.mask-custom {
- background: rgba(24, 24, 16, .2);
- border-radius: 2em;
- backdrop-filter: blur(15px);
- border: 2px solid rgba(255, 255, 255, 0.05);
- background-clip: padding-box;
- box-shadow: 10px 10px 10px rgba(46, 54, 68, 0.03);
-}
\ No newline at end of file
diff --git a/web/images/icon.png b/web/images/icon.png
deleted file mode 100644
index a9c5dc6..0000000
Binary files a/web/images/icon.png and /dev/null differ
diff --git a/web/images/purple-gradient-bar.png b/web/images/purple-gradient-bar.png
deleted file mode 100644
index 43a5dfa..0000000
Binary files a/web/images/purple-gradient-bar.png and /dev/null differ
diff --git a/web/images/purple-gradient.jpg b/web/images/purple-gradient.jpg
deleted file mode 100644
index d749dc5..0000000
Binary files a/web/images/purple-gradient.jpg and /dev/null differ
diff --git a/web/index.html b/web/index.html
deleted file mode 100644
index 48a96d2..0000000
--- a/web/index.html
+++ /dev/null
@@ -1,43 +0,0 @@
-PROTON Welcomes you!