|
28 | 28 | from lark import Lark, UnexpectedCharacters, UnexpectedToken
|
29 | 29 | from PIL import Image
|
30 | 30 | from watchdog.observers import Observer
|
| 31 | +from threading import Event, Thread |
31 | 32 |
|
32 |
| -from visionscript import registry |
33 |
| -from visionscript.config import (CACHE_DIR, CACHE_DIRECTORY, |
34 |
| - CONCURRENT_MAXIMUM, |
| 33 | +from visionscript.config import (CACHE_DIRECTORY, |
35 | 34 | CONCURRENT_VIDEO_TRANSFORMATIONS, DATA_TYPES,
|
36 | 35 | DEVICE, FASTSAM_DIR, FASTSAM_WEIGHTS_DIR,
|
37 | 36 | MAX_FILE_SIZE, STACK_MAXIMUM,
|
38 | 37 | SUPPORTED_INFERENCE_MODELS,
|
39 |
| - SUPPORTED_TRAIN_MODELS, VIDEO_STRIDE) |
| 38 | + SUPPORTED_TRAIN_MODELS, VIDEO_STRIDE, ALIASED_FUNCTIONS) |
40 | 39 | from visionscript.error_handling import (handle_unexpected_characters,
|
41 | 40 | handle_unexpected_token)
|
42 | 41 | from visionscript.grammar import grammar
|
43 | 42 | from visionscript.paper_ocr_correction import (line_processing,
|
44 | 43 | syntax_correction)
|
45 | 44 | from visionscript.pose import Pose
|
46 |
| -from visionscript.rf_models import STANDARD_ROBOFLOW_MODELS |
47 | 45 | from visionscript.state import init_state
|
48 | 46 | from visionscript.usage import USAGE, language_grammar_reference
|
49 | 47 |
|
50 | 48 | # retrieve rf_models.json from ~/.cache/visionscript
|
51 | 49 | # this is where the user keeps a registry of custom models
|
52 | 50 | # which is combined with the standard RF models
|
53 |
# Create the cache directory if needed. exist_ok=True avoids the
# check-then-create race (TOCTOU) of `if not exists: makedirs`.
os.makedirs(CACHE_DIRECTORY, exist_ok=True)

# Seed an empty custom-model registry on first run; it is later merged
# with the standard Roboflow models.
_rf_models_path = os.path.join(CACHE_DIRECTORY, "rf_models.json")
if not os.path.exists(_rf_models_path):
    with open(_rf_models_path, "w") as f:
        json.dump({}, f)

parser = Lark(grammar, start="start")
|
|
68 | 66 |
|
69 | 67 |
|
def run_command(cmd, directory=None):
    """Run *cmd* as a subprocess and raise ``ValueError`` on failure.

    Args:
        cmd: Command and its arguments as a list (no shell is used).
        directory: Working directory for the command, or ``None`` for
            the current one.

    Raises:
        ValueError: If the command exits with a nonzero status.
    """
    try:
        subprocess.run(
            cmd, cwd=directory, stderr=subprocess.STDOUT, check=True
        )
    except subprocess.CalledProcessError as exc:
        # With check=True, subprocess.run raises before any
        # `result.returncode != 0` test could run — that branch was dead
        # code. Translate to the ValueError callers of this helper expect.
        raise ValueError(f"Command '{' '.join(cmd)}' failed to run.") from exc
|
77 | 74 |
|
78 |
| - |
79 | 75 | def install_fastsam_dependencies():
|
| 76 | + print("Installing FastSAM dependencies... (this may take a few minutes)") |
80 | 77 | commands = [
|
81 | 78 | (
|
82 |
| - ["git", "-q", "clone", "https://github.com/CASIA-IVA-Lab/FastSAM"], |
| 79 | + ["git", "clone", "-q", "https://github.com/CASIA-IVA-Lab/FastSAM"], |
83 | 80 | CACHE_DIRECTORY,
|
84 | 81 | ),
|
85 | 82 | (["pip", "install", "--quiet", "-r", "requirements.txt"], FASTSAM_DIR),
|
@@ -155,16 +152,8 @@ def _get_colour_name(rgb_triplet):
|
155 | 152 | return min_colours[min(min_colours.keys())]
|
156 | 153 |
|
157 | 154 |
|
158 |
| -aliased_functions = { |
159 |
| - "isita": "classify", |
160 |
| - "find": "detect", |
161 |
| - "describe": "caption", |
162 |
| - "getcolors": "getcolours", |
163 |
| -} |
164 |
| - |
165 |
| - |
def map_alias_to_underlying_function(alias):
    """Resolve a command alias to its canonical function name.

    Unaliased names pass through unchanged.
    """
    if alias in ALIASED_FUNCTIONS:
        return ALIASED_FUNCTIONS[alias]
    return alias
168 | 157 |
|
169 | 158 |
|
170 | 159 | class VisionScript:
|
@@ -2376,7 +2365,7 @@ def evaluate_tree(self, tree, main_video_thread=False):
|
2376 | 2365 | self.state["ctx"]["last_profile_time"] = start_time
|
2377 | 2366 | self.state["ctx"]["last_command"] = token
|
2378 | 2367 |
|
2379 |
| - if token.value in aliased_functions: |
| 2368 | + if token.value in ALIASED_FUNCTIONS: |
2380 | 2369 | token.value = map_alias_to_underlying_function(token.value)
|
2381 | 2370 |
|
2382 | 2371 | if token.type == "equality":
|
@@ -2670,8 +2659,6 @@ def evaluate_tree(self, tree, main_video_thread=False):
|
2670 | 2659 | self.state["ctx"]["fps"] = 0
|
2671 | 2660 | self.state["ctx"]["active_file"] = None
|
2672 | 2661 |
|
2673 |
| - from threading import Event, Thread |
2674 |
| - |
2675 | 2662 | self.state["ctx"]["camera"] = cv2.VideoCapture(0)
|
2676 | 2663 |
|
2677 | 2664 | context = node.children
|
|
0 commit comments