diff --git a/.github/CHANGELOG.md b/.github/CHANGELOG.md index 64a0697ece..344ad10e0f 100644 --- a/.github/CHANGELOG.md +++ b/.github/CHANGELOG.md @@ -2,7 +2,10 @@ [**Upgrade Guide**](https://intelowl.readthedocs.io/en/latest/Installation.md#update-to-the-most-recent-version) -## [v6.0.2](https://github.com/intelowlproject/IntelOwl/releases/tag/v6.0.1) +## [v6.0.4](https://github.com/intelowlproject/IntelOwl/releases/tag/v6.0.4) +Mostly adjusts and fixes with few new analyzers: Vulners and AILTypoSquatting Library. + +## [v6.0.2](https://github.com/intelowlproject/IntelOwl/releases/tag/v6.0.2) Major fixes and adjustments. We improved the documentation to help the transition to the new major version. We added **Pivot** buttons to enable manual Pivoting from an Observable/File analysis to another. See [Doc](https://intelowl.readthedocs.io/en/latest/Usage.html#pivots) for more info diff --git a/.github/workflows/pull_request_automation.yml b/.github/workflows/pull_request_automation.yml index 2de2556c45..55fc2e5e3a 100644 --- a/.github/workflows/pull_request_automation.yml +++ b/.github/workflows/pull_request_automation.yml @@ -87,7 +87,6 @@ jobs: BUILDKIT_PROGRESS: "plain" STAGE: "ci" REPO_DOWNLOADER_ENABLED: false - WATCHMAN: false - name: Startup script launch (Fast) if: "!contains(github.base_ref, 'master')" @@ -98,7 +97,6 @@ jobs: BUILDKIT_PROGRESS: "plain" STAGE: "ci" REPO_DOWNLOADER_ENABLED: false - WATCHMAN: false - name: Docker debug if: always() diff --git a/.gitignore b/.gitignore index 1ff56b270c..978b5a8152 100644 --- a/.gitignore +++ b/.gitignore @@ -50,3 +50,6 @@ coverage.xml *.cover .hypothesis/ /.env + +# post run dev +integrations/malware_tools_analyzers/clamav/sigs \ No newline at end of file diff --git a/api_app/analyzers_manager/file_analyzers/detectiteasy.py b/api_app/analyzers_manager/file_analyzers/detectiteasy.py new file mode 100644 index 0000000000..3ac5e35e49 --- /dev/null +++ b/api_app/analyzers_manager/file_analyzers/detectiteasy.py @@ 
import logging

from api_app.analyzers_manager.classes import DockerBasedAnalyzer, FileAnalyzer
from tests.mock_utils import MockUpResponse

logger = logging.getLogger(__name__)


class DetectItEasy(FileAnalyzer, DockerBasedAnalyzer):
    """Docker-based analyzer wrapping the Detect-It-Easy (DIE) service.

    Uploads the sample to the ``malware_tools_analyzers`` container's
    ``/die`` endpoint and returns the JSON report DIE produces.
    """

    name: str = "executable_analyzer"
    url: str = "http://malware_tools_analyzers:4002/die"
    # maximum number of polling attempts against the docker service
    max_tries: int = 10
    # seconds to wait between two consecutive polling attempts
    poll_distance: int = 1

    def update(self):
        # no periodic data refresh is needed for this analyzer
        pass

    def run(self):
        """Submit the file to the DIE container and return its report."""
        # sanitize the filename so it is safe to use as a form-data key
        sanitized_name = str(self.filename).translate(
            str.maketrans({"/": "_", " ": "_"})
        )
        file_bytes = self.read_file_bytes()
        cli_args = [f"@{sanitized_name}", "--json"]
        request_data = {"args": cli_args}
        request_files = {sanitized_name: file_bytes}
        logger.info(
            f"Running {self.analyzer_name} on {self.filename} with args: {cli_args}"
        )
        result = self._docker_run(
            request_data, request_files, analyzer_name=self.analyzer_name
        )
        if result:
            return result
        # an empty report means DIE could not classify the sample
        self.report.errors.append("DIE did not detect the file type")
        return {}

    @staticmethod
    def mocked_docker_analyzer_get(*args, **kwargs):
        # canned DIE response used when connections are mocked in tests
        payload = {
            "report": {
                "arch": "NOEXEC",
                "mode": "Unknown",
                "type": "Unknown",
                "detects": [
                    {
                        "name": "Zip",
                        "type": "archive",
                        "string": "archive: Zip(2.0)[38.5%,1 file]",
                        "options": "38.5%,1 file",
                        "version": "2.0",
                    }
                ],
                "filetype": "Binary",
                "endianess": "LE",
            }
        }
        return MockUpResponse(payload, 200)
import logging

import requests

from api_app.analyzers_manager.classes import FileAnalyzer
from api_app.analyzers_manager.exceptions import AnalyzerRunException
from tests.mock_utils import MockUpResponse, if_mock_connections, patch

logger = logging.getLogger(__name__)


class MalprobScan(FileAnalyzer):
    """Upload a file to MalProb.io for AI-based malware detection.

    If the upload path is unavailable (TLP forbids sharing the sample) or the
    service reports the file as already known (HTTP 302), the analyzer falls
    back to a hash-based rescan of the previously submitted sample.
    """

    url: str = "https://malprob.io/api"
    private: bool = False  # whether the uploaded sample is kept private
    timeout: int = 60  # per-request timeout in seconds
    _api_key_name: str  # secret: resolved to the MalProb API token at runtime

    def update(self):
        # no periodic data refresh is needed for this analyzer
        pass

    def run(self):
        """Scan (or rescan) the file and return the MalProb JSON report.

        Raises:
            AnalyzerRunException: when the API answers 204 (rate limit hit).
        """
        file_name = str(self.filename).replace("/", "_").replace(" ", "_")
        headers = {"Authorization": f"Token {self._api_key_name}"}
        binary_file = self.read_file_bytes()

        if self._job.tlp == self._job.TLP.CLEAR.value:
            logger.info(f"uploading {file_name}:{self.md5} to MalProb.io for analysis")
            scan = requests.post(
                f"{self.url}/scan/",
                files={"file": binary_file},
                # NOTE(review): `private` is sent as a Python bool and will be
                # form-encoded as "True"/"False" — confirm the API accepts that
                data={"name": file_name, "private": self.private},
                headers=headers,
                timeout=self.timeout,
                # FIX: the API signals "file already known" with a 302; with
                # requests' default redirect-following the 302 would be
                # consumed transparently and the check below could never fire
                allow_redirects=False,
            )
            # only raises for 4xx/5xx; 204 and 302 pass through to the checks
            scan.raise_for_status()
            if scan.status_code == 204:
                self.disable_for_rate_limit()
                raise AnalyzerRunException("Limit reached for API")
            elif scan.status_code == 302:
                logger.info(
                    f"status 302: file already exists | Rescanning the file: {self.md5}"
                )
                # fall through to the hash-based rescan below
            else:
                return scan.json()

        logger.info(f"rescanning {file_name} using {self.md5} on MalProb.io")
        rescan = requests.post(
            f"{self.url}/rescan/",
            data={"hashcode": self.md5},
            headers=headers,
            timeout=self.timeout,
        )
        rescan.raise_for_status()
        if rescan.status_code == 204:
            self.disable_for_rate_limit()
            raise AnalyzerRunException("Limit reached for API")
        return rescan.json()

    @classmethod
    def _monkeypatch(cls):
        patches = [
            if_mock_connections(
                patch(
                    "requests.post",
                    return_value=MockUpResponse(
                        {
                            "report": {
                                "md5": "8a05a189e58ccd7275f7ffdf88c2c191",
                                "sha1": "a7a70f2f482e6b26eedcf1781b277718078c743a",
                                # FIX: the original triple-quoted literal
                                # embedded a newline plus indentation inside
                                # the digest; keep it a clean 64-char hex string
                                "sha256": "ac24043d48dadc390877a6151515565b"
                                "1fdc1dab028ee2d95d80bd80085d9376",
                            },
                        },
                        200,
                    ),
                ),
            )
        ]
        return super()._monkeypatch(patches=patches)
b/api_app/analyzers_manager/migrations/0091_analyzer_config_vulners.py @@ -0,0 +1,235 @@ +from django.db import migrations +from django.db.models.fields.related_descriptors import ( + ForwardManyToOneDescriptor, + ForwardOneToOneDescriptor, + ManyToManyDescriptor, +) + +plugin = { + "python_module": { + "health_check_schedule": { + "minute": "0", + "hour": "0", + "day_of_week": "*", + "day_of_month": "*", + "month_of_year": "*", + }, + "update_schedule": None, + "module": "vulners.Vulners", + "base_path": "api_app.analyzers_manager.observable_analyzers", + }, + "name": "Vulners", + "description": "[Vulners](vulners.com) is the most complete and the only fully correlated security intelligence database, which goes through constant updates and links 200+ data sources in a unified machine-readable format. It contains 8 mln+ entries, including CVEs, advisories, exploits, and IoCs — everything you need to stay abreast on the latest security threats.", + "disabled": False, + "soft_time_limit": 60, + "routing_key": "default", + "health_check_status": True, + "type": "observable", + "docker_based": False, + "maximum_tlp": "AMBER", + "observable_supported": ["generic"], + "supported_filetypes": [], + "run_hash": False, + "run_hash_type": "", + "not_supported_filetypes": [], + "model": "analyzers_manager.AnalyzerConfig", +} + +params = [ + { + "python_module": { + "module": "vulners.Vulners", + "base_path": "api_app.analyzers_manager.observable_analyzers", + }, + "name": "score_AI", + "type": "bool", + "description": "Score any vulnerability with Vulners AI.\r\nDefault: False", + "is_secret": False, + "required": False, + }, + { + "python_module": { + "module": "vulners.Vulners", + "base_path": "api_app.analyzers_manager.observable_analyzers", + }, + "name": "api_key_name", + "type": "str", + "description": "api key for vulners", + "is_secret": True, + "required": True, + }, + { + "python_module": { + "module": "vulners.Vulners", + "base_path": 
"api_app.analyzers_manager.observable_analyzers", + }, + "name": "skip", + "type": "int", + "description": "skip parameter for vulners analyzer", + "is_secret": False, + "required": False, + }, + { + "python_module": { + "module": "vulners.Vulners", + "base_path": "api_app.analyzers_manager.observable_analyzers", + }, + "name": "size", + "type": "int", + "description": "size parameter for vulners analyzer", + "is_secret": False, + "required": False, + }, +] + +values = [ + { + "parameter": { + "python_module": { + "module": "vulners.Vulners", + "base_path": "api_app.analyzers_manager.observable_analyzers", + }, + "name": "score_AI", + "type": "bool", + "description": "Score any vulnerability with Vulners AI.\r\nDefault: False", + "is_secret": False, + "required": False, + }, + "analyzer_config": "Vulners", + "connector_config": None, + "visualizer_config": None, + "ingestor_config": None, + "pivot_config": None, + "for_organization": False, + "value": False, + "updated_at": "2024-05-22T18:49:52.056060Z", + "owner": None, + }, + { + "parameter": { + "python_module": { + "module": "vulners.Vulners", + "base_path": "api_app.analyzers_manager.observable_analyzers", + }, + "name": "skip", + "type": "int", + "description": "skip parameter for vulners analyzer", + "is_secret": False, + "required": False, + }, + "analyzer_config": "Vulners", + "connector_config": None, + "visualizer_config": None, + "ingestor_config": None, + "pivot_config": None, + "for_organization": False, + "value": 0, + "updated_at": "2024-05-23T06:45:24.105426Z", + "owner": None, + }, + { + "parameter": { + "python_module": { + "module": "vulners.Vulners", + "base_path": "api_app.analyzers_manager.observable_analyzers", + }, + "name": "size", + "type": "int", + "description": "size parameter for vulners analyzer", + "is_secret": False, + "required": False, + }, + "analyzer_config": "Vulners", + "connector_config": None, + "visualizer_config": None, + "ingestor_config": None, + "pivot_config": None, + 
"for_organization": False, + "value": 5, + "updated_at": "2024-05-23T06:45:24.109831Z", + "owner": None, + }, +] + + +def _get_real_obj(Model, field, value): + def _get_obj(Model, other_model, value): + if isinstance(value, dict): + real_vals = {} + for key, real_val in value.items(): + real_vals[key] = _get_real_obj(other_model, key, real_val) + value = other_model.objects.get_or_create(**real_vals)[0] + # it is just the primary key serialized + else: + if isinstance(value, int): + if Model.__name__ == "PluginConfig": + value = other_model.objects.get(name=plugin["name"]) + else: + value = other_model.objects.get(pk=value) + else: + value = other_model.objects.get(name=value) + return value + + if ( + type(getattr(Model, field)) + in [ForwardManyToOneDescriptor, ForwardOneToOneDescriptor] + and value + ): + other_model = getattr(Model, field).get_queryset().model + value = _get_obj(Model, other_model, value) + elif type(getattr(Model, field)) in [ManyToManyDescriptor] and value: + other_model = getattr(Model, field).rel.model + value = [_get_obj(Model, other_model, val) for val in value] + return value + + +def _create_object(Model, data): + mtm, no_mtm = {}, {} + for field, value in data.items(): + value = _get_real_obj(Model, field, value) + if type(getattr(Model, field)) is ManyToManyDescriptor: + mtm[field] = value + else: + no_mtm[field] = value + try: + o = Model.objects.get(**no_mtm) + except Model.DoesNotExist: + o = Model(**no_mtm) + o.full_clean() + o.save() + for field, value in mtm.items(): + attribute = getattr(o, field) + if value is not None: + attribute.set(value) + return False + return True + + +def migrate(apps, schema_editor): + Parameter = apps.get_model("api_app", "Parameter") + PluginConfig = apps.get_model("api_app", "PluginConfig") + python_path = plugin.pop("model") + Model = apps.get_model(*python_path.split(".")) + if not Model.objects.filter(name=plugin["name"]).exists(): + exists = _create_object(Model, plugin) + if not exists: + for 
from django.db import migrations


def migrate(apps, schema_editor):
    """Fix the markdown link in the Validin analyzer description.

    The original text had the ``(text)[url]`` brackets inverted; this
    rewrites it to proper ``[text](url)`` markdown.
    """
    AnalyzerConfig = apps.get_model("analyzers_manager", "AnalyzerConfig")
    plugin_name = "Validin"
    correct_description = "[Validin's](https://app.validin.com) API for threat researchers, teams, and companies to investigate historic and current data describing the structure and composition of the internet."

    try:
        plugin = AnalyzerConfig.objects.get(name=plugin_name)
        plugin.description = correct_description
        plugin.save()
    except AnalyzerConfig.DoesNotExist:
        # the analyzer may be absent on instances that never loaded it;
        # a missing row is not an error for a description fix
        pass


def reverse_migrate(apps, schema_editor):
    """Restore the previous (typo'd) description so the migration reverses cleanly."""
    AnalyzerConfig = apps.get_model("analyzers_manager", "AnalyzerConfig")
    plugin_name = "Validin"
    original_description = "(Validin's)[https://app.validin.com/docs] API for threat researchers, teams, and companies to investigate historic and current data describing the structure and composition of the internet."

    try:
        plugin = AnalyzerConfig.objects.get(name=plugin_name)
        plugin.description = original_description
        plugin.save()
    except AnalyzerConfig.DoesNotExist:
        pass


class Migration(migrations.Migration):
    # data-only migration; no schema changes, so no transaction is required
    atomic = False

    dependencies = [
        ("analyzers_manager", "0091_analyzer_config_vulners"),
    ]
    operations = [migrations.RunPython(migrate, reverse_migrate)]
only works for TLP CLEAR", + "is_secret": False, + "required": False, + }, +] +values = [ + { + "parameter": { + "python_module": { + "module": "ailtyposquatting.AilTypoSquatting", + "base_path": "api_app.analyzers_manager.observable_analyzers", + }, + "name": "dns_resolving", + "type": "bool", + "description": "dns_resolving for AilTypoSquatting; only works for TLP CLEAR", + "is_secret": False, + "required": False, + }, + "analyzer_config": "AILTypoSquatting", + "connector_config": None, + "visualizer_config": None, + "ingestor_config": None, + "pivot_config": None, + "for_organization": False, + "value": False, + "updated_at": "2024-05-26T00:10:15.236358Z", + "owner": None, + }, +] + + +def _get_real_obj(Model, field, value): + def _get_obj(Model, other_model, value): + if isinstance(value, dict): + real_vals = {} + for key, real_val in value.items(): + real_vals[key] = _get_real_obj(other_model, key, real_val) + value = other_model.objects.get_or_create(**real_vals)[0] + # it is just the primary key serialized + else: + if isinstance(value, int): + if Model.__name__ == "PluginConfig": + value = other_model.objects.get(name=plugin["name"]) + else: + value = other_model.objects.get(pk=value) + else: + value = other_model.objects.get(name=value) + return value + + if ( + type(getattr(Model, field)) + in [ForwardManyToOneDescriptor, ForwardOneToOneDescriptor] + and value + ): + other_model = getattr(Model, field).get_queryset().model + value = _get_obj(Model, other_model, value) + elif type(getattr(Model, field)) in [ManyToManyDescriptor] and value: + other_model = getattr(Model, field).rel.model + value = [_get_obj(Model, other_model, val) for val in value] + return value + + +def _create_object(Model, data): + mtm, no_mtm = {}, {} + for field, value in data.items(): + value = _get_real_obj(Model, field, value) + if type(getattr(Model, field)) is ManyToManyDescriptor: + mtm[field] = value + else: + no_mtm[field] = value + try: + o = Model.objects.get(**no_mtm) + 
except Model.DoesNotExist: + o = Model(**no_mtm) + o.full_clean() + o.save() + for field, value in mtm.items(): + attribute = getattr(o, field) + if value is not None: + attribute.set(value) + return False + return True + + +def migrate(apps, schema_editor): + Parameter = apps.get_model("api_app", "Parameter") + PluginConfig = apps.get_model("api_app", "PluginConfig") + python_path = plugin.pop("model") + Model = apps.get_model(*python_path.split(".")) + if not Model.objects.filter(name=plugin["name"]).exists(): + exists = _create_object(Model, plugin) + if not exists: + for param in params: + _create_object(Parameter, param) + for value in values: + _create_object(PluginConfig, value) + + +def reverse_migrate(apps, schema_editor): + python_path = plugin.pop("model") + Model = apps.get_model(*python_path.split(".")) + Model.objects.get(name=plugin["name"]).delete() + + +class Migration(migrations.Migration): + atomic = False + dependencies = [ + ("api_app", "0062_alter_parameter_python_module"), + ("analyzers_manager", "0092_alter_validin_desc"), + ] + + operations = [migrations.RunPython(migrate, reverse_migrate)] diff --git a/api_app/analyzers_manager/migrations/0094_analyzer_config_detectiteasy.py b/api_app/analyzers_manager/migrations/0094_analyzer_config_detectiteasy.py new file mode 100644 index 0000000000..ce01d55109 --- /dev/null +++ b/api_app/analyzers_manager/migrations/0094_analyzer_config_detectiteasy.py @@ -0,0 +1,185 @@ +from django.db import migrations +from django.db.models.fields.related_descriptors import ( + ForwardManyToOneDescriptor, + ForwardOneToOneDescriptor, + ManyToManyDescriptor, +) + +plugin = { + "python_module": { + "health_check_schedule": None, + "update_schedule": None, + "module": "detectiteasy.DetectItEasy", + "base_path": "api_app.analyzers_manager.file_analyzers", + }, + "name": "DetectItEasy", + "description": "[DetectItEasy](https://github.com/horsicq/Detect-It-Easy) is a program for determining types of files.", + "disabled": 
False, + "soft_time_limit": 10, + "routing_key": "default", + "health_check_status": True, + "type": "file", + "docker_based": True, + "maximum_tlp": "RED", + "observable_supported": [], + "supported_filetypes": [], + "run_hash": False, + "run_hash_type": "", + "not_supported_filetypes": [], + "model": "analyzers_manager.AnalyzerConfig", +} + +params = [ + { + "python_module": { + "module": "detectiteasy.DetectItEasy", + "base_path": "api_app.analyzers_manager.file_analyzers", + }, + "name": "max_tries", + "type": "int", + "description": "max_tries for detect it easy", + "is_secret": False, + "required": False, + }, + { + "python_module": { + "module": "detectiteasy.DetectItEasy", + "base_path": "api_app.analyzers_manager.file_analyzers", + }, + "name": "poll_distance", + "type": "int", + "description": "poll_distance for detect it easy", + "is_secret": False, + "required": False, + }, +] + +values = [ + { + "parameter": { + "python_module": { + "module": "detectiteasy.DetectItEasy", + "base_path": "api_app.analyzers_manager.file_analyzers", + }, + "name": "max_tries", + "type": "int", + "description": "max_tries for detect it easy", + "is_secret": False, + "required": False, + }, + "analyzer_config": "DetectItEasy", + "connector_config": None, + "visualizer_config": None, + "ingestor_config": None, + "pivot_config": None, + "for_organization": False, + "value": 10, + "updated_at": "2024-06-05T10:38:28.119622Z", + "owner": None, + }, + { + "parameter": { + "python_module": { + "module": "detectiteasy.DetectItEasy", + "base_path": "api_app.analyzers_manager.file_analyzers", + }, + "name": "poll_distance", + "type": "int", + "description": "poll_distance for detect it easy", + "is_secret": False, + "required": False, + }, + "analyzer_config": "DetectItEasy", + "connector_config": None, + "visualizer_config": None, + "ingestor_config": None, + "pivot_config": None, + "for_organization": False, + "value": 1, + "updated_at": "2024-06-05T10:38:28.426691Z", + "owner": 
None, + }, +] + + +def _get_real_obj(Model, field, value): + def _get_obj(Model, other_model, value): + if isinstance(value, dict): + real_vals = {} + for key, real_val in value.items(): + real_vals[key] = _get_real_obj(other_model, key, real_val) + value = other_model.objects.get_or_create(**real_vals)[0] + # it is just the primary key serialized + else: + if isinstance(value, int): + if Model.__name__ == "PluginConfig": + value = other_model.objects.get(name=plugin["name"]) + else: + value = other_model.objects.get(pk=value) + else: + value = other_model.objects.get(name=value) + return value + + if ( + type(getattr(Model, field)) + in [ForwardManyToOneDescriptor, ForwardOneToOneDescriptor] + and value + ): + other_model = getattr(Model, field).get_queryset().model + value = _get_obj(Model, other_model, value) + elif type(getattr(Model, field)) in [ManyToManyDescriptor] and value: + other_model = getattr(Model, field).rel.model + value = [_get_obj(Model, other_model, val) for val in value] + return value + + +def _create_object(Model, data): + mtm, no_mtm = {}, {} + for field, value in data.items(): + value = _get_real_obj(Model, field, value) + if type(getattr(Model, field)) is ManyToManyDescriptor: + mtm[field] = value + else: + no_mtm[field] = value + try: + o = Model.objects.get(**no_mtm) + except Model.DoesNotExist: + o = Model(**no_mtm) + o.full_clean() + o.save() + for field, value in mtm.items(): + attribute = getattr(o, field) + if value is not None: + attribute.set(value) + return False + return True + + +def migrate(apps, schema_editor): + Parameter = apps.get_model("api_app", "Parameter") + PluginConfig = apps.get_model("api_app", "PluginConfig") + python_path = plugin.pop("model") + Model = apps.get_model(*python_path.split(".")) + if not Model.objects.filter(name=plugin["name"]).exists(): + exists = _create_object(Model, plugin) + if not exists: + for param in params: + _create_object(Parameter, param) + for value in values: + 
_create_object(PluginConfig, value) + + +def reverse_migrate(apps, schema_editor): + python_path = plugin.pop("model") + Model = apps.get_model(*python_path.split(".")) + Model.objects.get(name=plugin["name"]).delete() + + +class Migration(migrations.Migration): + atomic = False + dependencies = [ + ("api_app", "0062_alter_parameter_python_module"), + ("analyzers_manager", "0093_analyzer_config_ailtyposquatting"), + ] + + operations = [migrations.RunPython(migrate, reverse_migrate)] diff --git a/api_app/analyzers_manager/migrations/0095_analyzer_config_malprobsearch.py b/api_app/analyzers_manager/migrations/0095_analyzer_config_malprobsearch.py new file mode 100644 index 0000000000..98fa1bc801 --- /dev/null +++ b/api_app/analyzers_manager/migrations/0095_analyzer_config_malprobsearch.py @@ -0,0 +1,123 @@ +from django.db import migrations +from django.db.models.fields.related_descriptors import ( + ForwardManyToOneDescriptor, + ForwardOneToOneDescriptor, + ManyToManyDescriptor, +) + +plugin = { + "python_module": { + "health_check_schedule": { + "minute": "0", + "hour": "0", + "day_of_week": "*", + "day_of_month": "*", + "month_of_year": "*", + }, + "update_schedule": None, + "module": "malprob.MalprobSearch", + "base_path": "api_app.analyzers_manager.observable_analyzers", + }, + "name": "MalprobSearch", + "description": "[Malprob](https://malprob.io/) is a leading malware detection and identification service, powered by cutting-edge AI technology.", + "disabled": False, + "soft_time_limit": 10, + "routing_key": "default", + "health_check_status": True, + "type": "observable", + "docker_based": False, + "maximum_tlp": "AMBER", + "observable_supported": ["hash"], + "supported_filetypes": [], + "run_hash": False, + "run_hash_type": "", + "not_supported_filetypes": [], + "model": "analyzers_manager.AnalyzerConfig", +} + +params = [] + +values = [] + + +def _get_real_obj(Model, field, value): + def _get_obj(Model, other_model, value): + if isinstance(value, dict): + 
real_vals = {} + for key, real_val in value.items(): + real_vals[key] = _get_real_obj(other_model, key, real_val) + value = other_model.objects.get_or_create(**real_vals)[0] + # it is just the primary key serialized + else: + if isinstance(value, int): + if Model.__name__ == "PluginConfig": + value = other_model.objects.get(name=plugin["name"]) + else: + value = other_model.objects.get(pk=value) + else: + value = other_model.objects.get(name=value) + return value + + if ( + type(getattr(Model, field)) + in [ForwardManyToOneDescriptor, ForwardOneToOneDescriptor] + and value + ): + other_model = getattr(Model, field).get_queryset().model + value = _get_obj(Model, other_model, value) + elif type(getattr(Model, field)) in [ManyToManyDescriptor] and value: + other_model = getattr(Model, field).rel.model + value = [_get_obj(Model, other_model, val) for val in value] + return value + + +def _create_object(Model, data): + mtm, no_mtm = {}, {} + for field, value in data.items(): + value = _get_real_obj(Model, field, value) + if type(getattr(Model, field)) is ManyToManyDescriptor: + mtm[field] = value + else: + no_mtm[field] = value + try: + o = Model.objects.get(**no_mtm) + except Model.DoesNotExist: + o = Model(**no_mtm) + o.full_clean() + o.save() + for field, value in mtm.items(): + attribute = getattr(o, field) + if value is not None: + attribute.set(value) + return False + return True + + +def migrate(apps, schema_editor): + Parameter = apps.get_model("api_app", "Parameter") + PluginConfig = apps.get_model("api_app", "PluginConfig") + python_path = plugin.pop("model") + Model = apps.get_model(*python_path.split(".")) + if not Model.objects.filter(name=plugin["name"]).exists(): + exists = _create_object(Model, plugin) + if not exists: + for param in params: + _create_object(Parameter, param) + for value in values: + _create_object(PluginConfig, value) + + +def reverse_migrate(apps, schema_editor): + python_path = plugin.pop("model") + Model = 
apps.get_model(*python_path.split(".")) + Model.objects.get(name=plugin["name"]).delete() + + +class Migration(migrations.Migration): + atomic = False + dependencies = [ + ("api_app", "0062_alter_parameter_python_module"), + ("analyzers_manager", "0094_analyzer_config_detectiteasy"), + ] + + operations = [migrations.RunPython(migrate, reverse_migrate)] diff --git a/api_app/analyzers_manager/migrations/0096_analyzer_config_malprobscan.py b/api_app/analyzers_manager/migrations/0096_analyzer_config_malprobscan.py new file mode 100644 index 0000000000..4761282db3 --- /dev/null +++ b/api_app/analyzers_manager/migrations/0096_analyzer_config_malprobscan.py @@ -0,0 +1,202 @@ +from django.db import migrations +from django.db.models.fields.related_descriptors import ( + ForwardManyToOneDescriptor, + ForwardOneToOneDescriptor, + ManyToManyDescriptor, +) + +plugin = { + "python_module": { + "health_check_schedule": { + "minute": "0", + "hour": "0", + "day_of_week": "*", + "day_of_month": "*", + "month_of_year": "*", + }, + "update_schedule": None, + "module": "malprob.MalprobScan", + "base_path": "api_app.analyzers_manager.file_analyzers", + }, + "name": "MalprobScan", + "description": "[Malprob](https://malprob.io/) is a malware detection and identification service, powered by cutting-edge AI technology.", + "disabled": False, + "soft_time_limit": 60, + "routing_key": "default", + "health_check_status": True, + "type": "file", + "docker_based": False, + "maximum_tlp": "AMBER", + "observable_supported": [], + "supported_filetypes": [], + "run_hash": False, + "run_hash_type": "", + "not_supported_filetypes": [], + "model": "analyzers_manager.AnalyzerConfig", +} + +params = [ + { + "python_module": { + "module": "malprob.MalprobScan", + "base_path": "api_app.analyzers_manager.file_analyzers", + }, + "name": "api_key_name", + "type": "str", + "description": "api key for MalprobScan", + "is_secret": True, + "required": True, + }, + { + "python_module": { + "module": 
"malprob.MalprobScan", + "base_path": "api_app.analyzers_manager.file_analyzers", + }, + "name": "private", + "type": "bool", + "description": "private scan for MalprobScan", + "is_secret": False, + "required": False, + }, + { + "python_module": { + "module": "malprob.MalprobScan", + "base_path": "api_app.analyzers_manager.file_analyzers", + }, + "name": "timeout", + "type": "int", + "description": "request timeout for MalprobScan", + "is_secret": False, + "required": False, + }, +] + +values = [ + { + "parameter": { + "python_module": { + "module": "malprob.MalprobScan", + "base_path": "api_app.analyzers_manager.file_analyzers", + }, + "name": "private", + "type": "bool", + "description": "private scan for MalprobScan", + "is_secret": False, + "required": False, + }, + "analyzer_config": "MalprobScan", + "connector_config": None, + "visualizer_config": None, + "ingestor_config": None, + "pivot_config": None, + "for_organization": False, + "value": False, + "updated_at": "2024-06-03T22:17:04.195860Z", + "owner": None, + }, + { + "parameter": { + "python_module": { + "module": "malprob.MalprobScan", + "base_path": "api_app.analyzers_manager.file_analyzers", + }, + "name": "timeout", + "type": "int", + "description": "request timeout for MalprobScan", + "is_secret": False, + "required": False, + }, + "analyzer_config": "MalprobScan", + "connector_config": None, + "visualizer_config": None, + "ingestor_config": None, + "pivot_config": None, + "for_organization": False, + "value": 60, + "updated_at": "2024-06-04T10:23:40.132533Z", + "owner": None, + }, +] + + +def _get_real_obj(Model, field, value): + def _get_obj(Model, other_model, value): + if isinstance(value, dict): + real_vals = {} + for key, real_val in value.items(): + real_vals[key] = _get_real_obj(other_model, key, real_val) + value = other_model.objects.get_or_create(**real_vals)[0] + # it is just the primary key serialized + else: + if isinstance(value, int): + if Model.__name__ == "PluginConfig": + value = 
other_model.objects.get(name=plugin["name"]) + else: + value = other_model.objects.get(pk=value) + else: + value = other_model.objects.get(name=value) + return value + + if ( + type(getattr(Model, field)) + in [ForwardManyToOneDescriptor, ForwardOneToOneDescriptor] + and value + ): + other_model = getattr(Model, field).get_queryset().model + value = _get_obj(Model, other_model, value) + elif type(getattr(Model, field)) in [ManyToManyDescriptor] and value: + other_model = getattr(Model, field).rel.model + value = [_get_obj(Model, other_model, val) for val in value] + return value + + +def _create_object(Model, data): + mtm, no_mtm = {}, {} + for field, value in data.items(): + value = _get_real_obj(Model, field, value) + if type(getattr(Model, field)) is ManyToManyDescriptor: + mtm[field] = value + else: + no_mtm[field] = value + try: + o = Model.objects.get(**no_mtm) + except Model.DoesNotExist: + o = Model(**no_mtm) + o.full_clean() + o.save() + for field, value in mtm.items(): + attribute = getattr(o, field) + if value is not None: + attribute.set(value) + return False + return True + + +def migrate(apps, schema_editor): + Parameter = apps.get_model("api_app", "Parameter") + PluginConfig = apps.get_model("api_app", "PluginConfig") + python_path = plugin.pop("model") + Model = apps.get_model(*python_path.split(".")) + if not Model.objects.filter(name=plugin["name"]).exists(): + exists = _create_object(Model, plugin) + if not exists: + for param in params: + _create_object(Parameter, param) + for value in values: + _create_object(PluginConfig, value) + + +def reverse_migrate(apps, schema_editor): + python_path = plugin.pop("model") + Model = apps.get_model(*python_path.split(".")) + Model.objects.get(name=plugin["name"]).delete() + + +class Migration(migrations.Migration): + atomic = False + dependencies = [ + ("api_app", "0062_alter_parameter_python_module"), + ("analyzers_manager", "0095_analyzer_config_malprobsearch"), + ] + + operations = 
import logging
import math

from ail_typo_squatting import typo
from ail_typo_squatting.dns_local import resolving

from api_app.analyzers_manager import classes
from tests.mock_utils import if_mock_connections, patch

logger = logging.getLogger(__name__)


class AilTypoSquatting(classes.ObservableAnalyzer):
    """
    wrapper for https://github.com/typosquatter/ail-typo-squatting

    Generates typo-squatting permutations of the observed domain and,
    optionally (TLP CLEAR jobs only), resolves them via DNS.
    """

    # when True (and the job TLP allows contacting external resolvers)
    # the generated variations are also resolved via DNS
    dns_resolving: bool = False

    def update(self) -> bool:
        # nothing to update: the permutation engine is a pure library
        pass

    def run(self):
        response = {}
        # single-line, lazy %-style logging: the original triple-quoted
        # f-strings embedded raw newlines and source indentation in records
        logger.info(
            "running AilTypoSquatting on %s with tlp %s and dns resolving %s",
            self.observable_name,
            self._job.tlp,
            self.dns_resolving,
        )

        response["algorithms"] = typo.runAll(
            domain=self.observable_name,
            limit=math.inf,
            formatoutput="text",
            pathOutput=None,
        )

        # DNS lookups leak the observable to external resolvers, so only
        # run them on TLP:CLEAR jobs and when explicitly enabled
        if self._job.tlp == self._job.TLP.CLEAR.value and self.dns_resolving:
            to_resolve = response["algorithms"]
            # for "x.com", response["algorithms"][0] == ".com",
            # which is not valid for lookup -> drop it
            if len(self.observable_name.split(".")[0]) == 1:
                logger.info(
                    "running dns resolving on %s excluding %s",
                    self.observable_name,
                    response["algorithms"][0],
                )
                to_resolve = to_resolve[1:]
            response["dnsResolving"] = resolving.dnsResolving(
                resultList=to_resolve,
                domain=self.observable_name,
                pathOutput=None,
            )

        return response

    @classmethod
    def _monkeypatch(cls):
        patches = [
            if_mock_connections(
                patch.object(typo, "runAll", return_value=None),
                patch.object(resolving, "dnsResolving", return_value=None),
            )
        ]
        return super()._monkeypatch(patches=patches)
import requests

from api_app.analyzers_manager import classes
from tests.mock_utils import MockUpResponse, if_mock_connections, patch


class MalprobSearch(classes.ObservableAnalyzer):
    """
    Search an observable in the Malprob (https://malprob.io) database.

    Plain REST lookup against the public search endpoint: no API key
    and no extra parameters are needed.
    """

    url: str = "https://malprob.io/api"

    def update(self):
        # nothing to update: this analyzer only queries a remote service
        pass

    def run(self):
        response = requests.get(
            f"{self.url}/search/{self.observable_name}",
            timeout=10,
        )
        response.raise_for_status()
        return response.json()

    @classmethod
    def _monkeypatch(cls):
        # BUGFIX: the "trid" and digest fields of the original mock were
        # written as multi-line triple-quoted strings, which embedded
        # newlines and source indentation inside the values; they are
        # normalized here so every sha256 is a contiguous 64-char hex digest.
        mocked_report = {
            "report": {
                "md5": "8a05a189e58ccd7275f7ffdf88c2c191",
                "mime": "application/java-archive",
                "name": "sample.apk",
                "sha1": "a7a70f2f482e6b26eedcf1781b277718078c743a",
                "size": 3425,
                "test": 0,
                "trid": "Android Package (63.7%) | Java Archive (26.4%) | "
                "ZIP compressed archive (7.8%) | "
                "PrintFox/Pagefox bitmap (1.9%)",
                "type": "ARCHIVE",
                "label": "benign",
                "magic": "application/java-archive",
                "score": 0.0003923133846427324,
                "nested": [
                    {
                        "name": "MANIFEST.MF",
                        "size": 331,
                        "type": "text/plain",
                        "score": 0.0003923133846427324,
                        "sha256": "b093f736dac9f016788f59d6218eb"
                        "2c9015e30e01ec88dc031863ff83e998e33",
                        "complete": True,
                        "supported": True,
                    },
                    {
                        "name": "CERT.SF",
                        "size": 384,
                        "type": "text/plain",
                        "score": 6.292509868171916e-06,
                        "sha256": "db5b14f8ccb0276e6db502e2b3ad1e"
                        "75728a2d65c1798fcbe1ed8e153b0b17a6",
                        "complete": True,
                        "supported": True,
                    },
                    {
                        "name": "a.png",
                        "size": 87,
                        "type": "image/png",
                        "score": 0.0,
                        "sha256": "cc30bfc9a985956c833a135389743e96"
                        "835fdddae75aab5f06f3cb8d10f1af9f",
                        "complete": True,
                        "supported": True,
                    },
                    {
                        "name": "CERT.RSA",
                        "size": 481,
                        "type": "application/octet-stream",
                        "score": "NaN",
                        "sha256": "3b3b283f338421ae31532a508bbc6aa8c"
                        "1da54fc75357cfa9ac97cd4e46040a7",
                        "complete": True,
                        "supported": False,
                    },
                    {
                        "name": "classes.dex",
                        "size": 920,
                        "type": "application/octet-stream",
                        "score": "NaN",
                        "sha256": "fab857801d10f45887ad376263de6bc1c"
                        "9e1893060d63cb5ad4eefb72f354112",
                        "complete": True,
                        "supported": False,
                    },
                    {
                        "name": "resources.arsc",
                        "size": 560,
                        "type": "application/octet-stream",
                        "score": "NaN",
                        "sha256": "d118e4e8b4921dbcaa5874012fb8426a08"
                        "a195461285dee7c42b1bd7c6028802",
                        "complete": True,
                        "supported": False,
                    },
                    {
                        "name": "AndroidManifest.xml",
                        "size": 1248,
                        "type": "application/octet-stream",
                        "score": "NaN",
                        "sha256": "a718ac6589ff638ba8d799824ecdf0a858"
                        "77f9e0381e6b573bf552875dd04ce9",
                        "complete": True,
                        "supported": False,
                    },
                ],
                "sha256": "ac24043d48dadc390877a6151515565b"
                "1fdc1dab028ee2d95d80bd80085d9376",
                "category": "ARCHIVE",
                "complete": True,
                "encoding": None,
                "extracted": True,
                "predicted": True,
                "scan_time": 219511,
                "supported": True,
                "insert_date": 1717233771,
                "parent_hash": [None],
            },
        }
        patches = [
            if_mock_connections(
                patch(
                    "requests.get",
                    return_value=MockUpResponse(mocked_report, 200),
                ),
            )
        ]
        return super()._monkeypatch(patches=patches)
import logging

import requests

from api_app.analyzers_manager import classes
from tests.mock_utils import MockUpResponse, if_mock_connections, patch

logger = logging.getLogger(__name__)


class Vulners(classes.ObservableAnalyzer):
    """
    This analyzer is a wrapper for the vulners project.
    """

    # when True, use the Vulners AI text-scoring endpoint instead of
    # the lucene database search
    score_AI: bool = False
    skip: int = 0  # pagination offset (number of results to skip)
    size: int = 5  # max number of results to return
    # secret bound by the framework; sent as the "apiKey" request field
    _api_key_name: str
    url = "https://vulners.com/api/v3"

    def search_ai(self):
        """Score the observable text with the Vulners AI endpoint."""
        return requests.post(
            url=self.url + "/ai/scoretext/",
            headers={"Content-Type": "application/json"},
            json={"text": self.observable_name, "apiKey": self._api_key_name},
        )

    def search_database(self):
        """Search the observable in the Vulners lucene database."""
        return requests.post(
            url=self.url + "/search/lucene",
            headers={"Content-Type": "application/json"},
            json={
                "query": self.observable_name,
                # BUGFIX: "skip" and "size" were swapped in the original,
                # so with the defaults (skip=0, size=5) every request asked
                # for 0 results starting at offset 5, i.e. an empty answer
                "skip": self.skip,
                "size": self.size,
                "apiKey": self._api_key_name,
            },
        )

    # keep the original misspelled name as an alias so any existing
    # callers/config referencing it keep working
    search_databse = search_database

    def run(self):
        response = self.search_ai() if self.score_AI else self.search_database()
        response.raise_for_status()
        return response.json()

    # this is a framework implication
    def update(self) -> bool:
        pass

    @classmethod
    def _monkeypatch(cls):
        response = {"result": "OK", "data": {"score": [6.5, "NONE"]}}
        patches = [
            if_mock_connections(
                patch(
                    "requests.post",
                    return_value=MockUpResponse(response, 200),
                ),
            )
        ]
        return super()._monkeypatch(patches=patches)
class VisualizableTableColumn:
    """Configuration of a single column of a VisualizableTable.

    Carries the column header name, an optional tooltip-style description,
    a max width taken from VisualizableTableColumnSize, and per-column
    flags disabling the frontend filter/sort controls.
    """

    def __init__(
        self,
        name: str,
        max_width: VisualizableTableColumnSize = VisualizableTableColumnSize.S_300,
        description: str = "",
        disable_filters: bool = False,
        disable_sort_by: bool = False,
    ):
        self.name = name
        self.description = description
        self.disable_filters = disable_filters
        self.disable_sort_by = disable_sort_by
        self.max_width = max_width

    @property
    def attributes(self) -> List[str]:
        # keys serialized by to_dict, in the order the frontend expects
        return [
            "name",
            "description",
            "disable_filters",
            "disable_sort_by",
            "max_width",
        ]

    def to_dict(self) -> Dict:
        # NOTE: the original body started with ``if not self: return {}``;
        # this class defines no __bool__/__len__, so instances are always
        # truthy and that branch was unreachable -- it has been removed.
        result = {attr: getattr(self, attr) for attr in self.attributes}
        for key, value in result.items():
            if isinstance(value, Enum):
                # serialize enum members (e.g. max_width) by their value
                result[key] = value.value
        return result
class VisualizableTableColumnSize(enum.Enum):
    """Column size options (max_width) for VisualizableTable columns."""

    S_50 = 50
    S_100 = 100
    S_150 = 150
    S_200 = 200
    S_250 = 250
    S_300 = 300

    def __str__(self):
        # BUGFIX: __str__ must return a str; the original returned
        # self.value (an int), so str() on any member raised
        # "TypeError: __str__ returned non-string"
        return str(self.value)
+ ), + ), + Visualizer.TableColumn( + name="submitters", + max_width=VisualizableTableColumnSize.S_300, + disable_filters=True, + disable_sort_by=True, + description="Sources that reported the attacker.", + ), + Visualizer.TableColumn( + name="exploits", + max_width=VisualizableTableColumnSize.S_300, + disable_filters=True, + disable_sort_by=True, + description=( + "If available, " "the CVEs that the attacker tried to exploit." + ), + ), + Visualizer.TableColumn( + name="attack_types", + max_width=VisualizableTableColumnSize.S_300, + disable_filters=True, + disable_sort_by=True, + description=( + "Category of the attack " + "(protocol, service and type) Example: http/scan" + ), + ), + ] + return Visualizer.HList( value=[ Visualizer.Table( data=time_range_data, - columns=[ - "time_range", - "total_hours_seen", - "total_hits", - "submitters", - "exploits", - "attack_types", - ], - disable_filters=True, - disable_sort_by=True, + columns=columns, size=Visualizer.Size.S_ALL, ) ], diff --git a/configuration/nginx/https.conf b/configuration/nginx/https.conf index 8cc801b3da..5abfbad906 100755 --- a/configuration/nginx/https.conf +++ b/configuration/nginx/https.conf @@ -14,7 +14,6 @@ limit_req_zone $binary_remote_addr zone=adminlimit:10m rate=1r/s; server { listen 443 ssl; - ssl on; ssl_protocols TLSv1.2 TLSv1.3; ssl_certificate /usr/local/share/ca-certificates/intelowl.crt; ssl_certificate_key /etc/ssl/private/intelowl.key; diff --git a/docker/.env b/docker/.env index bc984be7da..b9f079e8eb 100755 --- a/docker/.env +++ b/docker/.env @@ -1,6 +1,6 @@ ### DO NOT CHANGE THIS VALUE !! ### It should be updated only when you pull latest changes off from the 'master' branch of IntelOwl. 
# this variable must start with "REACT_APP_" to be used in the frontend too -REACT_APP_INTELOWL_VERSION="v6.0.2" +REACT_APP_INTELOWL_VERSION="v6.0.4" # if you want to use a nfs volume for shared files # NFS_ADDRESS= diff --git a/docker/Dockerfile_nginx b/docker/Dockerfile_nginx index a1f0c735e1..49ae03d40d 100755 --- a/docker/Dockerfile_nginx +++ b/docker/Dockerfile_nginx @@ -1,4 +1,4 @@ -FROM library/nginx:1.26.0-alpine +FROM library/nginx:1.27.0-alpine # do not remove this RUN apk update && apk upgrade && apk add bash diff --git a/docker/default.private.yml b/docker/default.private.yml index 9077180d97..2383e65eab 100755 --- a/docker/default.private.yml +++ b/docker/default.private.yml @@ -7,6 +7,9 @@ services: image: local/intel_owl_private_uwsgi volumes: - /etc/ssl/certs:/opt/deploy/intel_owl/certs + healthcheck: + start_period: 10s + retries: 20 daphne: container_name: intelowl_private_daphne diff --git a/docker/default.yml b/docker/default.yml index da93e5e589..9406d794ac 100755 --- a/docker/default.yml +++ b/docker/default.yml @@ -25,8 +25,8 @@ services: test: [ "CMD-SHELL", "nc -z localhost 8001 || exit 1" ] interval: 5s timeout: 2s - start_period: 10s - retries: 20 + start_period: 300s + retries: 2 daphne: image: intelowlproject/intelowl:${REACT_APP_INTELOWL_VERSION} @@ -52,7 +52,6 @@ services: uwsgi: condition: service_healthy - nginx: image: intelowlproject/intelowl_nginx:${REACT_APP_INTELOWL_VERSION} container_name: intelowl_nginx @@ -65,8 +64,6 @@ services: - ../configuration/nginx/locations.conf:/etc/nginx/locations.conf - nginx_logs:/var/log/nginx - static_content:/var/www/static - # ports: - # - "80:80" depends_on: uwsgi: condition: service_healthy diff --git a/docker/entrypoints/uwsgi.sh b/docker/entrypoints/uwsgi.sh index ed47c5b36c..0c60c67291 100755 --- a/docker/entrypoints/uwsgi.sh +++ b/docker/entrypoints/uwsgi.sh @@ -4,7 +4,8 @@ until cd /opt/deploy/intel_owl do echo "Waiting for server volume..." 
done -sudo su www-data -c "mkdir -p /var/log/intel_owl/django /var/log/intel_owl/uwsgi /var/log/intel_owl/asgi /opt/deploy/intel_owl/files_required/blint /opt/deploy/intel_owl/files_required/yara" +mkdir -p /var/log/intel_owl/django /var/log/intel_owl/uwsgi /var/log/intel_owl/asgi /opt/deploy/intel_owl/files_required/blint /opt/deploy/intel_owl/files_required/yara +chown -R www-data:www-data /var/log/intel_owl/django /var/log/intel_owl/uwsgi /var/log/intel_owl/asgi /opt/deploy/intel_owl/files_required/blint /opt/deploy/intel_owl/files_required/yara # Apply database migrations echo "Waiting for db to be ready..." @@ -34,6 +35,17 @@ CHANGELOG_NOTIFICATION_COMMAND='python manage.py changelog_notification .github/ if [[ $DEBUG == "True" ]] && [[ $DJANGO_TEST_SERVER == "True" ]]; then + # Create superuser if it does not exist + exists=$(echo "from django.contrib.auth import get_user_model; User = get_user_model(); print(User.objects.filter(username='admin').exists())" | python manage.py shell) + + if [ "$exists" == "True" ]; then + echo "Superuser 'admin' already exists." + else + echo "Creating superuser 'admin' with password 'admin'..." + echo "from django.contrib.auth import get_user_model; User = get_user_model(); User.objects.create_superuser('admin', 'admin@example.com', 'admin')" | python manage.py shell + echo "Superuser 'admin' created successfully." 
+ fi + $CHANGELOG_NOTIFICATION_COMMAND --debug python manage.py runserver 0.0.0.0:8001 else diff --git a/docker/nginx.override.yml b/docker/nginx.override.yml new file mode 100644 index 0000000000..3b4d38efc1 --- /dev/null +++ b/docker/nginx.override.yml @@ -0,0 +1,4 @@ +services: + nginx: + ports: + - "80:80" diff --git a/docker/scripts/watchman_install.sh b/docker/scripts/watchman_install.sh index a2e067fb13..e3896d0daf 100755 --- a/docker/scripts/watchman_install.sh +++ b/docker/scripts/watchman_install.sh @@ -1,7 +1,10 @@ #!/bin/bash +echo "WATCHMAN value is " +echo $WATCHMAN + # This script can be disabled during development using WATCHMAN=false env variable -if [ "$WATCHMAN" == "False" ]; then echo "Skipping WATCHMAN installation because we are not in test mode"; exit 0; fi +if [ "$WATCHMAN" = "false" ]; then echo "Skipping WATCHMAN installation because we are not in test mode"; exit 0; fi pip3 install --compile -r requirements/django-server-requirements.txt diff --git a/docker/test.override.yml b/docker/test.override.yml index c9c1de84e9..24cda42f25 100755 --- a/docker/test.override.yml +++ b/docker/test.override.yml @@ -14,7 +14,6 @@ services: - DEBUG=True - DJANGO_TEST_SERVER=True - DJANGO_WATCHMAN_TIMEOUT=60 - - WATCHMAN=True daphne: image: intelowlproject/intelowl:test @@ -28,8 +27,6 @@ services: image: intelowlproject/intelowl_nginx:test volumes: - ../configuration/nginx/django_server.conf:/etc/nginx/conf.d/default.conf - ports: - - "80:80" celery_beat: image: intelowlproject/intelowl:test @@ -37,16 +34,10 @@ services: - ../:/opt/deploy/intel_owl environment: - DEBUG=True - - DJANGO_TEST_SERVER=True - - DJANGO_WATCHMAN_TIMEOUT=60 - - WATCHMAN=True celery_worker_default: image: intelowlproject/intelowl:test volumes: - ../:/opt/deploy/intel_owl environment: - - DEBUG=True - - DJANGO_TEST_SERVER=True - - DJANGO_WATCHMAN_TIMEOUT=60 - - WATCHMAN=True \ No newline at end of file + - DEBUG=True \ No newline at end of file diff --git 
a/docker/traefik.override.yml b/docker/traefik.override.yml deleted file mode 100755 index dada2fd93e..0000000000 --- a/docker/traefik.override.yml +++ /dev/null @@ -1,34 +0,0 @@ -services: - traefik: - image: "traefik:v2.2" - container_name: "traefik" - command: - #- "--log.level=DEBUG" - - "--api.insecure=true" - - "--providers.docker=true" - - "--providers.docker.exposedbydefault=false" - - "--entrypoints.web.address=:80" - - "--entrypoints.websecure.address=:443" - - "--certificatesresolvers.myresolver.acme.httpchallenge=true" - - "--certificatesresolvers.myresolver.acme.httpchallenge.entrypoint=web" - #- "--certificatesresolvers.myresolver.acme.caserver=https://acme-staging-v02.api.letsencrypt.org/directory" - - "--certificatesresolvers.myresolver.acme.email=postmaster@example.com" - - "--certificatesresolvers.myresolver.acme.storage=/letsencrypt/acme.json" - ports: - - "80:80" - - "443:443" - volumes: - - "./letsencrypt:/letsencrypt" - - "/var/run/docker.sock:/var/run/docker.sock:ro" - - nginx: - depends_on: - - traefik - labels: - - "traefik.enable=true" - - "traefik.http.routers.nginx.rule=Host(`example.com`)" - - "traefik.http.routers.nginx.entrypoints=websecure" - - "traefik.http.routers.nginx.tls.certresolver=myresolver" - ports: - - "443:443" - diff --git a/docker/traefik.yml b/docker/traefik.yml new file mode 100644 index 0000000000..dccf7cb452 --- /dev/null +++ b/docker/traefik.yml @@ -0,0 +1,15 @@ +services: + traefik: + image: "traefik:3.0" + labels: + - "traefik.enable=true" + + nginx: + depends_on: + - traefik + labels: + - "traefik.enable=true" + - "traefik.http.services.nginx.loadbalancer.server.port=80" + expose: + - "80" + diff --git a/docker/traefik_local.yml b/docker/traefik_local.yml new file mode 100644 index 0000000000..8254b0f2b6 --- /dev/null +++ b/docker/traefik_local.yml @@ -0,0 +1,28 @@ +services: + traefik: + container_name: "intelowl_traefik_local" + command: + # Pleases refer to the official documentation: 
https://doc.traefik.io/traefik/ + # LOGS + - "--log.level=DEBUG" + # DASHBOARD + - "--api.insecure=true" + - "--api.dashboard=true" + # ENTRYPOINTS - redirect every request to use HTTPS + - "--entrypoints.web.address=:80" + # PROVIDERS + - "--providers.docker=true" + - "--providers.docker.watch=true" + - "--providers.docker.exposedbydefault=false" + ports: + - "80:80" + - "8080:8080" + volumes: + - "/var/run/docker.sock:/var/run/docker.sock:ro" + + nginx: + depends_on: + - traefik + labels: + - "traefik.http.routers.nginx.rule=Host(`localhost`)" + - "traefik.http.routers.nginx.entrypoints=web" diff --git a/docker/traefik_prod.yml b/docker/traefik_prod.yml new file mode 100644 index 0000000000..ed71e33625 --- /dev/null +++ b/docker/traefik_prod.yml @@ -0,0 +1,66 @@ +services: + traefik: + container_name: "intelowl_traefik_prod" + command: + # Pleases refer to the official documentation: https://doc.traefik.io/traefik/ + # LOGS - may be omitted if you don't need logs + - "--accesslog=true" + - "--accesslog.filepath=/var/log/traefik/access.log" + - "--log.filePath=/var/log/traefik/traefik.log" + - "--log.level=DEBUG" + # DASHBOARD + - "--api.dashboard=true" + # PROVIDERS + - "--providers.docker=true" + - "--providers.docker.watch=true" + - "--providers.docker.exposedbydefault=false" + # ENTRYPOINTS - redirect every request to use HTTPS + - "--entrypoints.web.address=:80" + - "--entryPoints.web.http.redirections.entryPoint.to=websecure" + - "--entryPoints.web.http.redirections.entryPoint.scheme=https" + - "--entryPoints.web.http.redirections.entrypoint.permanent=true" + - "--entrypoints.websecure.address=:443" + # CERTIFICATE RESOLVERS + - "--certificatesresolvers.le.acme.httpchallenge=true" + - "--certificatesresolvers.le.acme.httpchallenge.entrypoint=web" + # DEV - use this for testing purposes or else you might get blocked - # CHANGE THIS + - "--certificatesresolvers.le.acme.caserver=https://acme-staging-v02.api.letsencrypt.org/directory" + # PROD - use this if 
everything works fine - # CHANGE THIS + #- "--certificatesresolvers.le.acme.caserver=https://acme-v02.api.letsencrypt.org/directory" + - "--certificatesresolvers.le.acme.email=postmaster@example.com" # CHANGE THIS + - "--certificatesresolvers.le.acme.storage=/etc/letsencrypt/acme.json" + labels: + # DASHBOARD - setup for secure dashboard access + - "traefik.http.routers.dashboard.rule=Host(`traefik.intelowl.example.com`) && (PathPrefix(`/api`) || PathPrefix(`/dashboard`))" # CHANGE THIS (Only "Host"!) + - "traefik.http.routers.dashboard.service=api@internal" + - "traefik.http.routers.dashboard.entrypoints=websecure" + - "traefik.http.routers.dashboard.tls=true" + - "traefik.http.routers.dashboard.tls.certresolver=le" + # auth/ipallowlist middlewares allow to limit/secure access - may be omitted + # Here you may define which IPs/CIDR ranges are allowed to access this resource - may be omitted + # - "traefik.http.routers.dashboard.middlewares=dashboard-ipallowlist" + # - "traefik.http.middlewares.dashboard-ipallowlist.ipallowlist.sourcerange=0.0.0.0" # CHANGE THIS + # You can create a new user and password for basic auth with this command: + # echo $(htpasswd -nbB user password) | sed -e s/\\$/\\$\\$/g + # - "traefik.http.routers.dashboard.middlewares=auth" + # - "traefik.http.middlewares.auth.basicauth.users=user:$$2y$$05$$v.ncVNXEJriELglCBEZJmu5I1VrhyhuaVCXATRQTUVuvOF1qgYwpa" # CHANGE THIS (default is user:password) + - "traefik.http.services.dashboard.loadbalancer.server.port=8080" + ports: + - "80:80" + - "443:443" + volumes: + - "/var/run/docker.sock:/var/run/docker.sock:ro" + - "/etc/letsencrypt:/etc/letsencrypt" + - "/var/log/traefik:/var/log/traefik" + + nginx: + depends_on: + - traefik + labels: + - "traefik.http.routers.nginx.rule=Host(`intelowl.example.com`)" # CHANGE THIS + - "traefik.http.routers.nginx.entrypoints=websecure" + - "traefik.http.routers.nginx.tls=true" + - "traefik.http.routers.nginx.tls.certresolver=le" + # Here you may define which 
IPs/CIDR ranges are allowed to access this resource + # - "traefik.http.routers.nginx.middlewares=nginx-ipallowlist" + # - "traefik.http.middlewares.nginx-ipallowlist.ipallowlist.sourcerange=0.0.0.0" # CHANGE THIS diff --git a/docs/source/Advanced-Usage.md b/docs/source/Advanced-Usage.md index f0200bb9eb..310cbb1605 100755 --- a/docs/source/Advanced-Usage.md +++ b/docs/source/Advanced-Usage.md @@ -60,7 +60,7 @@ After a user registration has been made, an email is sent to the user to verify Once the user has verified their email, they would be manually vetted before being allowed to use the IntelOwl platform. The registration requests would be handled in the Django Admin page by admins. If you have IntelOwl deployed on an AWS instance with an IAM role you can use the [SES](/Advanced-Usage.md#ses) service. -To have the "Registration" page to work correctly, you must configure some variables before starting IntelOwl. See [Optional Environment Configuration](/Installation.md#other-optional-configuration-to-enable-specific-services-features) +To have the "Registration" page to work correctly, you must configure some variables before starting IntelOwl. See [Optional Environment Configuration](https://intelowl.readthedocs.io/en/latest/Installation.html#other-optional-configuration-to-enable-specific-services-features) In a development environment the emails that would be sent are written to the standard output. diff --git a/docs/source/Contribute.md b/docs/source/Contribute.md index 848fb5c359..0d5ba976da 100755 --- a/docs/source/Contribute.md +++ b/docs/source/Contribute.md @@ -331,7 +331,11 @@ To do so, some utility classes have been made: VisualizableLevel - Each level corresponds to a line in the final frontend visualizations. Every level is made of a VisualizableHorizontalList. + + Each level corresponds to a line in the final frontend visualizations. Every level is made of a + VisualizableHorizontalList. 
+ The dimension of the level can be customized with the size parameter (1 is the biggest, 6 is the smallest). + Visualizable Level example diff --git a/docs/source/Installation.md b/docs/source/Installation.md index cad75d7f49..794fd5ee26 100644 --- a/docs/source/Installation.md +++ b/docs/source/Installation.md @@ -13,6 +13,7 @@ In some systems you could find pre-installed older versions. Please check this a @@ -35,7 +36,10 @@ However, if you feel lazy, you could just install and test IntelOwl with the fol git clone https://github.com/intelowlproject/IntelOwl cd IntelOwl/ -# verify installed dependencies and start the app +# run helper script to verify installed dependencies and configure basic stuff +./initialize.sh + +# start the app ./start prod up # now the application is running on http://localhost:80 @@ -181,9 +185,12 @@ There are 3 options to execute the web server: We provide a specific docker-compose file that leverages [Traefik](https://docs.traefik.io/) to allow fast deployments of public-faced and HTTPS-enabled applications. - Before using it, you should configure the configuration file `docker/traefik.override.yml` by changing the email address and the hostname where the application is served. For a detailed explanation follow the official documentation: [Traefix doc](https://docs.traefik.io/user-guides/docker-compose/acme-http/). + Before using the production deployment, you should change the configuration file `docker/traefik_prod.yml` by customising every line which has a "# CHANGE THIS" comment appended to reflect your environment. For a detailed explanation follow the official documentation: [Traefix doc](https://doc.traefik.io/traefik/). + + The development deployment is ready to go and running on localhost. 
- After the configuration is done, you can add the option `--traefik` while executing [`./start`](#run) + After the configuration is done, you can add the option `--traefik_prod` for a production ready deployment while executing [`./start`](#run) + If you just want to test things out you can add the option `--traefik_local` for a development deployment. ## Run diff --git a/docs/source/Usage.md b/docs/source/Usage.md index b605ee4209..9f85784663 100644 --- a/docs/source/Usage.md +++ b/docs/source/Usage.md @@ -100,6 +100,7 @@ The following is the list of the available analyzers you can run out-of-the-box. * `Suricata`: Analyze PCAPs with open IDS signatures with [Suricata engine](https://github.com/OISF/suricata) * `Thug_HTML_Info`: Perform hybrid dynamic/static analysis on a HTML file using [Thug low-interaction honeyclient](https://thug-honeyclient.readthedocs.io/) * `Xlm_Macro_Deobfuscator`: [XlmMacroDeobfuscator](https://github.com/DissectMalware/XLMMacroDeobfuscator) deobfuscate xlm macros +* `DetectItEasy`:[DetectItEasy](https://github.com/horsicq/Detect-It-Easy) is a program for determining types of files. * `Yara`: scan a file with * [ATM malware yara rules](https://github.com/fboldewin/YARA-rules) * [bartblaze yara rules](https://github.com/bartblaze/Yara-rules) @@ -151,6 +152,8 @@ The following is the list of the available analyzers you can run out-of-the-box. - `YARAify_File_Search`: scan an hash against [YARAify](https://yaraify.abuse.ch/) database - `Zippy_scan` : [Zippy](https://github.com/thinkst/zippy): Fast method to classify text as AI or human-generated; takes in `lzma`,`zlib`,`brotli` as input based engines; `ensemble` being default. - `Blint`: [Blint](https://github.com/owasp-dep-scan/blint) is a Binary Linter that checks the security properties and capabilities of your executables. 
Supported binary formats: - Android (apk, aab) - ELF (GNU, musl) - PE (exe, dll) - Mach-O (x64, arm64) +- `MalprobScan`: [Malprob](https://malprob.io/) is a leading malware detection and identification service, powered by cutting-edge AI technology. + ##### Observable analyzers (ip, domain, url, hash) ###### Internal tools @@ -255,7 +258,10 @@ The following is the list of the available analyzers you can run out-of-the-box. * `TweetFeed`: [TweetFeed](https://tweetfeed.live/) collects Indicators of Compromise (IOCs) shared by the infosec community at Twitter.\r\nHere you will find malicious URLs, domains, IPs, and SHA256/MD5 hashes. * `HudsonRock`: [Hudson Rock](https://cavalier.hudsonrock.com/docs) provides its clients the ability to query a database of over 27,541,128 computers which were compromised through global info-stealer campaigns performed by threat actors. * `CyCat`: [CyCat](https://cycat.org/) or the CYbersecurity Resource CATalogue aims at mapping and documenting, in a single formalism and catalogue available cybersecurity tools, rules, playbooks, processes and controls. - +* `Vulners`: [Vulners](https://vulners.com) is the most complete and the only fully correlated security intelligence database, which goes through constant updates and links 200+ data sources in a unified machine-readable format. It contains 8 mln+ entries, including CVEs, advisories, exploits, and IoCs — everything you need to stay abreast on the latest security threats. +* `AILTypoSquatting`: [AILTypoSquatting](https://github.com/typosquatter/ail-typo-squatting) is a Python library to generate a list of potential typo squatting domains with a domain name permutation engine to feed AIL and other systems. +* `MalprobSearch`: [Malprob](https://malprob.io/) is a leading malware detection and identification service, powered by cutting-edge AI technology. + ##### Generic analyzers (email, phone number, etc.; anything really) Some analyzers require details other than just IP, URL, Domain, etc.
We classified them as `generic` Analyzers. Since the type of field is not known, there is a format for strings to be followed. diff --git a/docs/source/schema.yml b/docs/source/schema.yml index 62bc131470..e21cbf65a2 100755 --- a/docs/source/schema.yml +++ b/docs/source/schema.yml @@ -1,7 +1,7 @@ openapi: 3.0.3 info: title: IntelOwl API specification - version: 6.0.2 + version: 6.0.4 paths: /api/analyze_file: post: diff --git a/frontend/src/components/jobs/result/visualizer/elements/table.jsx b/frontend/src/components/jobs/result/visualizer/elements/table.jsx index f684a74e8e..08701567de 100644 --- a/frontend/src/components/jobs/result/visualizer/elements/table.jsx +++ b/frontend/src/components/jobs/result/visualizer/elements/table.jsx @@ -1,9 +1,11 @@ import React from "react"; import PropTypes from "prop-types"; +import { UncontrolledTooltip } from "reactstrap"; import { DataTable, DefaultColumnFilter } from "@certego/certego-ui"; import { VerticalListVisualizer } from "./verticalList"; import { HorizontalListVisualizer } from "./horizontalList"; +import { markdownToHtml } from "../../../../common/markdownToHtml"; function getAccessor(element) { if ([VerticalListVisualizer, HorizontalListVisualizer].includes(element.type)) @@ -12,44 +14,58 @@ function getAccessor(element) { return element.props.value; } -export function TableVisualizer({ - id, - size, - columns, - data, - pageSize, - disableFilters, - disableSortBy, -}) { +export function TableVisualizer({ id, size, columns, data, pageSize, sortBy }) { const tableColumns = []; columns.forEach((column) => { - const columnHeader = column.replaceAll("_", " "); + const columnHeader = column.name.replaceAll("_", " "); + const columnElement = ( + <> + {columnHeader} + {column.description && ( + + {markdownToHtml(column.description)} + + )} + + ); + tableColumns.push({ - Header: columnHeader, - id: column, - accessor: (row) => getAccessor(row[column]), + Header: columnElement, + id: column.name, + accessor: (row) => 
getAccessor(row[column.name]), Cell: ({ cell: { row: { original }, }, - }) => original[column], - disableFilters, - disableSortBy, + }) => original[column.name], + disableFilters: column.disableFilters, + disableSortBy: column.disableSortBy, Filter: DefaultColumnFilter, + maxWidth: column.maxWidth, }); }); const tableConfig = {}; const tableInitialState = { pageSize, + sortBy, }; return (
- parseString(column), + parseColumnElementList(column), ); validatedFields.pageSize = rawElement.page_size; - validatedFields.disableFilters = parseBool(rawElement.disable_filters); - validatedFields.disableSortBy = parseBool(rawElement.disable_sort_by); + validatedFields.sortById = parseString(rawElement.sort_by_id); + validatedFields.sortByDesc = parseBool(rawElement.sort_by_desc); break; } // base case diff --git a/frontend/src/components/jobs/result/visualizer/visualizer.jsx b/frontend/src/components/jobs/result/visualizer/visualizer.jsx index 49350d5607..acae849aaf 100644 --- a/frontend/src/components/jobs/result/visualizer/visualizer.jsx +++ b/frontend/src/components/jobs/result/visualizer/visualizer.jsx @@ -123,8 +123,7 @@ function convertToElement(element, idElement, isChild = false) { return obj; })} pageSize={element.pageSize} - disableFilters={element.disableFilters} - disableSortBy={element.disableSortBy} + sortBy={[{ id: element.sortById, desc: element.sortByDesc }]} /> ); break; diff --git a/frontend/src/constants/jobConst.js b/frontend/src/constants/jobConst.js index 6c0e530201..ddd9865854 100644 --- a/frontend/src/constants/jobConst.js +++ b/frontend/src/constants/jobConst.js @@ -180,9 +180,12 @@ export const FileExtensions = Object.freeze({ OK: "ok", PUBLICVM: "publicvm", ISO: "iso", - SH: "sh", CRX: "crx", CONFIG: "config", + /* This is a list of valid TLDs that are also file extensions. This could generate some false positives in the auto-extraction; if they are too many, filter them.
+ sh + */ }); export const InvalidTLD = Object.freeze({ diff --git a/frontend/tests/components/jobs/result/visualizer/elements/table.test.jsx b/frontend/tests/components/jobs/result/visualizer/elements/table.test.jsx index 96b1421635..ffea7f6e09 100644 --- a/frontend/tests/components/jobs/result/visualizer/elements/table.test.jsx +++ b/frontend/tests/components/jobs/result/visualizer/elements/table.test.jsx @@ -19,7 +19,13 @@ describe("TableVisualizer component", () => { { id="test-id" size="col-6" alignment="around" - columns={["column_name"]} + columns={[ + { + name: "column_name", + maxWidth: 300, + description: "test description", + disableFilters: true, + disableSortBy: true, + }, + ]} data={[ { column_name: ( @@ -86,8 +100,7 @@ describe("TableVisualizer component", () => { }, ]} pageSize={3} - disableFilters - disableSortBy + sortBy={[{ id: "column_name", desc: false }]} />, ); diff --git a/frontend/tests/components/jobs/result/visualizer/validators.test.js b/frontend/tests/components/jobs/result/visualizer/validators.test.js index 21e6e11d5a..207d037eb5 100644 --- a/frontend/tests/components/jobs/result/visualizer/validators.test.js +++ b/frontend/tests/components/jobs/result/visualizer/validators.test.js @@ -139,8 +139,8 @@ describe("visualizer data validation", () => { type: "table", columns: [], pageSize: undefined, - disableFilters: false, - disableSortBy: false, + sortById: "", + sortByDesc: false, data: [], disable: false, }, @@ -302,7 +302,15 @@ describe("visualizer data validation", () => { type: "table", size: "6", alignment: "start", - columns: ["column_name"], + columns: [ + { + name: "column_name", + max_width: 300, + description: "test description", + disable_filters: true, + disable_sort_by: true, + }, + ], data: [ { column_name: { @@ -322,8 +330,8 @@ describe("visualizer data validation", () => { }, ], page_size: 7, - disable_filters: true, - disable_sort_by: true, + sort_by_id: "", + sort_by_desc: false, }, ], }, @@ -501,7 +509,15 @@ 
describe("visualizer data validation", () => { type: "table", size: "col-6", alignment: "start", - columns: ["column_name"], + columns: [ + { + name: "column_name", + maxWidth: 300, + description: "test description", + disableFilters: true, + disableSortBy: true, + }, + ], data: [ { column_name: { @@ -522,8 +538,8 @@ describe("visualizer data validation", () => { ], disable: false, pageSize: 7, - disableFilters: true, - disableSortBy: true, + sortById: "", + sortByDesc: false, }, ], }, diff --git a/frontend/tests/components/jobs/result/visualizer/visualizer.test.jsx b/frontend/tests/components/jobs/result/visualizer/visualizer.test.jsx index 595b125d82..18db76e126 100644 --- a/frontend/tests/components/jobs/result/visualizer/visualizer.test.jsx +++ b/frontend/tests/components/jobs/result/visualizer/visualizer.test.jsx @@ -164,7 +164,15 @@ describe("test VisualizerReport (conversion from backend data to frontend compon type: "table", size: "auto", alignment: "start", - columns: ["column_name"], + columns: [ + { + name: "column_name", + max_width: 300, + description: "test description", + disable_filters: true, + disable_sort_by: true, + }, + ], data: [ { column_name: { @@ -184,8 +192,8 @@ describe("test VisualizerReport (conversion from backend data to frontend compon }, ], page_size: 5, - disable_filters: true, - disable_sort_by: true, + sort_by_id: "", + sort_by_desc: false, }, ], alignment: "around", diff --git a/initialize.sh b/initialize.sh index a4fc8d7e61..b1b45143d4 100755 --- a/initialize.sh +++ b/initialize.sh @@ -76,6 +76,7 @@ if ! [ -x "$(command -v docker)" ]; then echo 'Error: Could not install docker.' >&2 exit 1 fi + rm get-docker.sh else echo 'You chose to do not install Docker. 
Exiting' exit 1 diff --git a/integrations/malware_tools_analyzers/Dockerfile b/integrations/malware_tools_analyzers/Dockerfile index 5466cec90b..b26a9cd101 100644 --- a/integrations/malware_tools_analyzers/Dockerfile +++ b/integrations/malware_tools_analyzers/Dockerfile @@ -97,6 +97,12 @@ RUN python3 -m venv venv \ && mkdir -p /tmp/thug/logs \ && chown -R ${USER}:${USER} /tmp/thug/logs +WORKDIR ${PROJECT_PATH}/die +RUN apt-get install --no-install-recommends -y wget tar libglib2.0-0 && \ + wget -q https://github.com/horsicq/DIE-engine/releases/download/3.01/die_lin64_portable_3.01.tar.gz && \ + tar -xzf die_lin64_portable_3.01.tar.gz + + # prepare fangfrisch installation COPY crontab /etc/cron.d/crontab RUN mkdir -m 0770 -p /var/lib/fangfrisch \ diff --git a/integrations/malware_tools_analyzers/app.py b/integrations/malware_tools_analyzers/app.py index 521b599d63..6463f6b0f2 100644 --- a/integrations/malware_tools_analyzers/app.py +++ b/integrations/malware_tools_analyzers/app.py @@ -175,6 +175,12 @@ def intercept_thug_result(context, future: Future) -> None: command_name="/opt/deploy/qiling/venv/bin/python3 /opt/deploy/qiling/analyze.py", ) +# diec is the command for Detect It Easy +shell2http.register_command( + endpoint="die", + command_name="/opt/deploy/die/die_lin64_portable/base/diec", +) + # with this, we can make http calls to the endpoint: /thug shell2http.register_command( endpoint="thug", diff --git a/integrations/malware_tools_analyzers/entrypoint.sh b/integrations/malware_tools_analyzers/entrypoint.sh index 29926df92b..304c083ff3 100755 --- a/integrations/malware_tools_analyzers/entrypoint.sh +++ b/integrations/malware_tools_analyzers/entrypoint.sh @@ -1,4 +1,6 @@ #!/bin/bash +# diec analyzer variable +export LD_LIBRARY_PATH="/opt/deploy/die/die_lin64_portable/base:$LD_LIBRARY_PATH" # without this makedirs the Dockerfile is not able to create new directories in volumes that already exist mkdir -p /var/run/clamav ${LOG_PATH} ${LOG_PATH}/clamav chown -R 
clamav:${USER} /var/lib/clamav /var/run/clamav ${LOG_PATH} diff --git a/integrations/phoneinfoga/compose.yml b/integrations/phoneinfoga/compose.yml index b1c7466a4b..8211ff0964 100644 --- a/integrations/phoneinfoga/compose.yml +++ b/integrations/phoneinfoga/compose.yml @@ -1,6 +1,6 @@ services: phoneinfoga: - container_name: phoneinfoga + container_name: intelowl_phoneinfoga restart: unless-stopped image: sundowndev/phoneinfoga:v2.11.0 command: diff --git a/requirements/certego-requirements.txt b/requirements/certego-requirements.txt index c46b977843..4b846c3547 100644 --- a/requirements/certego-requirements.txt +++ b/requirements/certego-requirements.txt @@ -1,4 +1,4 @@ -certego-saas==0.7.7 +certego-saas==0.7.10 # While developing for certego-saas, comment the previous line. # Then, add a line like the one below to your forked repo and your most recent commit # In this way you can test your changes directly in IntelOwl with the complete application diff --git a/requirements/project-requirements.txt b/requirements/project-requirements.txt index e34fdee9e7..6c5e3ff3f6 100755 --- a/requirements/project-requirements.txt +++ b/requirements/project-requirements.txt @@ -7,7 +7,7 @@ django-filter==24.2 django-storages==1.14 django-celery-beat==2.6.0 django-celery-results==2.5.0 -django-ses == 4.0.0 +django-ses == 4.1.0 django-iam-dbauth==0.1.4 django-prettyjson==0.4.1 django-silk==5.1.0 @@ -16,7 +16,7 @@ django-treebeard==4.7 jsonschema==4.22.0 # django rest framework libs -Authlib==1.3.0 +Authlib==1.3.1 djangorestframework==3.15.1 djangorestframework-filters==1.0.0.dev2 drf-spectacular==0.27.1 @@ -35,7 +35,7 @@ whitenoise==6.6.0 daphne==4.1.0 channels==4.1.0 channels-redis==4.2.0 -elasticsearch-dsl==8.13.0 +elasticsearch-dsl==8.14.0 # plugins GitPython==3.1.41 @@ -67,7 +67,7 @@ pypssl==2.2 pysafebrowsing==0.1.1 PySocks==1.7.1 py-tlsh==4.7.2 -quark-engine==24.5.1 +quark-engine==24.6.1 speakeasy-emulator==1.5.9 telfhash==0.9.8 yara-python==4.5.0 @@ -78,7 +78,7 @@ 
querycontacts==2.0.0 blint==2.1.5 hfinger==0.2.2 permhash==0.1.4 - +ail_typo_squatting==2.7.4 # this is required because XLMMacroDeobfuscator does not pin the following packages pyxlsb2==0.0.8 xlrd2==1.3.4 diff --git a/start b/start index ada62bc783..454de88819 100755 --- a/start +++ b/start @@ -9,7 +9,7 @@ declare -A env_arguments=(["prod"]=1 ["test"]=1 ["ci"]=1 ["local"]=1) declare -A test_mode=(["test"]=1 ["ci"]=1) declare -A cmd_arguments=(["build"]=1 ["up"]=1 ["start"]=1 ["restart"]=1 ["down"]=1 ["stop"]=1 ["kill"]=1 ["logs"]=1 ["ps"]=1) -declare -A path_mapping=(["default"]="docker/default.yml" ["redis"]="docker/redis.override.yml" ["postgres"]="docker/postgres.override.yml" ["rabbitmq"]="docker/rabbitmq.override.yml" ["test"]="docker/test.override.yml" ["ci"]="docker/ci.override.yml" ["custom"]="docker/custom.override.yml" ["traefik"]="docker/traefik.override.yml" ["multi_queue"]="docker/multi-queue.override.yml" ["test_multi_queue"]="docker/test.multi-queue.override.yml" ["flower"]="docker/flower.override.yml" ["test_flower"]="docker/test.flower.override.yml" ["elastic"]="docker/elasticsearch.override.yml" ["https"]="docker/https.override.yml" ["nfs"]="docker/nfs.override.yml" ["default.private"]="docker/default.private.yml" ["postgres.private"]="docker/postgres.private.override.yml" ["rabbitmq.private"]="docker/rabbitmq.private.override.yml" ["multi_queue.private"]="docker/multi-queue.private.override.yml" ["flower.private"]="docker/flower.private.override.yml" ["sqs"]="docker/sqs.override.yml") +declare -A path_mapping=(["default"]="docker/default.yml" ["redis"]="docker/redis.override.yml" ["postgres"]="docker/postgres.override.yml" ["rabbitmq"]="docker/rabbitmq.override.yml" ["test"]="docker/test.override.yml" ["ci"]="docker/ci.override.yml" ["custom"]="docker/custom.override.yml" ["traefik"]="docker/traefik.yml" ["traefik_prod"]="docker/traefik_prod.yml" ["traefik_local"]="docker/traefik_local.yml" ["multi_queue"]="docker/multi-queue.override.yml" 
["test_multi_queue"]="docker/test.multi-queue.override.yml" ["flower"]="docker/flower.override.yml" ["test_flower"]="docker/test.flower.override.yml" ["elastic"]="docker/elasticsearch.override.yml" ["https"]="docker/https.override.yml" ["nfs"]="docker/nfs.override.yml" ["nginx_default"]="docker/nginx.override.yml" ["default.private"]="docker/default.private.yml" ["postgres.private"]="docker/postgres.private.override.yml" ["rabbitmq.private"]="docker/rabbitmq.private.override.yml" ["multi_queue.private"]="docker/multi-queue.private.override.yml" ["flower.private"]="docker/flower.private.override.yml" ["sqs"]="docker/sqs.override.yml") print_synopsis () { echo "SYNOPSIS" @@ -38,7 +38,8 @@ print_help () { echo " file." echo " --multi_queue Uses the multiqueue.override.yml compose file." echo " --nfs Uses the nfs.override.yml compose file." - echo " --traefik Uses the traefik.override.yml compose file." + echo " --traefik_prod Uses the traefik.yml and traefik_prod.yml compose file." + echo " --traefik_local Uses the traefik.yml and traefik_local.yml compose file." echo " --use-external-database Do NOT use postgres.override.yml compose file." echo " --use-external-redis Do NOT use redis.override.yml compose file." echo " --rabbitmq Uses the rabbitmq.override.yml compose file." @@ -91,11 +92,7 @@ load_env () { } if ! docker compose version > /dev/null 2>&1; then - ./initialize.sh - if ! [ $? ]; then - echo "Failed to install dependencies." 
>&2 - exit 127 - fi + echo "Run ./initialize.sh to install Docker Compose 2" fi check_parameters "$@" && shift 2 @@ -218,8 +215,12 @@ while [[ $# -gt 0 ]]; do params["https"]=true shift 1 ;; - --traefik) - params["traefik"]=true + --traefik_prod) + params["traefik_prod"]=true + shift 1 + ;; + --traefik_local) + params["traefik_local"]=true shift 1 ;; -h | --help) @@ -277,6 +278,19 @@ if [ "$is_test" = true ]; then compose_files+=("${path_mapping["$env_argument"]}") fi +# Check for the traefik_prod or traefik_local argument and include traefik base compose + +traefik_enabled=false +if [ "${params["traefik_prod"]}" ] || [ "${params["traefik_local"]}" ]; then + compose_files+=("${path_mapping["traefik"]}") + traefik_enabled=true +fi + +# Add the default nginx configuration if traefik is not used +if [ "$traefik_enabled" = false ]; then + compose_files+=("${path_mapping["nginx_default"]}") +fi + # add all the other ones test_values=("multi_queue" "flower") for value in "${!params[@]}"; do @@ -289,6 +303,7 @@ for value in "${!params[@]}"; do done fi done + # add all the test files if [[ $env_argument == "test" ]]; then for value in "${test_values[@]}"; do @@ -346,4 +361,4 @@ if grep "docker" <<< "$(groups)" > /dev/null 2>&1; then docker compose --project-directory docker ${to_run[@]} -p "$project_name" "$cmd_argument" "$@" else sudo docker compose --project-directory docker ${to_run[@]} -p "$project_name" "$cmd_argument" "$@" -fi \ No newline at end of file +fi diff --git a/tests/api_app/test_api.py b/tests/api_app/test_api.py index 08e02cb540..d6d31f6560 100755 --- a/tests/api_app/test_api.py +++ b/tests/api_app/test_api.py @@ -129,7 +129,7 @@ def test_analyze_file__pcap(self): self.assertEqual(md5, job.md5) self.assertCountEqual( - ["Suricata", "YARAify_File_Scan", "Hfinger"], + ["Suricata", "YARAify_File_Scan", "Hfinger", "DetectItEasy"], list(job.analyzers_to_execute.all().values_list("name", flat=True)), ) diff --git 
a/tests/api_app/visualizers_manager/quokka/test_observable.py b/tests/api_app/visualizers_manager/quokka/test_observable.py index d19e407247..32116fca1b 100644 --- a/tests/api_app/visualizers_manager/quokka/test_observable.py +++ b/tests/api_app/visualizers_manager/quokka/test_observable.py @@ -918,12 +918,60 @@ def test_observable_all_data(self, *args, **kwargs): self.assertEqual( fourth_page_third_line[0]["columns"], [ - "time_range", - "total_hours_seen", - "total_hits", - "submitters", - "exploits", - "attack_types", + { + "name": "time_range", + "max_width": 300, + "disable_filters": True, + "disable_sort_by": True, + "description": "", + }, + { + "name": "total_hours_seen", + "max_width": 300, + "disable_filters": True, + "disable_sort_by": True, + "description": ( + "Number of hours when the attacker was active." + " This is a datum about how much time has" + " been active the attacker." + ), + }, + { + "name": "total_hits", + "max_width": 300, + "disable_filters": True, + "disable_sort_by": True, + "description": ( + "Number of times the attacker tried an attack." + " This a datum about the aggressivity of the attack." + ), + }, + { + "name": "submitters", + "max_width": 300, + "disable_filters": True, + "disable_sort_by": True, + "description": "Sources that reported the attacker.", + }, + { + "name": "exploits", + "max_width": 300, + "disable_filters": True, + "disable_sort_by": True, + "description": ( + "If available, " "the CVEs that the attacker tried to exploit." 
+ ), + }, + { + "name": "attack_types", + "max_width": 300, + "disable_filters": True, + "disable_sort_by": True, + "description": ( + "Category of the attack " + "(protocol, service and type) Example: http/scan" + ), + }, ], ) self.assertEqual( @@ -1230,12 +1278,60 @@ def test_observable_default_data(self, *args, **kwargs): self.assertEqual( fourth_page_third_line[0]["columns"], [ - "time_range", - "total_hours_seen", - "total_hits", - "submitters", - "exploits", - "attack_types", + { + "name": "time_range", + "max_width": 300, + "disable_filters": True, + "disable_sort_by": True, + "description": "", + }, + { + "name": "total_hours_seen", + "max_width": 300, + "disable_filters": True, + "disable_sort_by": True, + "description": ( + "Number of hours when the attacker was active." + " This is a datum about how much time has" + " been active the attacker." + ), + }, + { + "name": "total_hits", + "max_width": 300, + "disable_filters": True, + "disable_sort_by": True, + "description": ( + "Number of times the attacker tried an attack." + " This a datum about the aggressivity of the attack." + ), + }, + { + "name": "submitters", + "max_width": 300, + "disable_filters": True, + "disable_sort_by": True, + "description": "Sources that reported the attacker.", + }, + { + "name": "exploits", + "max_width": 300, + "disable_filters": True, + "disable_sort_by": True, + "description": ( + "If available, " "the CVEs that the attacker tried to exploit." 
+ ), + }, + { + "name": "attack_types", + "max_width": 300, + "disable_filters": True, + "disable_sort_by": True, + "description": ( + "Category of the attack " + "(protocol, service and type) Example: http/scan" + ), + }, ], ) self.assertEqual( @@ -1538,12 +1634,60 @@ def test_observable_no_data(self, *args, **kwargs): self.assertEqual( fourth_page_third_line[0]["columns"], [ - "time_range", - "total_hours_seen", - "total_hits", - "submitters", - "exploits", - "attack_types", + { + "name": "time_range", + "max_width": 300, + "disable_filters": True, + "disable_sort_by": True, + "description": "", + }, + { + "name": "total_hours_seen", + "max_width": 300, + "disable_filters": True, + "disable_sort_by": True, + "description": ( + "Number of hours when the attacker was active." + " This is a datum about how much time has" + " been active the attacker." + ), + }, + { + "name": "total_hits", + "max_width": 300, + "disable_filters": True, + "disable_sort_by": True, + "description": ( + "Number of times the attacker tried an attack." + " This a datum about the aggressivity of the attack." + ), + }, + { + "name": "submitters", + "max_width": 300, + "disable_filters": True, + "disable_sort_by": True, + "description": "Sources that reported the attacker.", + }, + { + "name": "exploits", + "max_width": 300, + "disable_filters": True, + "disable_sort_by": True, + "description": ( + "If available, " "the CVEs that the attacker tried to exploit." 
+ ), + }, + { + "name": "attack_types", + "max_width": 300, + "disable_filters": True, + "disable_sort_by": True, + "description": ( + "Category of the attack " + "(protocol, service and type) Example: http/scan" + ), + }, ], ) self.assertEqual( diff --git a/tests/api_app/visualizers_manager/test_classes.py b/tests/api_app/visualizers_manager/test_classes.py index ecfe166f0c..6a7a17aa46 100644 --- a/tests/api_app/visualizers_manager/test_classes.py +++ b/tests/api_app/visualizers_manager/test_classes.py @@ -17,6 +17,7 @@ VisualizableObject, VisualizablePage, VisualizableTable, + VisualizableTableColumn, VisualizableTitle, VisualizableVerticalList, Visualizer, @@ -24,7 +25,11 @@ from api_app.visualizers_manager.decorators import ( visualizable_error_handler_with_params, ) -from api_app.visualizers_manager.enums import VisualizableColor, VisualizableSize +from api_app.visualizers_manager.enums import ( + VisualizableColor, + VisualizableSize, + VisualizableTableColumnSize, +) from api_app.visualizers_manager.models import VisualizerConfig from tests import CustomTestCase @@ -201,7 +206,7 @@ def test_to_dict_values_empty(self): "alignment": "center", "bold": False, "color": "", - "disable": False, + "disable": True, "icon": "", "italic": False, "link": "", @@ -241,15 +246,33 @@ def test_to_dict(self): ) } ] - columns = ["column_name"] - vvl = VisualizableTable(columns=columns, data=data) + columns = [ + VisualizableTableColumn( + name="column_name", + description="test description", + max_width=VisualizableTableColumnSize.S_300, + disable_filters=True, + disable_sort_by=True, + ), + ] + vvl = VisualizableTable( + columns=columns, data=data, sort_by_desc=True, sort_by_id="column_name" + ) expected_result = { "size": "auto", "alignment": "around", - "columns": ["column_name"], + "columns": [ + { + "name": "column_name", + "max_width": 300, + "description": "test description", + "disable_filters": True, + "disable_sort_by": True, + } + ], "page_size": 5, - 
"disable_filters": False, - "disable_sort_by": False, + "sort_by_id": "column_name", + "sort_by_desc": True, "type": "table", "data": [ { @@ -273,17 +296,30 @@ def test_to_dict(self): self.assertEqual(vvl.to_dict(), expected_result) def test_to_dict_data_null(self): - columns = ["column_name"] + columns = [ + VisualizableTableColumn( + name="column_name", + description="test description", + ), + ] vvl = VisualizableTable(columns=columns, data=[]) expected_result = { "size": "auto", "alignment": "around", - "columns": ["column_name"], + "columns": [ + { + "name": "column_name", + "max_width": 300, + "description": "test description", + "disable_filters": False, + "disable_sort_by": False, + } + ], "page_size": 5, - "disable_filters": False, - "disable_sort_by": False, "type": "table", "data": [], + "sort_by_id": "", + "sort_by_desc": False, } self.assertCountEqual(vvl.to_dict(), expected_result) @@ -517,3 +553,24 @@ def test_with_error(self): "type": "title", }, ) + + +class VisualizableTableColumnTestCase(CustomTestCase): + def test_to_dict(self): + co = VisualizableTableColumn( + name="id", + description="test description", + max_width=VisualizableTableColumnSize.S_300, + disable_filters=True, + disable_sort_by=True, + ) + result = co.to_dict() + + expected_result = { + "name": "id", + "description": "test description", + "max_width": 300, + "disable_filters": True, + "disable_sort_by": True, + } + self.assertEqual(expected_result, result)