diff --git a/.pylintrc b/.pylintrc index 57e8080eb9..69ed006aef 100644 --- a/.pylintrc +++ b/.pylintrc @@ -1,386 +1,687 @@ -[MASTER] +[MAIN] -# Specify a configuration file. -#rcfile= +# Analyse import fallback blocks. This can be used to support both Python 2 and +# 3 compatible code, which means that the block might have code that exists +# only in one or another interpreter, leading to false positives when analysed. +analyse-fallback-blocks=no + +# Clear in-memory caches upon conclusion of linting. Useful if running pylint +# in a server-like mode. +clear-cache-post-run=no + +# Load and enable all available extensions. Use --list-extensions to see a list +# all available extensions. +#enable-all-extensions= + +# In error mode, messages with a category besides ERROR or FATAL are +# suppressed, and no reports are done by default. Error mode is compatible with +# disabling specific errors. +#errors-only= + +# Always return a 0 (non-error) status code, even if lint errors are found. +# This is primarily useful in continuous integration scripts. +#exit-zero= + +# A comma-separated list of package or module names from where C extensions may +# be loaded. Extensions are loading into the active Python interpreter and may +# run arbitrary code. +extension-pkg-allow-list= + +# A comma-separated list of package or module names from where C extensions may +# be loaded. Extensions are loading into the active Python interpreter and may +# run arbitrary code. (This is an alternative name to extension-pkg-allow-list +# for backward compatibility.) +extension-pkg-whitelist= + +# Return non-zero exit code if any of these messages/categories are detected, +# even if score is above --fail-under value. Syntax same as enable. Messages +# specified are enabled, while categories only check already-enabled messages. +fail-on= + +# Specify a score threshold under which the program will exit with error. +fail-under=10 + +# Interpret the stdin as a python script, whose filename needs to be passed as +# the module_or_package argument. +#from-stdin= + +# Files or directories to be skipped. They should be base names, not paths. +ignore=CVS,Makefile,README.md + +# Add files or directories matching the regular expressions patterns to the +# ignore-list. The regex matches against paths and can be in Posix or Windows +# format. Because '\\' represents the directory delimiter on Windows systems, +# it can't be used as an escape character. +ignore-paths= + +# Files or directories matching the regular expression patterns are skipped. +# The regex matches against base names, not paths. The default value ignores +# Emacs file locks +ignore-patterns=^\.# + +# List of module names for which member attributes should not be checked and +# will not be imported (useful for modules/projects where namespaces are +# manipulated during runtime and thus existing member attributes cannot be +# deduced by static analysis). It supports qualified module names, as well as +# Unix pattern matching. +ignored-modules= # Python code to execute, usually for sys.path manipulation such as # pygtk.require(). -init-hook='import os, sys, pylint; +#init-hook= - root_dir = os.path.basename(pylint.config.PYLINTRC); +# Use multiple processes to speed up Pylint. Specifying 0 will auto-detect the +# number of processors available to use, and will cap the count on Windows to +# avoid hangs. 
+jobs=1 - sys.path.append(os.path.join(root_dir, "tools/plist_to_html")); - sys.path.append(os.path.join(root_dir, "analyzer")); - sys.path.append(os.path.join(root_dir, "web")); - sys.path.append(os.path.join(root_dir, "web/build/thrift/v6/gen-py")); - sys.path.append(os.path.join(root_dir, "web/client")); - sys.path.append(os.path.join(root_dir, "web/server"));' +# Control the amount of potential inferred values when inferring a single +# object. This can help the performance when dealing with large functions or +# complex, nested conditions. +limit-inference-results=100 -# Add files or directories to the blacklist. They should be base names, not -# paths. -ignore=CVS +# List of plugins (as comma separated values of python module names) to load, +# usually to register additional checkers. +load-plugins= # Pickle collected data for later comparisons. persistent=yes -# List of plugins (as comma separated values of python modules names) to load, -# usually to register additional checkers. -load-plugins= +# Resolve imports to .pyi stubs if available. May reduce no-member messages and +# increase not-an-iterable messages. +prefer-stubs=no -# Use multiple processes to speed up Pylint. -jobs=1 +# Minimum Python version to use for version dependent checks. Will default to +# the version used to run pylint. +py-version=3.8 + +# Discover python modules and packages in the file system subtree. +recursive=no + +# Add paths to the list of the source roots. Supports globbing patterns. The +# source root is an absolute path or a path relative to the current working +# directory used to determine a package namespace for modules located under the +# source root. +source-roots= + +# When enabled, pylint would attempt to guess common misconfiguration and emit +# user-friendly hints instead of false-positive error messages. +suggestion-mode=yes # Allow loading of arbitrary C extensions. Extensions are imported into the # active Python interpreter and may run arbitrary code. unsafe-load-any-extension=no -# A comma-separated list of package or module names from where C extensions may -# be loaded. Extensions are loading into the active Python interpreter and may -# run arbitrary code -extension-pkg-whitelist= +# In verbose mode, extra non-checker-related info will be displayed. +#verbose= -# Allow optimization of some AST trees. This will activate a peephole AST -# optimizer, which will apply various small optimizations. For instance, it can -# be used to obtain the result of joining multiple strings with the addition -# operator. Joining a lot of strings can lead to a maximum recursion error in -# Pylint and this flag can prevent that. It has one side effect, the resulting -# AST will be different than the one from reality. -optimize-ast=no +[BASIC] -[MESSAGES CONTROL] +# Naming style matching correct argument names. +argument-naming-style=snake_case -# Only show warnings with the listed confidence levels. Leave empty to show -# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED -confidence= +# Regular expression matching correct argument names. Overrides argument- +# naming-style. If left empty, argument names will be checked with the set +# naming style. +#argument-rgx= -# Disable the message, report, category or checker with the given id(s). 
You -# can either give multiple identifiers separated by comma (,) or put this -# option multiple times (only on the command line, not in the configuration -# file where it should appear only once).You can also use "--disable=all" to -# disable everything first and then reenable specific checks. For example, if -# you want to run only the similarities checker, you can use "--disable=all -# --enable=similarities". If you want to run only the classes checker, but have -# no Warning level messages displayed, use"--disable=all --enable=classes -# --disable=W" -disable=all +# Naming style matching correct attribute names. +attr-naming-style=snake_case -# Enable the message, report, category or checker with the given id(s). You can -# either give multiple identifier separated by comma (,) or put this option -# multiple time. See also the "--disable" option for examples. -enable=logging-format-interpolation,old-style-class,unused-wildcard-import,unused-import,unused-variable,len-as-condition,bad-indentation,unpacking-in-except,import-star-module-level,parameter-unpacking,long-suffix,old-octal-literal,old-ne-operator,backtick,old-raise-syntax,print-statement,unpacking-in-except,import-star-module-level,parameter-unpacking,long-suffix,old-octal-literal,old-ne-operator,backtick,old-raise-syntax,print-statement,not-in-loop,function-redefined,continue-in-finally,abstract-class-instantiated,sstar-needs-assignment-target,duplicate-argument-name,too-many-star-expressions,nonlocal-and-global,return-outside-function,return-arg-in-generator,invalid-star-assignment-target,bad-reversed-sequence,nonexistent-operator,yield-outside-function,init-is-generator,nonlocal-without-binding,invalid-unary-operand-type,unsupported-binary-operation,no-member,not-callable,redundant-keyword-arg,assignment-from-no-return,assignment-from-none,not-context-manager,repeated-keyword,missing-kwoa,no-value-for-parameter,invalid-sequence-index,invalid-slice-index,too-many-function-args,unexpected-keyword-arg,unsupported-membership-test,unpacking-non-sequence,invalid-all-object,no-name-in-module,unbalanced-tuple-unpacking,undefined-variable,undefined-all-variable,used-before-assignment,format-needs-mapping,truncated-format-string,missing-format-string-key,mixed-format-string,too-few-format-args,bad-str-strip-call,too-many-format-args,bad-format-character,access-member-before-definition,method-hidden,assigning-non-slot,duplicate-bases,inconsistent-mro,inherit-non-class,invalid-slots,invalid-slots-object,no-method-argument,no-self-argument,unexpected-special-method-signature,non-iterator-returned,invalid-length-returned,cyclic-import,consider-iterating-dictionary +# Regular expression matching correct attribute names. Overrides attr-naming- +# style. If left empty, attribute names will be checked with the set naming +# style. +#attr-rgx= -[REPORTS] +# Bad variable names which should always be refused, separated by a comma. +bad-names=foo, + bar, + baz, + toto, + tutu, + tata -# Set the output format. Available formats are text, parseable, colorized, msvs -# (visual studio) and html. You can also give a reporter class, eg -# mypackage.mymodule.MyReporterClass. -output-format=text +# Bad variable names regexes, separated by a comma. If names match any regex, +# they will always be refused +bad-names-rgxs= -# Put messages in a separate file for each module / package specified on the -# command line instead of printing them on stdout. Reports (if any) will be -# written in a file name "pylint_global.[txt|html]". 
-files-output=no +# Naming style matching correct class attribute names. +class-attribute-naming-style=any -# Tells whether to display a full report or only the messages -reports=yes +# Regular expression matching correct class attribute names. Overrides class- +# attribute-naming-style. If left empty, class attribute names will be checked +# with the set naming style. +#class-attribute-rgx= -# Python expression which should return a note less than 10 (10 is the highest -# note). You have access to the variables errors warning, statement which -# respectively contain the number of errors / warnings messages and the total -# number of statements analyzed. This is used by the global evaluation report -# (RP0004). -evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10) +# Naming style matching correct class constant names. +class-const-naming-style=UPPER_CASE -# Template used to display messages. This is a python new-style format string -# used to format the message information. See doc for all details -msg-template=[{msg_id}] {path}:{line:3d}:{column}: {msg} +# Regular expression matching correct class constant names. Overrides class- +# const-naming-style. If left empty, class constant names will be checked with +# the set naming style. +#class-const-rgx= +# Naming style matching correct class names. +class-naming-style=PascalCase -[SPELLING] +# Regular expression matching correct class names. Overrides class-naming- +# style. If left empty, class names will be checked with the set naming style. +#class-rgx= -# Spelling dictionary name. Available dictionaries: none. To make it working -# install python-enchant package. -spelling-dict= +# Naming style matching correct constant names. +const-naming-style=UPPER_CASE -# List of comma separated words that should not be checked. -spelling-ignore-words= +# Regular expression matching correct constant names. Overrides const-naming- +# style. If left empty, constant names will be checked with the set naming +# style. +#const-rgx= -# A path to a file that contains private dictionary; one word per line. -spelling-private-dict-file= +# Minimum line length for functions/classes that require docstrings, shorter +# ones are exempt. +docstring-min-length=-1 -# Tells whether to store unknown words to indicated private dictionary in -# --spelling-private-dict-file option instead of raising a message. -spelling-store-unknown-words=no +# Naming style matching correct function names. +function-naming-style=snake_case +# Regular expression matching correct function names. Overrides function- +# naming-style. If left empty, function names will be checked with the set +# naming style. +#function-rgx= -[LOGGING] +# Good variable names which should always be accepted, separated by a comma. +good-names=i, + j, + k, + ex, + Run, + _ -# Logging modules to check that the string format arguments are in logging -# function parameter format -logging-modules=logging +# Good variable names regexes, separated by a comma. If names match any regex, +# they will always be accepted +good-names-rgxs= +# Include a hint for the correct naming format with invalid-name. +include-naming-hint=no -[FORMAT] +# Naming style matching correct inline iteration names. +inlinevar-naming-style=any -# Maximum number of characters on a single line. -max-line-length=80 +# Regular expression matching correct inline iteration names. Overrides +# inlinevar-naming-style. If left empty, inline iteration names will be checked +# with the set naming style. 
+#inlinevar-rgx= -# Regexp for a line that is allowed to be longer than the limit. -ignore-long-lines=^\s*(# )??$ +# Naming style matching correct method names. +method-naming-style=snake_case -# Allow the body of an if to be on the same line as the test if there is no -# else. -single-line-if-stmt=no +# Regular expression matching correct method names. Overrides method-naming- +# style. If left empty, method names will be checked with the set naming style. +#method-rgx= -# List of optional constructs for which whitespace checking is disabled. `dict- -# separator` is used to allow tabulation in dicts, etc.: {1 : 1,\n222: 2}. -# `trailing-comma` allows a space between comma and closing bracket: (a, ). -# `empty-line` allows space-only lines. -no-space-check=trailing-comma,dict-separator +# Naming style matching correct module names. +module-naming-style=snake_case -# Maximum number of lines in a module -max-module-lines=2000 +# Regular expression matching correct module names. Overrides module-naming- +# style. If left empty, module names will be checked with the set naming style. +#module-rgx= -# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 -# tab). -indent-string=' ' +# Colon-delimited sets of names that determine each other's naming style when +# the name regexes allow several styles. +name-group= -# Number of spaces of indent required inside a hanging or continued line. -indent-after-paren=4 +# Regular expression which should only match function or class names that do +# not require a docstring. +no-docstring-rgx=^_ -# Expected format of line ending, e.g. empty (any line ending), LF or CRLF. -expected-line-ending-format= +# List of decorators that produce properties, such as abc.abstractproperty. Add +# to this list to register other decorators that produce valid properties. +# These decorators are taken in consideration only for invalid-name. +property-classes=abc.abstractproperty +# Regular expression matching correct type alias names. If left empty, type +# alias names will be checked with the set naming style. +#typealias-rgx= -[MISCELLANEOUS] +# Regular expression matching correct type variable names. If left empty, type +# variable names will be checked with the set naming style. +#typevar-rgx= -# List of note tags to take in consideration, separated by a comma. -#notes=FIXME,XXX,TODO +# Naming style matching correct variable names. +variable-naming-style=snake_case +# Regular expression matching correct variable names. Overrides variable- +# naming-style. If left empty, variable names will be checked with the set +# naming style. +#variable-rgx= -[VARIABLES] -# Tells whether we should check for unused import in __init__ files. -init-import=no +[CLASSES] -# A regular expression matching the name of dummy variables (i.e. expectedly -# not used). -dummy-variables-rgx=_$|dummy +# Warn about protected attribute access inside special methods +check-protected-access-in-special-methods=no -# List of additional names supposed to be defined in builtins. Remember that -# you should avoid to define new builtins when possible. -additional-builtins= +# List of method names used to declare (i.e. assign) instance attributes. +defining-attr-methods=__init__, + __new__, + setUp, + asyncSetUp, + __post_init__ -# List of strings which can identify a callback function by name. A callback -# name must start or end with one of those strings. -callbacks=cb_,_cb +# List of member names, which should be excluded from the protected access +# warning. 
+exclude-protected=_asdict,_fields,_replace,_source,_make,os._exit +# List of valid names for the first argument in a class method. +valid-classmethod-first-arg=cls -[BASIC] +# List of valid names for the first argument in a metaclass class method. +valid-metaclass-classmethod-first-arg=mcs -# List of builtins function names that should not be used, separated by a comma -bad-functions= -# Good variable names which should always be accepted, separated by a comma -good-names=i,j,k,ex,Run,_ +[DESIGN] -# Bad variable names which should always be refused, separated by a comma -bad-names=foo,bar,baz,toto,tutu,tata +# List of regular expressions of class ancestor names to ignore when counting +# public methods (see R0903) +exclude-too-few-public-methods= -# Colon-delimited sets of names that determine each other's naming style when -# the name regexes allow several styles. -name-group= +# List of qualified class names to ignore when counting class parents (see +# R0901) +ignored-parents= -# Include a hint for the correct naming format with invalid-name -include-naming-hint=no +# Maximum number of arguments for function / method. +max-args=5 -# Regular expression matching correct function names -function-rgx=[a-z_][a-z0-9_]{2,30}$ +# Maximum number of attributes for a class (see R0902). +max-attributes=7 -# Naming hint for function names -function-name-hint=[a-z_][a-z0-9_]{2,30}$ +# Maximum number of boolean expressions in an if statement (see R0916). +max-bool-expr=5 -# Regular expression matching correct variable names -variable-rgx=[a-z_][a-z0-9_]{2,30}$ +# Maximum number of branch for function / method body. +max-branches=12 -# Naming hint for variable names -variable-name-hint=[a-z_][a-z0-9_]{2,30}$ +# Maximum number of locals for function / method body. +max-locals=15 -# Regular expression matching correct constant names -const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$ +# Maximum number of parents for a class (see R0901). +max-parents=7 -# Naming hint for constant names -const-name-hint=(([A-Z_][A-Z0-9_]*)|(__.*__))$ +# Maximum number of public methods for a class (see R0904). +max-public-methods=20 -# Regular expression matching correct attribute names -attr-rgx=[a-z_][a-z0-9_]{2,30}$ +# Maximum number of return / yield for function / method body. +max-returns=6 -# Naming hint for attribute names -attr-name-hint=[a-z_][a-z0-9_]{2,30}$ +# Maximum number of statements in function / method body. +max-statements=50 -# Regular expression matching correct argument names -argument-rgx=[a-z_][a-z0-9_]{2,30}$ +# Minimum number of public methods for a class (see R0903). +min-public-methods=2 -# Naming hint for argument names -argument-name-hint=[a-z_][a-z0-9_]{2,30}$ -# Regular expression matching correct class attribute names -class-attribute-rgx=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$ +[EXCEPTIONS] -# Naming hint for class attribute names -class-attribute-name-hint=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$ +# Exceptions that will emit a warning when caught. +overgeneral-exceptions=builtins.BaseException,builtins.Exception -# Regular expression matching correct inline iteration names -inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$ -# Naming hint for inline iteration names -inlinevar-name-hint=[A-Za-z_][A-Za-z0-9_]*$ +[FORMAT] -# Regular expression matching correct class names -class-rgx=[A-Z_][a-zA-Z0-9]+$ +# Expected format of line ending, e.g. empty (any line ending), LF or CRLF. 
+expected-line-ending-format= -# Naming hint for class names -class-name-hint=[A-Z_][a-zA-Z0-9]+$ +# Regexp for a line that is allowed to be longer than the limit. +ignore-long-lines=^\s*(# )??$ -# Regular expression matching correct module names -module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$ +# Number of spaces of indent required inside a hanging or continued line. +indent-after-paren=4 -# Naming hint for module names -module-name-hint=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$ +# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 +# tab). +indent-string=' ' -# Regular expression matching correct method names -method-rgx=[a-z_][a-z0-9_]{2,30}$ +# Maximum number of characters on a single line. +max-line-length=100 -# Naming hint for method names -method-name-hint=[a-z_][a-z0-9_]{2,30}$ +# Maximum number of lines in a module. +max-module-lines=1000 -# Regular expression which should only match function or class names that do -# not require a docstring. -no-docstring-rgx=^_ +# Allow the body of a class to be on the same line as the declaration if body +# contains single statement. +single-line-class-stmt=no -# Minimum line length for functions/classes that require docstrings, shorter -# ones are exempt. -docstring-min-length=50 +# Allow the body of an if to be on the same line as the test if there is no +# else. +single-line-if-stmt=no + + +[IMPORTS] + +# List of modules that can be imported at any level, not just the top level +# one. +allow-any-import-level= + +# Allow explicit reexports by alias from a package __init__. +allow-reexport-from-package=no + +# Allow wildcard imports from modules that define __all__. +allow-wildcard-with-all=no + +# Deprecated modules which should not be used, separated by a comma. +deprecated-modules= + +# Output a graph (.gv or any supported image format) of external dependencies +# to the given file (report RP0402 must not be disabled). +ext-import-graph= + +# Output a graph (.gv or any supported image format) of all (i.e. internal and +# external) dependencies to the given file (report RP0402 must not be +# disabled). +import-graph= + +# Output a graph (.gv or any supported image format) of internal dependencies +# to the given file (report RP0402 must not be disabled). +int-import-graph= + +# Force import order to recognize a module as part of the standard +# compatibility libraries. +known-standard-library= + +# Force import order to recognize a module as part of a third party library. +known-third-party=enchant + +# Couples of modules and preferred modules, separated by a comma. +preferred-modules= + + +[LOGGING] + +# The type of string formatting that logging methods do. `old` means using % +# formatting, `new` is for `{}` formatting. +logging-format-style=old + +# Logging modules to check that the string format arguments are in logging +# function parameter format. +logging-modules=logging -[ELIF] +[MESSAGES CONTROL] + +# Only show warnings with the listed confidence levels. Leave empty to show +# all. Valid levels: HIGH, CONTROL_FLOW, INFERENCE, INFERENCE_FAILURE, +# UNDEFINED. +confidence=HIGH, + CONTROL_FLOW, + INFERENCE, + INFERENCE_FAILURE, + UNDEFINED + +# Disable the message, report, category or checker with the given id(s). You +# can either give multiple identifiers separated by comma (,) or put this +# option multiple times (only on the command line, not in the configuration +# file where it should appear only once). 
You can also use "--disable=all" to
+# disable everything first and then re-enable specific checks. For example, if
+# you want to run only the similarities checker, you can use "--disable=all
+# --enable=similarities". If you want to run only the classes checker, but have
+# no Warning level messages displayed, use "--disable=all --enable=classes
+# --disable=W".
+disable=raw-checker-failed,
+        bad-inline-option,
+        locally-disabled,
+        file-ignored,
+        suppressed-message,
+        useless-suppression,
+        deprecated-pragma,
+        use-symbolic-message-instead,
+        use-implicit-booleaness-not-comparison-to-string,
+        use-implicit-booleaness-not-comparison-to-zero,
+        fixme,
+
+        # We decided to ignore the following style checkers
+        no-else-return,
+        no-else-continue,
+        no-else-break,
+
+        # Due to non-standard directory structure
+        import-error,
+        wrong-import-order,
+        import-outside-toplevel,
+
+        # Some parts of the code really are too complex
+        too-many-arguments,
+        too-many-locals,
+        too-many-branches,
+        too-many-statements,
+        too-many-return-statements,
+        too-many-instance-attributes,
+        too-many-public-methods,
+        too-many-lines,
+        too-many-boolean-expressions,
+        too-many-nested-blocks,
+        too-few-public-methods,
+
+        missing-function-docstring,
+        missing-class-docstring,
+        missing-module-docstring,
+
+        # TODO: Test code has these global variables
+        global-variable-undefined,
+        global-variable-not-assigned,
+
+        duplicate-code, # TODO: Could be eliminated
+        cyclic-import, # TODO: Could be fixed
+        subprocess-popen-preexec-fn, # TODO: Could be fixed
+        broad-exception-caught, # TODO: Could be valid
+        no-member, # TODO: Why is this emitted for multiprocess module?
+        consider-using-with, # TODO: Mainly for Popen, could be valid
+        protected-access, # TODO: Used in test code, but not elegant
+        global-statement,
+        consider-using-get, # Unnecessary style checker
+        attribute-defined-outside-init # Too many reports from test code
+
+# Enable the message, report, category or checker with the given id(s). You can
+# either give multiple identifiers separated by comma (,) or put this option
+# multiple times (only on the command line, not in the configuration file where
+# it should appear only once). See also the "--disable" option for examples.
+enable=
+
+
+[METHOD_ARGS]
+
+# List of qualified names (i.e., library.method) which require a timeout
+# parameter e.g. 'requests.api.get,requests.api.post'
+timeout-methods=requests.api.delete,requests.api.get,requests.api.head,requests.api.options,requests.api.patch,requests.api.post,requests.api.put,requests.api.request
+
+
+[MISCELLANEOUS]
+
+# List of note tags to take in consideration, separated by a comma.
+notes=FIXME,
+      XXX,
+      TODO
+
+# Regular expression of note tags to take in consideration.
+notes-rgx=
+
+
+[REFACTORING]

 # Maximum number of nested blocks for function / method body
 max-nested-blocks=5

+# Complete name of functions that never return. When checking for
+# inconsistent-return-statements if a never returning function is called then
+# it will be considered as an explicit return statement and no message will be
+# printed.
+never-returning-functions=sys.exit,argparse.parse_error

-[TYPECHECK]
+# Let 'consider-using-join' be raised when the separator to join on would be
+# non-empty (resulting in expected fixes of the type: ``"- " + " - 
+# ".join(items)``)
+suggest-join-with-non-empty-separator=yes

-# Tells whether missing members accessed in mixin class should be ignored. A
-# mixin class is detected if its name ends with "mixin" (case insensitive).
-ignore-mixin-members=yes -# List of module names for which member attributes should not be checked -# (useful for modules/projects where namespaces are manipulated during runtime -# and thus existing member attributes cannot be deduced by static analysis. It -# supports qualified module names, as well as Unix pattern matching. -ignored-modules=ldap,lxml.etree +[REPORTS] -# List of classes names for which member attributes should not be checked -# (useful for classes with attributes dynamically set). This supports can work -# with qualified names. -ignored-classes=BuildAction +# Python expression which should return a score less than or equal to 10. You +# have access to the variables 'fatal', 'error', 'warning', 'refactor', +# 'convention', and 'info' which contain the number of messages in each +# category, as well as 'statement' which is the total number of statements +# analyzed. This score is used by the global evaluation report (RP0004). +evaluation=max(0, 0 if fatal else 10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)) -# List of members which are set dynamically and missed by pylint inference -# system, and so shouldn't trigger E1101 when accessed. Python regular -# expressions are accepted. -generated-members= +# Template used to display messages. This is a python new-style format string +# used to format the message information. See doc for all details. +msg-template= + +# Set the output format. Available formats are: text, parseable, colorized, +# json2 (improved json format), json (old json format) and msvs (visual +# studio). You can also give a reporter class, e.g. +# mypackage.mymodule.MyReporterClass. +#output-format= + +# Tells whether to display a full report or only the messages. +reports=no + +# Activate the evaluation score. +score=yes [SIMILARITIES] +# Comments are removed from the similarity computation +ignore-comments=yes + +# Docstrings are removed from the similarity computation +ignore-docstrings=yes + +# Imports are removed from the similarity computation +ignore-imports=yes + +# Signatures are removed from the similarity computation +ignore-signatures=yes + # Minimum lines number of a similarity. min-similarity-lines=4 -# Ignore comments when computing similarities. -ignore-comments=yes -# Ignore docstrings when computing similarities. -ignore-docstrings=yes +[SPELLING] -# Ignore imports when computing similarities. -ignore-imports=no +# Limits count of emitted suggestions for spelling mistakes. +max-spelling-suggestions=4 +# Spelling dictionary name. No available dictionaries : You need to install +# both the python package and the system dependency for enchant to work. +spelling-dict= -[DESIGN] +# List of comma separated words that should be considered directives if they +# appear at the beginning of a comment and should not be checked. +spelling-ignore-comment-directives=fmt: on,fmt: off,noqa:,noqa,nosec,isort:skip,mypy: -# Maximum number of arguments for function / method -max-args=8 +# List of comma separated words that should not be checked. +spelling-ignore-words= -# Argument names that match this expression will be ignored. Default to name -# with leading underscore -ignored-argument-names=_.* +# A path to a file that contains the private dictionary; one word per line. +spelling-private-dict-file= -# Maximum number of locals for function / method body -max-locals=20 +# Tells whether to store unknown words to the private dictionary (see the +# --spelling-private-dict-file option) instead of raising a message. 
+spelling-store-unknown-words=no -# Maximum number of return / yield for function / method body -max-returns=6 -# Maximum number of branch for function / method body -max-branches=12 +[STRING] -# Maximum number of statements in function / method body -max-statements=50 +# This flag controls whether inconsistent-quotes generates a warning when the +# character used as a quote delimiter is used inconsistently within a module. +check-quote-consistency=no -# Maximum number of parents for a class (see R0901). -max-parents=7 +# This flag controls whether the implicit-str-concat should generate a warning +# on implicit string concatenation in sequences defined over several lines. +check-str-concat-over-line-jumps=no -# Maximum number of attributes for a class (see R0902). -max-attributes=7 -# Minimum number of public methods for a class (see R0903). -min-public-methods=2 +[TYPECHECK] -# Maximum number of public methods for a class (see R0904). -max-public-methods=20 +# List of decorators that produce context managers, such as +# contextlib.contextmanager. Add to this list to register other decorators that +# produce valid context managers. +contextmanager-decorators=contextlib.contextmanager -# Maximum number of boolean expressions in an if statement -max-bool-expr=5 +# List of members which are set dynamically and missed by pylint inference +# system, and so shouldn't trigger E1101 when accessed. Python regular +# expressions are accepted. +generated-members= +# Tells whether to warn about missing members when the owner of the attribute +# is inferred to be None. +ignore-none=yes -[IMPORTS] +# This flag controls whether pylint should warn about no-member and similar +# checks whenever an opaque object is returned when inferring. The inference +# can return multiple potential results while evaluating a Python object, but +# some branches might not be evaluated, which results in partial inference. In +# that case, it might be useful to still emit no-member and other checks for +# the rest of the inferred objects. +ignore-on-opaque-inference=yes -# Deprecated modules which should not be used, separated by a comma -deprecated-modules=regsub,TERMIOS,Bastion,rexec +# List of symbolic message names to ignore for Mixin members. +ignored-checks-for-mixins=no-member, + not-async-context-manager, + not-context-manager, + attribute-defined-outside-init -# Create a graph of every (i.e. internal and external) dependencies in the -# given file (report RP0402 must not be disabled) -import-graph= +# List of class names for which member attributes should not be checked (useful +# for classes with dynamically set attributes). This supports the use of +# qualified names. +ignored-classes=optparse.Values,thread._local,_thread._local,argparse.Namespace -# Create a graph of external dependencies in the given file (report RP0402 must -# not be disabled) -ext-import-graph= +# Show a hint with possible names when a member name was not found. The aspect +# of finding the hint is based on edit distance. +missing-member-hint=yes -# Create a graph of internal dependencies in the given file (report RP0402 must -# not be disabled) -int-import-graph= +# The minimum edit distance a name should have in order to be considered a +# similar match for a missing member name. +missing-member-hint-distance=1 +# The total number of similar names that should be taken in consideration when +# showing a hint for a missing member. +missing-member-max-choices=1 -[CLASSES] +# Regex pattern to define which classes are considered mixins. 
+mixin-class-rgx=.*[Mm]ixin -# List of method names used to declare (i.e. assign) instance attributes. -defining-attr-methods=__init__,__new__,setUp +# List of decorators that change the signature of a decorated function. +signature-mutators= -# List of valid names for the first argument in a class method. -valid-classmethod-first-arg=cls -# List of valid names for the first argument in a metaclass class method. -valid-metaclass-classmethod-first-arg=mcs +[VARIABLES] -# List of member names, which should be excluded from the protected access -# warning. -exclude-protected=_asdict,_fields,_replace,_source,_make +# List of additional names supposed to be defined in builtins. Remember that +# you should avoid defining new builtins when possible. +additional-builtins= +# Tells whether unused global variables should be treated as a violation. +allow-global-unused-variables=yes -[EXCEPTIONS] +# List of names allowed to shadow builtins +allowed-redefined-builtins= + +# List of strings which can identify a callback function by name. A callback +# name must start or end with one of those strings. +callbacks=cb_, + _cb + +# A regular expression matching the name of dummy variables (i.e. expected to +# not be used). +dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_ + +# Argument names that match this expression will be ignored. +ignored-argument-names=_.*|^ignored_|^unused_ + +# Tells whether we should check for unused import in __init__ files. +init-import=no -# Exceptions that will emit a warning when being caught. Defaults to -# "Exception" -overgeneral-exceptions=Exception +# List of qualified module names which can have objects that can redefine +# builtins. +redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io diff --git a/Makefile b/Makefile index 39c2a6e24b..3edbfd76ec 100644 --- a/Makefile +++ b/Makefile @@ -164,7 +164,7 @@ PYLINT_CMD = $(MAKE) -C $(CC_ANALYZER) pylint && \ $(MAKE) -C $(CC_WEB) pylint && \ pylint -j0 ./bin/** ./codechecker_common \ ./scripts/** ./scripts/build/** ./scripts/debug_tools/** \ - ./scripts/gerrit_jenkins/** ./scripts/resources/** \ + ./scripts/resources/** \ ./scripts/test/** ./scripts/thrift/** \ --rcfile=$(ROOT)/.pylintrc diff --git a/analyzer/codechecker_analyzer/analysis_manager.py b/analyzer/codechecker_analyzer/analysis_manager.py index 101f97e54c..6b22ca4231 100644 --- a/analyzer/codechecker_analyzer/analysis_manager.py +++ b/analyzer/codechecker_analyzer/analysis_manager.py @@ -5,8 +5,6 @@ # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception # # ------------------------------------------------------------------------- -""" -""" import glob @@ -116,14 +114,14 @@ def worker_result_handler(results, metadata_tool, output_path): # Progress reporting. -progress_checked_num = None -progress_actions = None +PROGRESS_CHECKED_NUM = None +PROGRESS_ACTIONS = None def init_worker(checked_num, action_num): - global progress_checked_num, progress_actions - progress_checked_num = checked_num - progress_actions = action_num + global PROGRESS_CHECKED_NUM, PROGRESS_ACTIONS + PROGRESS_CHECKED_NUM = checked_num + PROGRESS_ACTIONS = action_num def save_output(base_file_name, out, err): @@ -172,8 +170,7 @@ def prepare_check(action, analyzer_config, output_dir, """ Construct the source analyzer and result handler. """ # Create a source analyzer. source_analyzer = \ - analyzer_types.construct_analyzer(action, - analyzer_config) + analyzer_types.construct_analyzer(action, analyzer_config) if disable_ctu: # WARNING! 
can be called only on ClangSA @@ -334,8 +331,8 @@ def handle_failure( # from the standard output by this postprocess phase so we can present them # as CodeChecker reports. checks = source_analyzer.config_handler.checks() - state = checks.get('clang-diagnostic-error', (CheckerState.enabled, ''))[0] - if state == CheckerState.enabled: + state = checks.get('clang-diagnostic-error', (CheckerState.ENABLED, ''))[0] + if state == CheckerState.ENABLED: rh.postprocess_result(skip_handlers, rs_handler) # Remove files that successfully analyzed earlier on. @@ -507,7 +504,7 @@ def check(check_data): result_file = '' if analyzer_config is None: - raise Exception("Analyzer configuration is missing.") + raise ValueError("Analyzer configuration is missing.") source_analyzer, rh = prepare_check(action, analyzer_config, output_dir, @@ -539,7 +536,7 @@ def __create_timeout(analyzer_process): timeout_cleanup[0] = setup_process_timeout( analyzer_process, analysis_timeout) else: - def __create_timeout(analyzer_process): + def __create_timeout(_): # If no timeout is given by the client, this callback # shouldn't do anything. pass @@ -555,9 +552,9 @@ def __create_timeout(analyzer_process): "of %d seconds.", analysis_timeout) LOG.warning("Considering this analysis as failed...") rh.analyzer_returncode = -1 - rh.analyzer_stderr = (">>> CodeChecker: Analysis timed out " - "after {0} seconds. <<<\n{1}") \ - .format(analysis_timeout, rh.analyzer_stderr) + rh.analyzer_stderr = \ + ">>> CodeChecker: Analysis timed out after " \ + f"{analysis_timeout} seconds. <<<\n{rh.analyzer_stderr}" source_analyzer.post_analyze(rh) @@ -619,7 +616,7 @@ def handle_analysis_result(success, zip_file=zip_file): if rh.analyzer_returncode == 0: handle_analysis_result(success=True) LOG.info("[%d/%d] %s analyzed %s successfully.", - progress_checked_num.value, progress_actions.value, + PROGRESS_CHECKED_NUM.value, PROGRESS_ACTIONS.value, action.analyzer_type, source_file_name) if result_file_exists: @@ -660,8 +657,8 @@ def handle_analysis_result(success, zip_file=zip_file): LOG.info("[%d/%d] %s analyzed %s without" " CTU successfully.", - progress_checked_num.value, - progress_actions.value, + PROGRESS_CHECKED_NUM.value, + PROGRESS_ACTIONS.value, action.analyzer_type, source_file_name) @@ -688,7 +685,7 @@ def handle_analysis_result(success, zip_file=zip_file): LOG.debug_analyzer('\n%s', rh.analyzer_stdout) LOG.debug_analyzer('\n%s', rh.analyzer_stderr) - progress_checked_num.value += 1 + PROGRESS_CHECKED_NUM.value += 1 return return_codes, False, reanalyzed, action.analyzer_type, \ result_file, action.source @@ -731,10 +728,8 @@ def start_workers(actions_map, actions, analyzer_config_map, Start the workers in the process pool. For every build action there is worker which makes the analysis. """ - # pylint: disable=no-member multiprocess module members. - # Handle SIGINT to stop this script running. - def signal_handler(signum, frame): + def signal_handler(signum, _): try: pool.terminate() pool.join() diff --git a/analyzer/codechecker_analyzer/analyzer.py b/analyzer/codechecker_analyzer/analyzer.py index 55f51f80ce..062c770731 100644 --- a/analyzer/codechecker_analyzer/analyzer.py +++ b/analyzer/codechecker_analyzer/analyzer.py @@ -126,7 +126,7 @@ def __has_enabled_checker(ch: AnalyzerConfigHandler): Returns True if at least one checker is enabled in the given config handler. 
""" - return any(state == CheckerState.enabled + return any(state == CheckerState.ENABLED for _, (state, _) in ch.checks().items()) @@ -137,8 +137,6 @@ def perform_analysis(args, skip_handlers, rs_handler: ReviewStatusHandler, in the given analysis context for the supplied build actions. Additionally, insert statistical information into the metadata dict. """ - # pylint: disable=no-member multiprocess module members. - context = analyzer_context.get_context() ctu_reanalyze_on_failure = 'ctu_reanalyze_on_failure' in args and \ @@ -246,8 +244,8 @@ def perform_analysis(args, skip_handlers, rs_handler: ReviewStatusHandler, for check, data in config_map[analyzer].checks().items(): state, _ = data metadata_info['checkers'].update({ - check: state == CheckerState.enabled}) - if state == CheckerState.enabled: + check: state == CheckerState.ENABLED}) + if state == CheckerState.ENABLED: enabled_checkers[analyzer].append(check) # TODO: cppcheck may require a different environment than clang. diff --git a/analyzer/codechecker_analyzer/analyzer_context.py b/analyzer/codechecker_analyzer/analyzer_context.py index f4176285a6..7fd15a8a8a 100644 --- a/analyzer/codechecker_analyzer/analyzer_context.py +++ b/analyzer/codechecker_analyzer/analyzer_context.py @@ -10,7 +10,7 @@ """ -# pylint: disable=no-name-in-module +# pylint: disable=deprecated-module from distutils.spawn import find_executable from argparse import ArgumentTypeError @@ -20,8 +20,8 @@ from pathlib import Path -from codechecker_common import logger from codechecker_analyzer.arg import analyzer_binary +from codechecker_common import logger from codechecker_common.checker_labels import CheckerLabels from codechecker_common.singleton import Singleton from codechecker_common.util import load_json @@ -92,7 +92,7 @@ def __init__(self): self.__populate_analyzers() self.__populate_replacer() - def __parse_CC_ANALYZER_BIN(self): + def __parse_cc_analyzer_bin(self): env_var_bins = {} if 'CC_ANALYZER_BIN' in self.analyzer_env: had_error = False @@ -202,7 +202,7 @@ def __populate_analyzers(self): if not analyzer_from_path: analyzer_env = self.analyzer_env - env_var_bin = self.__parse_CC_ANALYZER_BIN() + env_var_bin = self.__parse_cc_analyzer_bin() compiler_binaries = self.pckg_layout.get('analyzers') for name, value in compiler_binaries.items(): diff --git a/analyzer/codechecker_analyzer/analyzers/analyzer_base.py b/analyzer/codechecker_analyzer/analyzers/analyzer_base.py index 2919666fa5..d6d92b5aa8 100644 --- a/analyzer/codechecker_analyzer/analyzers/analyzer_base.py +++ b/analyzer/codechecker_analyzer/analyzers/analyzer_base.py @@ -56,8 +56,9 @@ def resolve_missing_binary(cls, configured_binary, environ): """ raise NotImplementedError("Subclasses should implement this!") + @classmethod @abstractmethod - def get_binary_version(self, environ, details=False) -> str: + def get_binary_version(cls, environ, details=False) -> str: """ Return the version number of the binary that CodeChecker found, even if its incompatible. If details is true, additional version information @@ -138,7 +139,6 @@ def post_analyze(self, result_handler): """ Run immediately after the analyze function. """ - pass @staticmethod def run_proc(command, cwd=None, proc_callback=None, env=None): @@ -147,7 +147,7 @@ def run_proc(command, cwd=None, proc_callback=None, env=None): and the stdout and stderr outputs of the process. """ - def signal_handler(signum, frame): + def signal_handler(signum, _): # Clang does not kill its child processes, so I have to. 
try: g_pid = proc.pid diff --git a/analyzer/codechecker_analyzer/analyzers/analyzer_types.py b/analyzer/codechecker_analyzer/analyzers/analyzer_types.py index 4dc425530f..6e8a492ecf 100644 --- a/analyzer/codechecker_analyzer/analyzers/analyzer_types.py +++ b/analyzer/codechecker_analyzer/analyzers/analyzer_types.py @@ -209,24 +209,17 @@ def check_supported_analyzers(analyzers): return enabled_analyzers, failed_analyzers -def construct_analyzer(buildaction, - analyzer_config): - try: - analyzer_type = buildaction.analyzer_type - - LOG.debug_analyzer('Constructing %s analyzer.', analyzer_type) - if analyzer_type in supported_analyzers: - analyzer = supported_analyzers[analyzer_type](analyzer_config, - buildaction) - else: - analyzer = None - LOG.error('Unsupported analyzer type: %s', analyzer_type) - return analyzer - - except Exception: - # We should've detected well before this point that something is off - # with the analyzer. We can't recover here. - raise +def construct_analyzer(buildaction, analyzer_config): + analyzer_type = buildaction.analyzer_type + + LOG.debug_analyzer('Constructing %s analyzer.', analyzer_type) + if analyzer_type in supported_analyzers: + analyzer = \ + supported_analyzers[analyzer_type](analyzer_config, buildaction) + else: + analyzer = None + LOG.error('Unsupported analyzer type: %s', analyzer_type) + return analyzer def build_config_handlers(args, enabled_analyzers): diff --git a/analyzer/codechecker_analyzer/analyzers/clangsa/analyzer.py b/analyzer/codechecker_analyzer/analyzers/clangsa/analyzer.py index befcfb7e72..6bb89697c7 100644 --- a/analyzer/codechecker_analyzer/analyzers/clangsa/analyzer.py +++ b/analyzer/codechecker_analyzer/analyzers/clangsa/analyzer.py @@ -121,7 +121,7 @@ class ClangSA(analyzer_base.SourceAnalyzer): __ctu_autodetection = None def __init__(self, cfg_handler, buildaction): - super(ClangSA, self).__init__(cfg_handler, buildaction) + super().__init__(cfg_handler, buildaction) self.__disable_ctu = False self.__checker_configs = [] self.__disabled_checkers = [] @@ -171,17 +171,17 @@ def __add_plugin_load_flags(cls, analyzer_cmd: List[str]): analyzer_cmd.extend(["-load", plugin]) @classmethod - def get_binary_version(self, environ, details=False) -> str: + def get_binary_version(cls, environ, details=False) -> str: # No need to LOG here, we will emit a warning later anyway. 
-        if not self.analyzer_binary():
+        if not cls.analyzer_binary():
             return None

         if details:
-            version = [self.analyzer_binary(), '--version']
+            ver = [cls.analyzer_binary(), '--version']
         else:
-            version = [self.analyzer_binary(), '-dumpversion']
+            ver = [cls.analyzer_binary(), '-dumpversion']
         try:
-            output = subprocess.check_output(version,
+            output = subprocess.check_output(ver,
                                              env=environ,
                                              universal_newlines=True,
                                              encoding="utf-8",
@@ -189,7 +189,7 @@ def get_binary_version(self, environ, details=False) -> str:
             return output.strip()
         except (subprocess.CalledProcessError, OSError) as oerr:
             LOG.warning("Failed to get analyzer version: %s",
-                        ' '.join(version))
+                        ' '.join(ver))
             LOG.warning(oerr)

         return None
@@ -405,9 +405,9 @@ def construct_analyzer_cmd(self, result_handler):
         enabled_checkers = []
         for checker_name, value in config.checks().items():
             state, _ = value
-            if state == CheckerState.enabled:
+            if state == CheckerState.ENABLED:
                 enabled_checkers.append(checker_name)
-            elif state == CheckerState.disabled:
+            elif state == CheckerState.DISABLED:
                 self.__disabled_checkers.append(checker_name)

         if enabled_checkers:
diff --git a/analyzer/codechecker_analyzer/analyzers/clangsa/config_handler.py b/analyzer/codechecker_analyzer/analyzers/clangsa/config_handler.py
index cb30dd83c8..a6c7507bb8 100644
--- a/analyzer/codechecker_analyzer/analyzers/clangsa/config_handler.py
+++ b/analyzer/codechecker_analyzer/analyzers/clangsa/config_handler.py
@@ -21,7 +21,7 @@ class ClangSAConfigHandler(config_handler.AnalyzerConfigHandler):
     """

     def __init__(self, environ):
-        super(ClangSAConfigHandler, self).__init__()
+        super().__init__()
         self.ctu_dir = ''
         self.ctu_on_demand = False
         self.enable_z3 = False
diff --git a/analyzer/codechecker_analyzer/analyzers/clangsa/ctu_autodetection.py b/analyzer/codechecker_analyzer/analyzers/clangsa/ctu_autodetection.py
index 0915fbf9b4..dacf46fa90 100644
--- a/analyzer/codechecker_analyzer/analyzers/clangsa/ctu_autodetection.py
+++ b/analyzer/codechecker_analyzer/analyzers/clangsa/ctu_autodetection.py
@@ -120,21 +120,21 @@ def __init__(self, analyzer_binary, environ):
             LOG.debug(
                 'Trying to detect CTU capability, but analyzer binary is not '
                 'set!')
-            return None
+            return

         analyzer_version = invoke_binary_checked(
             self.__analyzer_binary, ['--version'], self.environ)

         if analyzer_version is False:
             LOG.debug('Failed to invoke command to get Clang version!')
-            return None
+            return

         version_parser = version.ClangVersionInfoParser(self.__analyzer_binary)
         version_info = version_parser.parse(analyzer_version)

         if not version_info:
             LOG.debug('Failed to parse Clang version information!')
-            return None
+            return

         self.__analyzer_version_info = version_info
diff --git a/analyzer/codechecker_analyzer/analyzers/clangsa/ctu_manager.py b/analyzer/codechecker_analyzer/analyzers/clangsa/ctu_manager.py
index 52254b6cd8..77caae42d9 100644
--- a/analyzer/codechecker_analyzer/analyzers/clangsa/ctu_manager.py
+++ b/analyzer/codechecker_analyzer/analyzers/clangsa/ctu_manager.py
@@ -30,6 +30,9 @@
 LOG = get_logger('analyzer')


+# The inheritance comes from the YAML parser; we can't solve it with fewer
+# ancestors.
+# pylint: disable=too-many-ancestors
 class LLVMComatibleYamlDumper(Dumper):
     def check_simple_key(self):
         """ Mark every keys as simple keys.
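
A pattern that recurs across the ClangSA, ClangTidy and Cppcheck diffs above is
turning get_binary_version() from a method that declared `self` on what is
really a class-level API into a proper classmethod stacked under
@abstractmethod. A minimal sketch of that decorator stacking, with a
hypothetical FakeAnalyzer subclass standing in for the real analyzers (this is
a simplified stand-in, not the actual SourceAnalyzer from analyzer_base.py);
note that @classmethod must be the outermost decorator:

    from abc import ABCMeta, abstractmethod

    class SourceAnalyzer(metaclass=ABCMeta):
        # @classmethod goes above @abstractmethod; the abstract classmethod
        # still forces subclasses to provide an implementation.
        @classmethod
        @abstractmethod
        def get_binary_version(cls, environ, details=False) -> str:
            """Return the version string reported by the analyzer binary."""

    class FakeAnalyzer(SourceAnalyzer):
        @classmethod
        def get_binary_version(cls, environ, details=False) -> str:
            # A real analyzer would invoke the binary via subprocess here.
            return "FakeAnalyzer 1.0.0 (demo)" if details else "1.0.0"

    print(FakeAnalyzer.get_binary_version({}))        # -> 1.0.0
    print(FakeAnalyzer.get_binary_version({}, True))  # -> FakeAnalyzer 1.0.0 (demo)
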
diff --git a/analyzer/codechecker_analyzer/analyzers/clangsa/ctu_triple_arch.py b/analyzer/codechecker_analyzer/analyzers/clangsa/ctu_triple_arch.py index e5f14c9150..3d2650c20f 100644 --- a/analyzer/codechecker_analyzer/analyzers/clangsa/ctu_triple_arch.py +++ b/analyzer/codechecker_analyzer/analyzers/clangsa/ctu_triple_arch.py @@ -62,6 +62,8 @@ def _find_arch_in_command(output): except ValueError: pass + return None + def get_triple_arch(action, source, config): """Returns the architecture part of the target triple for the given diff --git a/analyzer/codechecker_analyzer/analyzers/clangsa/result_handler.py b/analyzer/codechecker_analyzer/analyzers/clangsa/result_handler.py index 5d1e67f232..4585e7c09a 100644 --- a/analyzer/codechecker_analyzer/analyzers/clangsa/result_handler.py +++ b/analyzer/codechecker_analyzer/analyzers/clangsa/result_handler.py @@ -32,8 +32,7 @@ class ClangSAResultHandler(ResultHandler): def __init__(self, *args, **kwargs): self.analyzer_info = AnalyzerInfo(name='clangsa') - - super(ClangSAResultHandler, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) def postprocess_result( self, diff --git a/analyzer/codechecker_analyzer/analyzers/clangsa/version.py b/analyzer/codechecker_analyzer/analyzers/clangsa/version.py index 3749766b93..38f7578384 100644 --- a/analyzer/codechecker_analyzer/analyzers/clangsa/version.py +++ b/analyzer/codechecker_analyzer/analyzers/clangsa/version.py @@ -51,7 +51,7 @@ def parse(self, version_string): """Try to parse the version string using the predefined patterns.""" version_match = re.search(self.clang_version_pattern, version_string) if not version_match: - return + return None installed_dir_match = re.search( self.clang_installed_dir_pattern, version_string) diff --git a/analyzer/codechecker_analyzer/analyzers/clangtidy/analyzer.py b/analyzer/codechecker_analyzer/analyzers/clangtidy/analyzer.py index ded56b34d3..a03d372727 100644 --- a/analyzer/codechecker_analyzer/analyzers/clangtidy/analyzer.py +++ b/analyzer/codechecker_analyzer/analyzers/clangtidy/analyzer.py @@ -32,7 +32,7 @@ from ..flag import prepend_all from . import config_handler -from . import result_handler +from . import result_handler as clangtidy_result_handler LOG = get_logger('analyzer') @@ -49,8 +49,10 @@ def parse_checkers(tidy_output): line = line.strip() if line.startswith('Enabled checks:') or line == '': continue - elif line.startswith('clang-analyzer-'): + + if line.startswith('clang-analyzer-'): continue + match = pattern.match(line) if match: checkers.append((match.group(0), '')) @@ -66,10 +68,10 @@ def parse_checker_config_old(config_dump): config_dump -- clang-tidy config options YAML dump in pre-LLVM15 format. """ reg = re.compile(r'key:\s+(\S+)\s+value:\s+([^\n]+)') - all = re.findall(reg, config_dump) + result = re.findall(reg, config_dump) # tidy emits the checker option with a "." prefix, but we need a ":" - all = [(option[0].replace(".", ":"), option[1]) for option in all] - return all + result = [(option[0].replace(".", ":"), option[1]) for option in result] + return result def parse_checker_config_new(config_dump): @@ -137,8 +139,10 @@ def get_diagtool_bin(): LOG.debug("'diagtool' can not be found next to the clang binary (%s)!", clang_bin) + return None -def get_warnings(env=None): + +def get_warnings(environment=None): """ Returns list of warning flags by using diagtool. 
""" @@ -150,7 +154,7 @@ def get_warnings(env=None): try: result = subprocess.check_output( [diagtool_bin, 'tree'], - env=env, + env=environment, universal_newlines=True, encoding="utf-8", errors="ignore") @@ -164,8 +168,6 @@ def get_warnings(env=None): exc.output) raise - except OSError: - raise def _add_asterisk_for_group( @@ -215,6 +217,7 @@ def parse_version(tidy_output): match = version_re.match(tidy_output) if match: return match.group('version') + return None class ClangTidy(analyzer_base.SourceAnalyzer): @@ -233,12 +236,12 @@ def analyzer_binary(cls): .analyzer_binaries[cls.ANALYZER_NAME] @classmethod - def get_binary_version(self, environ, details=False) -> str: + def get_binary_version(cls, environ, details=False) -> str: # No need to LOG here, we will emit a warning later anyway. - if not self.analyzer_binary(): + if not cls.analyzer_binary(): return None - version = [self.analyzer_binary(), '--version'] + version = [cls.analyzer_binary(), '--version'] try: output = subprocess.check_output(version, env=environ, @@ -255,7 +258,7 @@ def get_binary_version(self, environ, details=False) -> str: return None - def add_checker_config(self, checker_cfg): + def add_checker_config(self, _): LOG.error("Not implemented yet") @classmethod @@ -349,8 +352,8 @@ def get_checker_list(self, config) -> Tuple[List[str], List[str]]: # enable a compiler warning, we first have to undo the -checks level # disable and then enable it, so we need both # -checks=compiler-diagnostic- and -W. - compiler_warnings = list() - enabled_checkers = list() + compiler_warnings = [] + enabled_checkers = [] has_checker_config = \ config.checker_config and config.checker_config != '{}' @@ -370,16 +373,16 @@ def get_checker_list(self, config) -> Tuple[List[str], List[str]]: if warning_name is not None: # -W and clang-diagnostic- are added as compiler warnings. - if warning_type == CheckerType.compiler: + if warning_type == CheckerType.COMPILER: LOG.warning("As of CodeChecker v6.22, the following usage" f"of '{checker_name}' compiler warning as a " "checker name is deprecated, please use " f"'clang-diagnostic-{checker_name[1:]}' " "instead.") - if state == CheckerState.enabled: + if state == CheckerState.ENABLED: compiler_warnings.append('-W' + warning_name) enabled_checkers.append(checker_name) - elif state == CheckerState.disabled: + elif state == CheckerState.DISABLED: if config.enable_all: LOG.warning("Disabling compiler warning with " f"compiler flag '-d W{warning_name}' " @@ -388,8 +391,8 @@ def get_checker_list(self, config) -> Tuple[List[str], List[str]]: # warning as -W..., if it is disabled, tidy can suppress when # specified in the -checks parameter list, so we add it there # as -clang-diagnostic-... . - elif warning_type == CheckerType.analyzer: - if state == CheckerState.enabled: + elif warning_type == CheckerType.ANALYZER: + if state == CheckerState.ENABLED: compiler_warnings.append('-W' + warning_name) enabled_checkers.append(checker_name) else: @@ -397,7 +400,7 @@ def get_checker_list(self, config) -> Tuple[List[str], List[str]]: continue - if state == CheckerState.enabled: + if state == CheckerState.ENABLED: enabled_checkers.append(checker_name) # By default all checkers are disabled and the enabled ones are added @@ -448,7 +451,7 @@ def construct_analyzer_cmd(self, result_handler): # The invocation should end in a Popen call with shell=False, # so no globbing should occur even if the checks argument # contains characters that would trigger globbing in the shell. 
- analyzer_cmd.append("-checks=%s" % ','.join(checks)) + analyzer_cmd.append(f"-checks={','.join(checks)}") analyzer_cmd.extend(config.analyzer_extra_arguments) @@ -567,7 +570,7 @@ def construct_result_handler(self, buildaction, report_output, See base class for docs. """ report_hash = self.config_handler.report_hash - res_handler = result_handler.ClangTidyResultHandler( + res_handler = clangtidy_result_handler.ClangTidyResultHandler( buildaction, report_output, report_hash) res_handler.skiplist_handler = skiplist_handler diff --git a/analyzer/codechecker_analyzer/analyzers/clangtidy/config_handler.py b/analyzer/codechecker_analyzer/analyzers/clangtidy/config_handler.py index 35a0d445ff..68625e126a 100644 --- a/analyzer/codechecker_analyzer/analyzers/clangtidy/config_handler.py +++ b/analyzer/codechecker_analyzer/analyzers/clangtidy/config_handler.py @@ -29,11 +29,8 @@ class ClangTidyConfigHandler(AnalyzerConfigHandler): Configuration handler for Clang-tidy analyzer. """ - def __init__(self): - super(ClangTidyConfigHandler, self).__init__() - def add_checker(self, checker_name, description='', - state=CheckerState.disabled): + state=CheckerState.DISABLED): """ Add additional checker if the 'take-config-from-directory' analyzer configuration option is not set. @@ -43,8 +40,7 @@ def add_checker(self, checker_name, description='', if is_compiler_warning(checker_name): return - super(ClangTidyConfigHandler, self).add_checker(checker_name, - description, state) + super().add_checker(checker_name, description, state) def set_checker_enabled(self, checker_name, enabled=True): """ @@ -54,5 +50,4 @@ def set_checker_enabled(self, checker_name, enabled=True): checker_name.startswith('clang-diagnostic'): self.add_checker(checker_name) - super(ClangTidyConfigHandler, self).set_checker_enabled(checker_name, - enabled) + super().set_checker_enabled(checker_name, enabled) diff --git a/analyzer/codechecker_analyzer/analyzers/clangtidy/result_handler.py b/analyzer/codechecker_analyzer/analyzers/clangtidy/result_handler.py index 952a038e1b..156d773d93 100644 --- a/analyzer/codechecker_analyzer/analyzers/clangtidy/result_handler.py +++ b/analyzer/codechecker_analyzer/analyzers/clangtidy/result_handler.py @@ -35,7 +35,7 @@ class ClangTidyResultHandler(ResultHandler): def __init__(self, *args, **kwargs): self.analyzer_info = AnalyzerInfo(name=AnalyzerResult.TOOL_NAME) - super(ClangTidyResultHandler, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) def postprocess_result( self, diff --git a/analyzer/codechecker_analyzer/analyzers/config_handler.py b/analyzer/codechecker_analyzer/analyzers/config_handler.py index e575c5ea57..e4c45eb9f0 100644 --- a/analyzer/codechecker_analyzer/analyzers/config_handler.py +++ b/analyzer/codechecker_analyzer/analyzers/config_handler.py @@ -28,16 +28,16 @@ # command-line, or belongs to a profile explicitly disabled on the # command-line, then it is considered to have a CheckerState of disabled. class CheckerState(Enum): - disabled = 1 - enabled = 2 + DISABLED = 1 + ENABLED = 2 class CheckerType(Enum): """ Checker type. """ - analyzer = 0 # A checker which is not a compiler warning. - compiler = 1 # A checker which specified as "-W" or "-Wno-". + ANALYZER = 0 # A checker which is not a compiler warning. + COMPILER = 1 # A checker which specified as "-W" or "-Wno-". 
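
Pylint 3 reports lowercase `Enum` members under `invalid-name` (C0103), since enum members are class-level constants; that is what drives the `disabled`/`enabled` to `DISABLED`/`ENABLED` rename above. A minimal sketch mirroring the enum in this hunk:

    from enum import Enum

    class CheckerState(Enum):
        # Enum members are constants, so pylint expects UPPER_CASE names.
        DISABLED = 1
        ENABLED = 2

    state = CheckerState.ENABLED
    # Members are singletons, so identity and equality both work.
    assert state is CheckerState.ENABLED
    assert state == CheckerState.ENABLED
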
def get_compiler_warning_name_and_type(checker_name): @@ -52,11 +52,11 @@ def get_compiler_warning_name_and_type(checker_name): if checker_name.startswith('W'): name = checker_name[4:] if \ checker_name.startswith('Wno-') else checker_name[1:] - return name, CheckerType.compiler + return name, CheckerType.COMPILER elif checker_name.startswith('clang-diagnostic-'): - return checker_name[17:], CheckerType.analyzer + return checker_name[17:], CheckerType.ANALYZER else: - return None, CheckerType.analyzer + return None, CheckerType.ANALYZER class AnalyzerConfigHandler(metaclass=ABCMeta): @@ -77,7 +77,7 @@ def __init__(self): self.__available_checkers = collections.OrderedDict() def add_checker(self, checker_name, description='', - state=CheckerState.disabled): + state=CheckerState.DISABLED): """ Add a checker to the available checkers' list. """ @@ -93,8 +93,8 @@ def set_checker_enabled(self, checker_name, enabled=True): if ch_name.startswith(checker_name) or \ ch_name.endswith(checker_name): _, description = values - state = CheckerState.enabled if enabled \ - else CheckerState.disabled + state = CheckerState.ENABLED if enabled \ + else CheckerState.DISABLED changed_states.append((ch_name, state, description)) # Enabled/disable checkers are stored in an ordered dict. When a @@ -137,7 +137,7 @@ def __gen_name_variations(self): def initialize_checkers(self, checkers, - cmdline_enable=[], + cmdline_enable=None, enable_all=False): """ Add checkers and set their "enabled/disabled" status. The following @@ -161,6 +161,9 @@ def initialize_checkers(self, enable_all -- Boolean value whether "--enable-all" is given. """ + if cmdline_enable is None: + cmdline_enable = [] + checker_labels = analyzer_context.get_context().checker_labels # Add all checkers marked as disabled. diff --git a/analyzer/codechecker_analyzer/analyzers/cppcheck/analyzer.py b/analyzer/codechecker_analyzer/analyzers/cppcheck/analyzer.py index 9b7bc4e58a..cc4e187c89 100644 --- a/analyzer/codechecker_analyzer/analyzers/cppcheck/analyzer.py +++ b/analyzer/codechecker_analyzer/analyzers/cppcheck/analyzer.py @@ -11,6 +11,7 @@ from collections import defaultdict # TODO distutils will be removed in python3.12 +# pylint: disable=deprecated-module from distutils.version import StrictVersion from pathlib import Path import os @@ -68,6 +69,7 @@ def parse_version(cppcheck_output): match = version_re.match(cppcheck_output) if match: return match.group('version') + return None class Cppcheck(analyzer_base.SourceAnalyzer): @@ -83,12 +85,12 @@ def analyzer_binary(cls): .analyzer_binaries[cls.ANALYZER_NAME] @classmethod - def get_binary_version(self, environ, details=False) -> str: + def get_binary_version(cls, environ, details=False) -> str: """ Get analyzer version information. """ # No need to LOG here, we will emit a warning later anyway. - if not self.analyzer_binary(): + if not cls.analyzer_binary(): return None - version = [self.analyzer_binary(), '--version'] + version = [cls.analyzer_binary(), '--version'] try: output = subprocess.check_output(version, env=environ, @@ -105,7 +107,7 @@ def get_binary_version(self, environ, details=False) -> str: return None - def add_checker_config(self, checker_cfg): + def add_checker_config(self, _): LOG.error("Checker configuration for Cppcheck is not implemented yet") def get_analyzer_mentioned_files(self, output): @@ -114,7 +116,6 @@ def get_analyzer_mentioned_files(self, output): its standard outputs, which should be analyzer_stdout or analyzer_stderr from a result handler. 
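
The `cmdline_enable=[]` to `cmdline_enable=None` rewrite in `initialize_checkers` above follows pylint's `dangerous-default-value` (W0102): a mutable default is evaluated once at definition time and then shared by every call. A short sketch of the failure mode and the `None` sentinel fix, with hypothetical function names:

    def broken(item, seen=[]):    # W0102: one list shared across calls
        seen.append(item)
        return seen

    def fixed(item, seen=None):
        if seen is None:          # a fresh list on every call
            seen = []
        seen.append(item)
        return seen

    assert broken(1) == [1]
    assert broken(2) == [1, 2]    # state leaked from the previous call
    assert fixed(1) == [1]
    assert fixed(2) == [2]
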
""" - pass def parse_analyzer_config(self): """ @@ -179,7 +180,7 @@ def construct_analyzer_cmd(self, result_handler): analyzer_cmd.append('--enable=all') for checker_name, value in config.checks().items(): - if value[0] == CheckerState.disabled: + if value[0] == CheckerState.DISABLED: # TODO python3.9 removeprefix method would be nicer # than startswith and a hardcoded slicing if checker_name.startswith("cppcheck-"): @@ -273,16 +274,17 @@ def get_checker_config(cls): """ return [] - def analyze(self, analyzer_cmd, res_handler, proc_callback=None): - env = None + def analyze(self, analyzer_cmd, res_handler, proc_callback=None, _=None): + environment = None original_env_file = os.environ.get( 'CODECHECKER_ORIGINAL_BUILD_ENV') if original_env_file: with open(original_env_file, 'rb') as env_file: - env = pickle.load(env_file, encoding='utf-8') + environment = pickle.load(env_file, encoding='utf-8') - return super().analyze(analyzer_cmd, res_handler, proc_callback, env) + return super().analyze( + analyzer_cmd, res_handler, proc_callback, environment) def post_analyze(self, result_handler): """ @@ -315,7 +317,7 @@ def post_analyze(self, result_handler): LOG.error(e.errno) @classmethod - def resolve_missing_binary(cls, configured_binary, env): + def resolve_missing_binary(cls, configured_binary, environ): """ In case of the configured binary for the analyzer is not found in the PATH, this method is used to find a callable binary. @@ -330,7 +332,7 @@ def resolve_missing_binary(cls, configured_binary, env): cppcheck = get_binary_in_path(['cppcheck'], r'^cppcheck(-\d+(\.\d+){0,2})?$', - env) + environ) if cppcheck: LOG.debug("Using '%s' for Cppcheck!", cppcheck) @@ -407,7 +409,7 @@ def construct_config_handler(cls, args): # in the label file. checker_labels = context.checker_labels checkers_from_label = checker_labels.checkers("cppcheck") - parsed_set = set([data[0] for data in checkers]) + parsed_set = set(data[0] for data in checkers) for checker in set(checkers_from_label): if checker not in parsed_set: checkers.append((checker, "")) diff --git a/analyzer/codechecker_analyzer/analyzers/cppcheck/config_handler.py b/analyzer/codechecker_analyzer/analyzers/cppcheck/config_handler.py index 11c41ce19d..297e36cd9c 100644 --- a/analyzer/codechecker_analyzer/analyzers/cppcheck/config_handler.py +++ b/analyzer/codechecker_analyzer/analyzers/cppcheck/config_handler.py @@ -15,4 +15,3 @@ class CppcheckConfigHandler(config_handler.AnalyzerConfigHandler): """ Configuration handler for Cppcheck analyzer. 
""" - pass diff --git a/analyzer/codechecker_analyzer/analyzers/cppcheck/result_handler.py b/analyzer/codechecker_analyzer/analyzers/cppcheck/result_handler.py index 6cf7e40a0e..68c28b02a9 100644 --- a/analyzer/codechecker_analyzer/analyzers/cppcheck/result_handler.py +++ b/analyzer/codechecker_analyzer/analyzers/cppcheck/result_handler.py @@ -34,7 +34,7 @@ class CppcheckResultHandler(ResultHandler): def __init__(self, *args, **kwargs): self.analyzer_info = AnalyzerInfo(name=AnalyzerResult.TOOL_NAME) - super(CppcheckResultHandler, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) def postprocess_result( self, diff --git a/analyzer/codechecker_analyzer/analyzers/gcc/analyzer.py b/analyzer/codechecker_analyzer/analyzers/gcc/analyzer.py index fa5f134406..6ec23aa7fd 100644 --- a/analyzer/codechecker_analyzer/analyzers/gcc/analyzer.py +++ b/analyzer/codechecker_analyzer/analyzers/gcc/analyzer.py @@ -5,10 +5,9 @@ # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception # # ------------------------------------------------------------------------- -""" -""" from collections import defaultdict # TODO distutils will be removed in python3.12 +# pylint: disable=deprecated-module from distutils.version import StrictVersion import os import pickle @@ -46,12 +45,10 @@ def add_checker_config(self, checker_cfg): # TODO pass - @classmethod def get_analyzer_mentioned_files(self, output): """ This is mostly used for CTU, which is absent in GCC. """ - pass def construct_analyzer_cmd(self, result_handler): """ @@ -75,7 +72,7 @@ def construct_analyzer_cmd(self, result_handler): analyzer_cmd.append('-fdiagnostics-format=sarif-stderr') for checker_name, value in config.checks().items(): - if value[0] == CheckerState.disabled: + if value[0] == CheckerState.DISABLED: # TODO python3.9 removeprefix method would be nicer # than startswith and a hardcoded slicing analyzer_cmd.append( @@ -93,11 +90,11 @@ def construct_analyzer_cmd(self, result_handler): return analyzer_cmd @classmethod - def get_analyzer_checkers(self): + def get_analyzer_checkers(cls): """ Return the list of the supported checkers. """ - command = [self.analyzer_binary(), "--help=warning"] + command = [cls.analyzer_binary(), "--help=warning"] checker_list = [] try: @@ -137,7 +134,7 @@ def get_checker_config(cls): # TODO return [] - def analyze(self, analyzer_cmd, res_handler, proc_callback=None): + def analyze(self, analyzer_cmd, res_handler, proc_callback=None, _=None): env = None original_env_file = os.environ.get( @@ -157,29 +154,27 @@ def post_analyze(self, result_handler: GccResultHandler): The report parsing of the Parse command is done recursively. """ - pass @classmethod - def resolve_missing_binary(cls, configured_binary, env): + def resolve_missing_binary(cls, configured_binary, environ): """ In case of the configured binary for the analyzer is not found in the PATH, this method is used to find a callable binary. """ # TODO - pass @classmethod - def get_binary_version(self, environ, details=False) -> str: + def get_binary_version(cls, environ, details=False) -> str: """ Return the analyzer version. """ # No need to LOG here, we will emit a warning later anyway. 
- if not self.analyzer_binary(): + if not cls.analyzer_binary(): return None if details: - version = [self.analyzer_binary(), '--version'] + version = [cls.analyzer_binary(), '--version'] else: - version = [self.analyzer_binary(), '-dumpfullversion'] + version = [cls.analyzer_binary(), '-dumpfullversion'] try: output = subprocess.check_output(version, env=environ, diff --git a/analyzer/codechecker_analyzer/analyzers/gcc/config_handler.py b/analyzer/codechecker_analyzer/analyzers/gcc/config_handler.py index 0e88e50ee6..e87117ff13 100644 --- a/analyzer/codechecker_analyzer/analyzers/gcc/config_handler.py +++ b/analyzer/codechecker_analyzer/analyzers/gcc/config_handler.py @@ -15,4 +15,3 @@ class GccConfigHandler(config_handler.AnalyzerConfigHandler): """ Configuration handler for Gcc analyzer. """ - pass diff --git a/analyzer/codechecker_analyzer/analyzers/gcc/result_handler.py b/analyzer/codechecker_analyzer/analyzers/gcc/result_handler.py index 2408017083..525caea52b 100644 --- a/analyzer/codechecker_analyzer/analyzers/gcc/result_handler.py +++ b/analyzer/codechecker_analyzer/analyzers/gcc/result_handler.py @@ -52,7 +52,7 @@ def __init__(self, *args, **kwargs): self.analyzer_info = AnalyzerInfo(name=AnalyzerResult.TOOL_NAME) self.gcc_analyzer_result = AnalyzerResult() - super(GccResultHandler, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) def postprocess_result( self, @@ -77,7 +77,7 @@ def postprocess_result( os.path.basename(self.analyzed_source_file) + self.buildaction_hash + ".sarif")) - with open(gcc_dest_file_name, 'w') as f: + with open(gcc_dest_file_name, 'w', encoding="utf-8") as f: f.write(gcc_stderr) assert os.path.exists(gcc_dest_file_name) diff --git a/analyzer/codechecker_analyzer/analyzers/result_handler_base.py b/analyzer/codechecker_analyzer/analyzers/result_handler_base.py index 35bc4836fd..2e35ce3561 100644 --- a/analyzer/codechecker_analyzer/analyzers/result_handler_base.py +++ b/analyzer/codechecker_analyzer/analyzers/result_handler_base.py @@ -74,8 +74,6 @@ def __init__(self, action, workspace, report_hash_type=None): @property def buildaction(self): - """ - """ return self.__buildaction @property @@ -193,10 +191,8 @@ def postprocess_result( Postprocess result if needed. Should be called after the analyses finished. """ - pass def handle_results(self, client): """ Handle the results and return report statistics. 
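
The `open(gcc_dest_file_name, 'w', encoding="utf-8")` change above resolves `unspecified-encoding` (W1514): in text mode, `open()` without an `encoding` argument falls back to the locale's preferred encoding, so the same code can write different bytes on different machines. A minimal sketch with a hypothetical file name:

    # Implicit, locale-dependent encoding (W1514):
    #     with open("out.sarif", "w") as f: ...

    # Explicit and portable:
    with open("out.sarif", "w", encoding="utf-8") as f:
        f.write('{"version": "2.1.0"}\n')
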
""" - pass diff --git a/analyzer/codechecker_analyzer/arg.py b/analyzer/codechecker_analyzer/arg.py index 6d49cecabf..86fa1e1670 100644 --- a/analyzer/codechecker_analyzer/arg.py +++ b/analyzer/codechecker_analyzer/arg.py @@ -39,8 +39,7 @@ class OrderedCheckersAction(argparse.Action): def __init__(self, option_strings, dest, nargs=None, **kwargs): if nargs is not None: raise ValueError("nargs not allowed") - super(OrderedCheckersAction, self).__init__(option_strings, dest, - **kwargs) + super().__init__(option_strings, dest, **kwargs) def __call__(self, parser, namespace, value, option_string=None): @@ -64,8 +63,7 @@ def __init__(self, option_strings, dest, nargs=None, **kwargs): if nargs != '*': raise ValueError("nargs must be '*' for backward compatibility " "reasons!") - super(OrderedConfigAction, self).__init__(option_strings, dest, - nargs, **kwargs) + super().__init__(option_strings, dest, nargs, **kwargs) def __call__(self, parser, namespace, value, option_string=None): diff --git a/analyzer/codechecker_analyzer/buildlog/build_action.py b/analyzer/codechecker_analyzer/buildlog/build_action.py index 73393a9c66..692e164a6a 100644 --- a/analyzer/codechecker_analyzer/buildlog/build_action.py +++ b/analyzer/codechecker_analyzer/buildlog/build_action.py @@ -5,7 +5,6 @@ # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception # # ------------------------------------------------------------------------- -"""""" class BuildAction: @@ -35,19 +34,19 @@ class BuildAction: def __init__(self, **kwargs): # Filtered list of options. for slot in BuildAction.__slots__: - super(BuildAction, self).__setattr__(slot, kwargs[slot]) + super().__setattr__(slot, kwargs[slot]) def __str__(self): """ Return all the members of the __slots__ list. """ info = [(member, getattr(self, member)) for member in self.__slots__] - return ('\n'.join([f'{key}: {value}' for key, value in info])) + return '\n'.join([f'{key}: {value}' for key, value in info]) def __setattr__(self, attr, value): if hasattr(self, attr) and getattr(self, attr) != value: raise AttributeError("BuildAction is immutable") - super(BuildAction, self).__setattr__(attr, value) + super().__setattr__(attr, value) def __eq__(self, other): return other.original_command == self.original_command diff --git a/analyzer/codechecker_analyzer/buildlog/build_manager.py b/analyzer/codechecker_analyzer/buildlog/build_manager.py index 7066c161df..66cfcba1d1 100644 --- a/analyzer/codechecker_analyzer/buildlog/build_manager.py +++ b/analyzer/codechecker_analyzer/buildlog/build_manager.py @@ -26,7 +26,7 @@ LOG = get_logger('buildlogger') -def execute_buildcmd(command, silent=False, env=None, cwd=None): +def execute_buildcmd(command, silent=False, environ=None, cwd=None): """ Execute the the build command and continuously write the output from the process to the standard output. 
@@ -34,7 +34,7 @@ def execute_buildcmd(command, silent=False, env=None, cwd=None): proc = subprocess.Popen( command, bufsize=-1, - env=env, + env=environ, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=cwd, diff --git a/analyzer/codechecker_analyzer/buildlog/host_check.py b/analyzer/codechecker_analyzer/buildlog/host_check.py index a07a2bbfa6..e73187b17a 100644 --- a/analyzer/codechecker_analyzer/buildlog/host_check.py +++ b/analyzer/codechecker_analyzer/buildlog/host_check.py @@ -36,9 +36,9 @@ def check_intercept(env) -> bool: if not res: return True - else: - LOG.debug('Failed to run: "%s"', ' '.join(intercept_cmd)) - return False + + LOG.debug('Failed to run: "%s"', ' '.join(intercept_cmd)) + return False except subprocess.CalledProcessError: LOG.debug('Failed to run: "%s", process returned non-zero exit code.', ' '.join(intercept_cmd)) @@ -49,7 +49,7 @@ def check_intercept(env) -> bool: # It is possible that another build logger is available. LOG.debug(oerr) LOG.debug('Failed to run: "%s"', ' '.join(intercept_cmd)) - return False + return False def check_ldlogger(env) -> bool: diff --git a/analyzer/codechecker_analyzer/buildlog/log_parser.py b/analyzer/codechecker_analyzer/buildlog/log_parser.py index a31e57045d..615d468f67 100644 --- a/analyzer/codechecker_analyzer/buildlog/log_parser.py +++ b/analyzer/codechecker_analyzer/buildlog/log_parser.py @@ -8,7 +8,7 @@ from collections import namedtuple -# pylint: disable=no-name-in-module +# pylint: disable=deprecated-module from distutils.spawn import find_executable from enum import Enum from pathlib import Path @@ -373,6 +373,7 @@ def __get_compiler_err(cmd: List[str]) -> Optional[str]: LOG.error( "Error during process execution: %s\n%s\n", ' '.join(map(shlex.quote, cmd)), oerr.strerror) + return None @staticmethod def __parse_compiler_includes(compile_cmd: List[str]): @@ -431,8 +432,7 @@ def get_compiler_includes(compiler, language, compiler_flags): LOG.debug( "Retrieving default includes via %s", ' '.join(map(shlex.quote, cmd))) - ICI = ImplicitCompilerInfo - include_dirs = ICI.__parse_compiler_includes(cmd) + include_dirs = ImplicitCompilerInfo.__parse_compiler_includes(cmd) return list(map(os.path.normpath, include_dirs)) @@ -474,7 +474,7 @@ def get_compiler_standard(compiler, language): is fetched. language -- The programming lenguage being compiled (e.g. 'c' or 'c++') """ - VERSION_C = """ + version_c = """ #ifdef __STDC_VERSION__ # if __STDC_VERSION__ >= 201710L # error CC_FOUND_STANDARD_VER#17 @@ -492,7 +492,7 @@ def get_compiler_standard(compiler, language): #endif """ - VERSION_CPP = """ + version_cpp = """ #ifdef __cplusplus # if __cplusplus >= 201703L # error CC_FOUND_STANDARD_VER#17 @@ -517,7 +517,7 @@ def get_compiler_standard(compiler, language): encoding='utf-8') as source: with source.file as f: - f.write(VERSION_C if language == 'c' else VERSION_CPP) + f.write(version_c if language == 'c' else version_cpp) err = ImplicitCompilerInfo.\ __get_compiler_err([compiler, source.name]) @@ -693,6 +693,8 @@ def __collect_clang_compile_opts(flag_iterator, details): details['analyzer_options'].append(flag_iterator.item) return True + return False + def __collect_transform_xclang_opts(flag_iterator, details): """Some specific -Xclang constucts need to be filtered out. 
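
The `check_intercept` restructuring above applies `no-else-return` (R1705): when the `if` branch ends in `return`, the `else:` level only adds indentation, so the remainder can be dedented. Sketch:

    def run_ok(returncode):
        if returncode == 0:
            return True
        # Reached only on failure; no else: needed.
        return False
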
@@ -1190,7 +1192,6 @@ def extend_compilation_database_entries(compilation_database): class CompileCommandEncoder(json.JSONEncoder): """JSON serializer for objects not serializable by default json code""" - # pylint: disable=method-hidden def default(self, o): if isinstance(o, BuildAction): return o.to_dict() @@ -1270,7 +1271,8 @@ def parse_unique_log(compilation_database, used to execute the analysis """ try: - uniqued_build_actions = dict() + uniqued_build_actions = {} + uniqueing_re = None if compile_uniqueing == "alpha": build_action_uniqueing = CompileActionUniqueingType.SOURCE_ALPHA diff --git a/analyzer/codechecker_analyzer/cmd/analyze.py b/analyzer/codechecker_analyzer/cmd/analyze.py index c1c75ccb50..e2116cdba7 100644 --- a/analyzer/codechecker_analyzer/cmd/analyze.py +++ b/analyzer/codechecker_analyzer/cmd/analyze.py @@ -40,7 +40,7 @@ header_file_extensions = ( '.h', '.hh', '.H', '.hp', '.hxx', '.hpp', '.HPP', '.h++', '.tcc') -epilog_env_var = f""" +EPILOG_ENV_VAR = """ CC_ANALYZERS_FROM_PATH Set to `yes` or `1` to enforce taking the analyzers from the `PATH` instead of the given binaries. CC_ANALYZER_BIN Set the absolute paths of an analyzer binaries. @@ -54,7 +54,7 @@ variable. """ -epilog_issue_hashes = """ +EPILOG_ISSUE_HASHES = """ Issue hashes ------------------------------------------------ - By default the issue hash calculation method for 'Clang Static Analyzer' is @@ -105,7 +105,7 @@ https://github.com/Ericsson/codechecker/blob/master/docs/analyzer/report_identification.md """ -epilog_exit_status = """ +EPILOG_EXIT_STATUS = """ Exit status ------------------------------------------------ 0 - Successful analysis @@ -134,11 +134,11 @@ def get_argparser_ctor_args(): 'epilog': f""" Environment variables ------------------------------------------------ -{epilog_env_var} +{EPILOG_ENV_VAR} -{epilog_issue_hashes} +{EPILOG_ISSUE_HASHES} -{epilog_exit_status} +{EPILOG_EXIT_STATUS} Compilation databases can be created by instrumenting your project's build via 'CodeChecker log'. To transform the results of the analysis to a human-friendly @@ -922,7 +922,7 @@ def __get_skip_handlers(args, compile_commands) -> SkipListHandlers: # Creates a skip file where all source files will be skipped except # the given source files and all the header files. - skip_files = ['+{0}'.format(f) for f in source_file_paths] + skip_files = [f'+{f}' for f in source_file_paths] skip_files.extend(['+/*.h', '+/*.H', '+/*.tcc']) skip_files.append('-*') content = "\n".join(skip_files) @@ -1188,8 +1188,8 @@ def main(args): 'name': 'codechecker', 'action_num': len(actions), 'command': sys.argv, - 'version': "{0} ({1})".format(context.package_git_tag, - context.package_git_hash), + 'version': + f"{context.package_git_tag} ({context.package_git_hash})", 'working_directory': os.getcwd(), 'output_path': args.output_path, 'result_source_files': {}, @@ -1261,7 +1261,6 @@ def main(args): LOG.debug("Sending analyzer statistics finished.") except Exception: LOG.debug("Failed to send analyzer statistics!") - pass # Generally exit status is set by sys.exit() call in CodeChecker. 
However,
     # exit code 3 has a special meaning: it returns when the underlying
@@ -1274,3 +1273,5 @@
     for analyzer_data in metadata_tool['analyzers'].values():
         if analyzer_data['analyzer_statistics']['failed'] != 0:
             return 3
+
+    return 0
diff --git a/analyzer/codechecker_analyzer/cmd/analyzers.py b/analyzer/codechecker_analyzer/cmd/analyzers.py
index e677631852..aa6e4b2722 100644
--- a/analyzer/codechecker_analyzer/cmd/analyzers.py
+++ b/analyzer/codechecker_analyzer/cmd/analyzers.py
@@ -184,8 +184,8 @@ def uglify(text):
     header = list(map(uglify, header))
 
     rows = []
-    for analyzer_name in analyzer_types.supported_analyzers:
-        analyzer_class = analyzer_types.supported_analyzers[analyzer_name]
+    for analyzer_name, analyzer_class in \
+            analyzer_types.supported_analyzers.items():
         check_env = context.analyzer_env
         version = analyzer_class.get_binary_version(check_env)
         if not version:
diff --git a/analyzer/codechecker_analyzer/cmd/check.py b/analyzer/codechecker_analyzer/cmd/check.py
index 17e872f90a..2e20d19e6d 100644
--- a/analyzer/codechecker_analyzer/cmd/check.py
+++ b/analyzer/codechecker_analyzer/cmd/check.py
@@ -22,20 +22,21 @@
     OrderedCheckersAction, OrderedConfigAction, \
     analyzer_config, checker_config, existing_abspath
 
-from codechecker_common import arg, cmd_config, logger
-from codechecker_common.compatibility.multiprocessing import cpu_count
-from codechecker_common.source_code_comment_handler import \
-    REVIEW_STATUS_VALUES
-
 from codechecker_analyzer.cmd.analyze import \
-    epilog_env_var as analyzer_epilog_env_var, \
-    epilog_issue_hashes as analyzer_epilog_issue_hashes
+    EPILOG_ENV_VAR as analyzer_epilog_env_var, \
+    EPILOG_ISSUE_HASHES as analyzer_epilog_issue_hashes
 
 from codechecker_analyzer.cmd.log import \
-    epilog_env_var as log_epilog_env_var
+    EPILOG_ENV_VAR as log_epilog_env_var
 
 from codechecker_analyzer.cmd.parse import \
-    epilog_env_var as parse_epilog_env_var
+    EPILOG_ENV_VAR as parse_epilog_env_var
+
+from codechecker_common import arg, cmd_config, logger
+from codechecker_common.compatibility.multiprocessing import cpu_count
+from codechecker_common.source_code_comment_handler import \
+    REVIEW_STATUS_VALUES
+
 
 LOG = logger.get_logger('system')
 
@@ -786,8 +787,7 @@ def add_arguments_to_parser(parser):
                         choices=REVIEW_STATUS_VALUES,
                         default=["confirmed", "unreviewed"],
                         help="Filter results by review statuses. Valid "
-                             "values are: {0}".format(
-                                 ', '.join(REVIEW_STATUS_VALUES)))
+                             f"values are: {', '.join(REVIEW_STATUS_VALUES)}")
 
     logger.add_verbose_arguments(parser)
     parser.set_defaults(
diff --git a/analyzer/codechecker_analyzer/cmd/checkers.py b/analyzer/codechecker_analyzer/cmd/checkers.py
index e55032dc8d..fc26a2c7d8 100644
--- a/analyzer/codechecker_analyzer/cmd/checkers.py
+++ b/analyzer/codechecker_analyzer/cmd/checkers.py
@@ -21,11 +21,11 @@
 
 from codechecker_analyzer import analyzer_context
 from codechecker_analyzer.analyzers import analyzer_types
+from codechecker_analyzer.analyzers.config_handler import CheckerState
 
 from codechecker_common import arg, logger
 from codechecker_common.output import USER_FORMATS
 from codechecker_common.checker_labels import CheckerLabels
-from codechecker_analyzer.analyzers.config_handler import CheckerState
 
 LOG = logger.get_logger('system')
 
@@ -48,9 +48,9 @@ def get_argparser_ctor_args():
 
         # Epilogue is shown after the arguments when the help is queried
         # directly.
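
The `.format()` conversions above come from `consider-using-f-string` (C0209). One pitfall with Python's implicit concatenation of adjacent string literals: every fragment that interpolates needs its own `f` prefix, which is easy to drop when only the second line of a wrapped message has placeholders; note how the review-status help string above carries the prefix on its wrapped fragment. Sketch:

    values = ["confirmed", "unreviewed"]

    # Missing prefix: the braces are printed literally.
    text = ("Valid "
            "values are: {', '.join(values)}")
    assert "{" in text

    # Correct: the interpolating fragment carries the f prefix.
    text = ("Valid "
            f"values are: {', '.join(values)}")
    assert text == "Valid values are: confirmed, unreviewed"
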
- 'epilog': """ + 'epilog': f""" The list of checkers that are enabled or disabled by default can be edited by -editing "profile:default" labels in the directory '{}'. +editing "profile:default" labels in the directory '{labels_dir_path}'. Example scenario: List checkers by labels ----------------------------------------- @@ -78,7 +78,7 @@ def get_argparser_ctor_args(): List labels and their available values: CodeChecker checkers --label CodeChecker checkers --label severity -""".format(os.path.join(labels_dir_path)), +""", # Help is shown when the "parent" CodeChecker command lists the # individual subcommands. @@ -205,12 +205,12 @@ def __guideline_to_label( if args.guideline in guidelines: return f'guideline:{args.guideline}' - else: - if args.guideline.find(':') == -1: - LOG.error('--guideline parameter is either or ' - ':') - sys.exit(1) - return args.guideline + elif args.guideline.find(':') == -1: + LOG.error('--guideline parameter is either or ' + ':') + sys.exit(1) + + return args.guideline def __get_detailed_checker_info( @@ -261,8 +261,8 @@ def __get_detailed_checker_info( for checker, (state, description) in config_handler.checks().items(): labels = cl.labels_of_checker(checker, analyzer) - state = CheckerState.enabled if ('profile', 'default') in labels \ - else CheckerState.disabled + state = CheckerState.ENABLED if ('profile', 'default') in labels \ + else CheckerState.DISABLED checker_info[analyzer].append( (state, checker, analyzer, description, sorted(labels))) @@ -335,8 +335,8 @@ def __print_guidelines(args: argparse.Namespace, cl: CheckerLabels): if args.output_format == 'rows': for row in rows: - print('Guideline: {}'.format(row[0])) - print('Rules: {}'.format(row[1])) + print(f'Guideline: {row[0]}') + print(f'Rules: {row[1]}') else: print(twodim.to_str(args.output_format, header, rows)) @@ -387,7 +387,7 @@ def __format_row(row: Tuple) -> Tuple: row -- A tuple with detailed checker info coming from __get_detailed_checker_info() function. """ - state = '+' if row[0] == CheckerState.enabled else '-' + state = '+' if row[0] == CheckerState.ENABLED else '-' labels = ', '.join(f'{k}:{v}' for k, v in row[4]) return state, row[1], row[2], row[3], labels @@ -399,10 +399,8 @@ def __print_checkers_custom_format(checkers: Iterable): format. Due to its customness it isn't implemented in module twodim. """ for checker in checkers: - if checker[0] == CheckerState.enabled: - status = 'enabled' - elif checker[0] == CheckerState.disabled: - status = 'disabled' + status = 'enabled' if checker[0] == CheckerState.ENABLED \ + else 'disabled' print(checker[1]) print(' Status:', status) @@ -423,9 +421,9 @@ def __print_checkers_json_format(checkers: Iterable, detailed: bool): structure differs from other twodim formats. """ def checker_info_dict(c): - if c[0] == CheckerState.enabled: + if c[0] == CheckerState.ENABLED: status = 'enabled' - elif c[0] == CheckerState.disabled: + elif c[0] == CheckerState.DISABLED: status = 'disabled' else: status = 'unknown' @@ -480,6 +478,8 @@ def __print_checkers(args: argparse.Namespace, cl: CheckerLabels): for analyzer in args.analyzers: if labels: checkers = cl.checkers_by_labels(labels, analyzer) + # Variable "checkers" is consumed immediately. 
+            # pylint: disable=cell-var-from-loop
             result.extend(
                 filter(lambda x: x[1] in checkers, checker_info[analyzer]))
         else:
diff --git a/analyzer/codechecker_analyzer/cmd/fixit.py b/analyzer/codechecker_analyzer/cmd/fixit.py
index 9064bdef1b..33bcd3d020 100644
--- a/analyzer/codechecker_analyzer/cmd/fixit.py
+++ b/analyzer/codechecker_analyzer/cmd/fixit.py
@@ -99,7 +99,8 @@ def get_location_by_offset(filename, offset):
     """
     This function returns the line and column number in the given file which
     is located at the given offset (i.e. number of characters including new
-    line characters).
+    line characters). None is returned when the offset is greater than the
+    file length.
     """
     with open(filename, encoding='utf-8', errors='ignore') as f:
         for row, line in enumerate(f, 1):
@@ -109,6 +110,8 @@
             else:
                 return row, offset + 1
 
+    return None
+
 
 def clang_tidy_fixit_filter(content, checker_names, file_paths, reports,
                             modification_time, interactive):
diff --git a/analyzer/codechecker_analyzer/cmd/log.py b/analyzer/codechecker_analyzer/cmd/log.py
index 8cca7168af..1f6aabe296 100644
--- a/analyzer/codechecker_analyzer/cmd/log.py
+++ b/analyzer/codechecker_analyzer/cmd/log.py
@@ -24,7 +24,7 @@
 from codechecker_common import arg, logger
 
 
-epilog_env_var = f"""
+EPILOG_ENV_VAR = """
 CC_LOGGER_ABS_PATH If the environment variable is defined, all relative
                    paths in the compilation commands after '-I, -idirafter,
                    -imultilib, -iquote, -isysroot -isystem,
@@ -78,18 +78,18 @@ def get_argparser_ctor_args():
         'formatter_class': arg.RawDescriptionDefaultHelpFormatter,
 
         # Description is shown when the command's help is queried directly
-        'description': """
+        'description': f"""
 Runs the given build command and records the executed compilation steps.
 These steps are written to the output file in a JSON format.
 
-Available build logger tool that will be used is '{0}'.{1}
-""".format('intercept-build' if is_intercept else 'ld-logger',
-           ldlogger_settings),
+Available build logger tool that will be used is
+'{"intercept-build" if is_intercept else "ld-logger"}'.{ldlogger_settings}
+""",
 
         'epilog': f"""
 Environment variables
 ------------------------------------------------
-{epilog_env_var}
+{EPILOG_ENV_VAR}
 """,
 
         # Help is shown when the "parent" CodeChecker command lists the
diff --git a/analyzer/codechecker_analyzer/cmd/parse.py b/analyzer/codechecker_analyzer/cmd/parse.py
index f9bd1fcb56..5c0124aaa5 100644
--- a/analyzer/codechecker_analyzer/cmd/parse.py
+++ b/analyzer/codechecker_analyzer/cmd/parse.py
@@ -49,7 +49,7 @@ def init_logger(level, stream=None, logger_name='system'):
 
 EXPORT_TYPES = ['html', 'json', 'codeclimate', 'gerrit', 'baseline']
 
-epilog_env_var = f"""
+EPILOG_ENV_VAR = """
 CC_CHANGED_FILES Path of changed files json from Gerrit. Use it when
                  generating gerrit output.
 CC_REPO_DIR Root directory of the sources, i.e. the directory
""" -epilog_exit_status = """ +EPILOG_EXIT_STATUS = """ 0 - No report 1 - CodeChecker error 2 - At least one report emitted by an analyzer @@ -85,11 +85,11 @@ def get_argparser_ctor_args(): 'epilog': f""" Environment variables ------------------------------------------------ -{epilog_env_var} +{EPILOG_ENV_VAR} Exit status ------------------------------------------------ -{epilog_exit_status} +{EPILOG_EXIT_STATUS} """, # Help is shown when the "parent" CodeChecker command lists the @@ -206,8 +206,7 @@ def add_arguments_to_parser(parser): choices=REVIEW_STATUS_VALUES, default=["confirmed", "unreviewed"], help="Filter results by review statuses. Valid " - "values are: {0}".format( - ', '.join(REVIEW_STATUS_VALUES))) + f"values are: {', '.join(REVIEW_STATUS_VALUES)}") group = parser.add_argument_group("file filter arguments") @@ -374,6 +373,8 @@ def get_output_file_path(default_file_name: str) -> Optional[str]: if output_dir_path: return os.path.join(output_dir_path, default_file_name) + return None + skip_handlers = SkipListHandlers() if 'files' in args: items = [f"+{file_path}" for file_path in args.files] diff --git a/analyzer/codechecker_analyzer/compilation_database.py b/analyzer/codechecker_analyzer/compilation_database.py index d9753f12b7..7696ad3d89 100644 --- a/analyzer/codechecker_analyzer/compilation_database.py +++ b/analyzer/codechecker_analyzer/compilation_database.py @@ -52,6 +52,8 @@ def find_closest_compilation_database(path: str) -> Optional[str]: if path == root: break + return None + def change_args_to_command_in_comp_db(compile_commands: List[Dict]): """ @@ -166,6 +168,7 @@ def __select_compilation_database( longest = os.path.commonpath(comp_db_paths + [source_file]) if longest in comp_db_paths: return os.path.join(longest, COMPILATION_DATABASE) + return None # Case 1: analysis_input is a compilation database JSON file. diff --git a/analyzer/codechecker_analyzer/env.py b/analyzer/codechecker_analyzer/env.py index dd957e2ae6..454a59c54c 100644 --- a/analyzer/codechecker_analyzer/env.py +++ b/analyzer/codechecker_analyzer/env.py @@ -5,7 +5,6 @@ # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception # # ------------------------------------------------------------------------- -"""""" import os diff --git a/analyzer/codechecker_analyzer/host_check.py b/analyzer/codechecker_analyzer/host_check.py index dcf80d1f60..1445db51dc 100644 --- a/analyzer/codechecker_analyzer/host_check.py +++ b/analyzer/codechecker_analyzer/host_check.py @@ -45,7 +45,7 @@ def check_analyzer(compiler_bin, env): if oerr.errno == errno.ENOENT: LOG.error(oerr) LOG.error('Failed to run: "%s"', ' '.join(clang_version_cmd)) - return False + return False def has_analyzer_config_option(clang_bin, config_option_name): @@ -68,7 +68,7 @@ def has_analyzer_config_option(clang_bin, config_option_name): match = re.search(config_option_name, out) if match: LOG.debug("Config option '%s' is available.", config_option_name) - return (True if match else False) + return bool(match) except OSError: LOG.error('Failed to run: "%s"', ' '.join(cmd)) @@ -79,12 +79,12 @@ def has_analyzer_option(clang_bin, feature): """Test if the analyzer has a specific option. 
Testing a feature is done by compiling a dummy file.""" - with tempfile.NamedTemporaryFile("w", encoding='utf-8') as inputFile: - inputFile.write("void foo(){}") - inputFile.flush() + with tempfile.NamedTemporaryFile("w", encoding='utf-8') as input_file: + input_file.write("void foo(){}") + input_file.flush() cmd = [clang_bin, "-x", "c", "--analyze"] cmd.extend(feature) - cmd.extend([inputFile.name, "-o", "-"]) + cmd.extend([input_file.name, "-o", "-"]) LOG.debug('run: "%s"', ' '.join(cmd)) try: diff --git a/analyzer/codechecker_analyzer/makefile.py b/analyzer/codechecker_analyzer/makefile.py index a473622791..8c5b52196b 100644 --- a/analyzer/codechecker_analyzer/makefile.py +++ b/analyzer/codechecker_analyzer/makefile.py @@ -81,10 +81,10 @@ def __write_header(self, mfile): file is auto generated by CodeChecker and print out the exact CodeChecker version. """ - mfile.write("#\n# Autogenerated by CodeChecker v{0}.\n#\n" + mfile.write("#\n# Autogenerated by CodeChecker " + f"v{analyzer_context.get_context().version}.\n#\n" "# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT " - "YOU ARE DOING.\n#\n\n".format( - analyzer_context.get_context().version)) + "YOU ARE DOING.\n#\n\n") def __write_env_exports(self, mfile): """ Exports environment variables. """ @@ -93,13 +93,12 @@ def __write_env_exports(self, mfile): bin_dir = os.getenv('CC_BIN_DIR', '') python3_bin = os.path.join(data_files_dir_path, 'python3', 'bin') - mfile.write('export PATH := {0}:{1}:${{PATH}}\n'.format( - bin_dir, python3_bin)) + mfile.write(f'export PATH := {bin_dir}:{python3_bin}:${{PATH}}\n') for env_var in ["LD_LIBRARY_PATH", "PYTHONPATH", "PYTHONHOME"]: value = os.getenv(env_var) if value: - mfile.write('export {0} := {1}\n'.format(env_var, value)) + mfile.write(f'export {env_var} := {value}\n') mfile.write('\n') @@ -115,8 +114,8 @@ def __write_default_targets(self, mfile): for analyzer in self.__analyzers: analyzer_name = self.__format_analyzer_type(analyzer) - mfile.write("# Target to run only '{0}' analysis.\n" - "all: all_{0}\n\n".format(analyzer_name)) + mfile.write(f"# Target to run only '{analyzer_name}' analysis.\n" + f"all: all_{analyzer_name}\n\n") def __get_ctu_pre_analysis_cmds(self, action): """ Get CTU pre-analysis commands. """ @@ -128,7 +127,7 @@ def __get_ctu_pre_analysis_cmds(self, action): # Get command to generate PCH file. cmd, ast_dir = generate_ast_cmd(action, self.__config, triple_arch, action.source) - cmds.append('mkdir -p {0}'.format(ast_dir)) + cmds.append(f'mkdir -p {ast_dir}') cmds.append(' '.join(cmd)) # Get command to create CTU index file. @@ -137,11 +136,10 @@ def __get_ctu_pre_analysis_cmds(self, action): fnmap_tmp_dir = os.path.join(self.__ctu_dir, triple_arch, self.__ctu_temp_fnmap_folder) - cmds.append('mkdir -p {0}'.format(fnmap_tmp_dir)) + cmds.append(f'mkdir -p {fnmap_tmp_dir}') func_def_map = os.path.join(fnmap_tmp_dir, str(uuid.uuid4())) - cmds.append('{0} > {1} 2>/dev/null'.format(' '.join(cmd), - func_def_map)) + cmds.append(f"{' '.join(cmd)} > {func_def_map} 2>/dev/null") # Modify externalDefMap.txt file to contain relative paths and # modify the extension to '.cpp.ast'. @@ -151,9 +149,10 @@ def __get_ctu_pre_analysis_cmds(self, action): # the sed output to a temp file and overwrite the original file with # this file. 
tmp_func_def_map = func_def_map + '_tmp' - cmds.append('sed -E "s|/(.*)|ast/\\1.ast|" {0} > {1}'.format( - func_def_map, tmp_func_def_map)) - cmds.append('mv -f {0} {1}'.format(tmp_func_def_map, func_def_map)) + cmds.append( + 'sed -E "s|/(.*)|ast/\\1.ast|" ' + f'{func_def_map} > {tmp_func_def_map}') + cmds.append(f'mv -f {tmp_func_def_map} {func_def_map}') return cmds @@ -169,8 +168,7 @@ def __get_stats_pre_analysis_cmds(self, action): output_id = source_filename + str(uuid.uuid4()) + '.stat' stat_for_source = os.path.join(self.__stat_tmp_dir, output_id) - cmds.append('{0} > {1} 2>&1'.format(' '.join(stats_cmd), - stat_for_source)) + cmds.append(f"{' '.join(stats_cmd)} > {stat_for_source} 2>&1") return cmds @@ -189,14 +187,11 @@ def __write_pre_analysis_targets(self, mfile, action, pre_all_target): commands = '\n'.join(['\t@' + c for c in pre_all_cmds]) target = self.__get_target_name(action) - mfile.write('{0}:\n' - '\t@echo "{4} Pre-analysis of {3}."\n' - '{1}\n' - '{2}: {0}\n\n'.format('pre_' + target, - commands, - pre_all_target, - action.source, - self.__log_info)) + mfile.write(f"{'pre_' + target}:\n" + f'\t@echo "{self.__log_info} ' + f'Pre-analysis of {action.source}."\n' + f'{commands}\n' + f"{pre_all_target}: {'pre_' + target}\n\n") def __write_post_pre_analysis_targets(self, mfile, pre_all_target): """ Creates targets to post-process pre-analysis results. """ @@ -205,24 +200,22 @@ def __write_post_pre_analysis_targets(self, mfile, pre_all_target): if self.__ctu_data: # Merge individual function maps into a global one. - post_all_cmds.append("find {0} -maxdepth 1 -mindepth 1 -type d " - "-exec merge-clang-extdef-mappings " - "-i {{}}/{1} -o {{}}/externalDefMap.txt " - "\\;".format( - self.__ctu_dir, - self.__ctu_temp_fnmap_folder)) + post_all_cmds.append( + f"find {self.__ctu_dir} -maxdepth 1 -mindepth 1 -type d " + "-exec merge-clang-extdef-mappings " + f"-i {{}}/{self.__ctu_temp_fnmap_folder} " + "-o {}/externalDefMap.txt \\;") if self.__statistics_data: # Collect statistics from the clang analyzer output. - post_all_cmds.append("post-process-stats -i {0} {1}".format( - self.__stat_tmp_dir, - self.__stats_dir)) + post_all_cmds.append( + "post-process-stats " + f"-i {self.__stat_tmp_dir} {self.__stats_dir}") commands = '\n'.join(['\t@' + c for c in post_all_cmds]) - mfile.write('post_{0}: {0}\n' - '{1}\n\n'.format(pre_all_target, - commands)) + mfile.write(f'post_{pre_all_target}: {pre_all_target}\n' + f'{commands}\n\n') def __write_analysis_targets(self, mfile, action, post_pre_all_target): """ Creates normal analysis targets. 
""" @@ -254,24 +247,18 @@ def __write_analysis_targets(self, mfile, action, post_pre_all_target): "--filename", file_name, analyzer_output_file] - command = "@{0} > {1}\n" \ - "\t@{2} 1>/dev/null\n" \ - "\t@rm -rf {1}\n".format(' '.join(analyzer_cmd), - analyzer_output_file, - ' '.join(report_converter_cmd)) + command = f"@{' '.join(analyzer_cmd)} > {analyzer_output_file}\n" \ + f"\t@{' '.join(report_converter_cmd)} 1>/dev/null\n" \ + f"\t@rm -rf {analyzer_output_file}\n" else: - command = "@{0} 1>/dev/null".format(' '.join(analyzer_cmd)) - - mfile.write('{0}: {1}\n' - '\t@echo "{6} {4} analyze {5}."\n' - '\t{2}\n' - 'all_{3}: {0}\n\n'.format(target, - post_pre_all_target, - command, - analyzer_name, - action.analyzer_type, - action.source, - self.__log_info)) + command = f"@{' '.join(analyzer_cmd)} 1>/dev/null" + + mfile.write( + f'{target}: {post_pre_all_target}\n' + f'\t@echo "{self.__log_info} {action.analyzer_type} ' + f'analyze {action.source}."\n' + f'\t{command}\n' + f'all_{analyzer_name}: {target}\n\n') def create(self, actions): """ Creates a Makefile from the given actions. """ diff --git a/analyzer/codechecker_analyzer/pre_analysis_manager.py b/analyzer/codechecker_analyzer/pre_analysis_manager.py index 41086cffe9..9adc885e24 100644 --- a/analyzer/codechecker_analyzer/pre_analysis_manager.py +++ b/analyzer/codechecker_analyzer/pre_analysis_manager.py @@ -41,7 +41,7 @@ def collect_statistics(action, source, clangsa_config, statistics_data): if not can_collect: LOG.debug('Can not collect statistical data.') - return + return None # TODO: shlex.join() will be more convenient in Python 3.8. LOG.debug_analyzer(' '.join(map(shlex.quote, cmd))) @@ -73,20 +73,20 @@ def collect_statistics(action, source, clangsa_config, statistics_data): # Progress reporting. -progress_checked_num = None -progress_actions = None +PROGRESS_CHECKED_NUM = None +PROGRESS_ACTIONS = None def init_worker(checked_num, action_num): - global progress_checked_num, progress_actions - progress_checked_num = checked_num - progress_actions = action_num + global PROGRESS_CHECKED_NUM, PROGRESS_ACTIONS + PROGRESS_CHECKED_NUM = checked_num + PROGRESS_ACTIONS = action_num def pre_analyze(params): action, clangsa_config, skip_handlers, ctu_data, statistics_data = params - progress_checked_num.value += 1 + PROGRESS_CHECKED_NUM.value += 1 if skip_handlers and skip_handlers.should_skip(action.source): return @@ -96,8 +96,8 @@ def pre_analyze(params): _, source_filename = os.path.split(action.source) LOG.info("[%d/%d] %s", - progress_checked_num.value, - progress_actions.value, source_filename) + PROGRESS_CHECKED_NUM.value, + PROGRESS_ACTIONS.value, source_filename) try: if ctu_data: @@ -150,14 +150,13 @@ def run_pre_analysis(actions, clangsa_config, """ Run multiple pre analysis jobs before the actual analysis. """ - # pylint: disable=no-member multiprocess module members. 
LOG.info('Pre-analysis started.') if ctu_data: LOG.info("Collecting data for ctu analysis.") if statistics_data: LOG.info("Collecting data for statistical analysis.") - def signal_handler(signum, frame): + def signal_handler(signum, _): try: pool.terminate() manager.shutdown() diff --git a/analyzer/codechecker_analyzer/suppress_handler.py b/analyzer/codechecker_analyzer/suppress_handler.py index 63fa7ae489..0a422323f0 100644 --- a/analyzer/codechecker_analyzer/suppress_handler.py +++ b/analyzer/codechecker_analyzer/suppress_handler.py @@ -85,7 +85,7 @@ def skip_suppress_status(self, status) -> bool: def get_suppressed(self, report: Report) -> bool: """ True if the given report is suppressed. """ - return any([suppress for suppress in self.__suppress_info - if suppress[0] == report.report_hash and - suppress[1] == report.file.name and - self.skip_suppress_status(suppress[3])]) + return any(suppress for suppress in self.__suppress_info + if suppress[0] == report.report_hash and + suppress[1] == report.file.name and + self.skip_suppress_status(suppress[3])) diff --git a/analyzer/requirements_py/dev/requirements.txt b/analyzer/requirements_py/dev/requirements.txt index 3089b72601..c9f40124f1 100644 --- a/analyzer/requirements_py/dev/requirements.txt +++ b/analyzer/requirements_py/dev/requirements.txt @@ -1,6 +1,6 @@ pytest==7.3.1 pycodestyle==2.12.0 -pylint==2.8.2 +pylint==3.2.4 mkdocs==1.5.3 coverage==5.5.0 diff --git a/analyzer/tests/Makefile b/analyzer/tests/Makefile index a2ceaf51f3..88e85bf67e 100644 --- a/analyzer/tests/Makefile +++ b/analyzer/tests/Makefile @@ -23,7 +23,7 @@ pycodestyle_in_env: venv_dev PYLINT_TEST_CMD = $(MAKE) -C $(CURRENT_DIR)/tools/merge_clang_extdef_mappings pylint && \ $(MAKE) -C $(CURRENT_DIR)/tools/statistics_collector pylint && \ - PYLINTRC=$(ROOT)/.pylintrc pylint -j0 ./bin/** ./codechecker_analyzer ./tests/** + PYLINTRC=$(ROOT)/.pylintrc pylint -j0 ./codechecker_analyzer ./tests/** pylint: $(PYLINT_TEST_CMD) diff --git a/analyzer/tests/functional/analyze/test_analyze.py b/analyzer/tests/functional/analyze/test_analyze.py index 1f197aaab4..478ea84a42 100644 --- a/analyzer/tests/functional/analyze/test_analyze.py +++ b/analyzer/tests/functional/analyze/test_analyze.py @@ -47,7 +47,7 @@ def teardown_class(self): print("Removing: " + TEST_WORKSPACE) shutil.rmtree(TEST_WORKSPACE) - def setup_method(self, method): + def setup_method(self, _): """Setup the environment for the tests.""" self.test_workspace = os.environ['TEST_WORKSPACE'] @@ -72,7 +72,7 @@ def setup_method(self, method): self.disabling_modeling_checker_regex = re.compile( r"analyzer-disable-checker=.*unix.cstring.CStringModeling.*") - def teardown_method(self, method): + def teardown_method(self, _): """Restore environment after tests have ran.""" os.chdir(self.__old_pwd) if os.path.isdir(self.report_dir): @@ -194,20 +194,20 @@ def test_compiler_info_files(self): errcode = process.returncode self.assertEqual(errcode, 0) - info_File = os.path.join(reports_dir, 'compiler_info.json') - self.assertEqual(os.path.exists(info_File), True) - self.assertNotEqual(os.stat(info_File).st_size, 0) + info_file = os.path.join(reports_dir, 'compiler_info.json') + self.assertEqual(os.path.exists(info_file), True) + self.assertNotEqual(os.stat(info_file).st_size, 0) # Test the validity of the json files. 
-        with open(info_File, 'r', encoding="utf-8", errors="ignore") as f:
+        with open(info_file, 'r', encoding="utf-8", errors="ignore") as f:
             try:
                 data = json.load(f)
                 self.assertEqual(len(data), 1)
                 # For clang we do not collect anything.
                 self.assertTrue("g++" in data)
             except ValueError:
-                self.fail("json.load should successfully parse the file %s"
-                          % info_File)
+                self.fail("json.load should successfully parse "
+                          f"the file {info_file}")
 
     def test_compiler_info_file_is_loaded(self):
         '''
@@ -637,7 +637,6 @@ def check_unique_compilation_db(
             is_b: bool,
             is_s: bool
     ):
-        """ """
         with open(compilation_db_file_path,
                   encoding="utf-8", errors="ignore") as json_file:
             data = json.load(json_file)
@@ -1080,6 +1079,7 @@ def test_cppcheck_standard(self):
         out = subprocess.run(analyze_cmd,
                              cwd=self.test_dir,
                              # env=self.env,
+                             check=False,
                              stdout=subprocess.PIPE).stdout.decode()
 
         # Test correct handover.
@@ -1100,6 +1100,7 @@
         out = subprocess.run(analyze_cmd,
                              cwd=self.test_dir,
                              # env=self.env,
+                             check=False,
                              stdout=subprocess.PIPE).stdout.decode()
 
         # Test if the standard is correctly transformed
@@ -1225,6 +1226,7 @@ def test_analyzer_and_checker_config(self):
                                    encoding="utf-8", errors="ignore")
         out, _ = process.communicate()
+        print(out)
 
         # It's printed as a found report and in the checker statistics.
         # Note: If this test case fails, its pretty sure that something totally
diff --git a/analyzer/tests/functional/analyze_and_parse/test_analyze_and_parse.py b/analyzer/tests/functional/analyze_and_parse/test_analyze_and_parse.py
index e677c6e3b5..b50a6b2b81 100644
--- a/analyzer/tests/functional/analyze_and_parse/test_analyze_and_parse.py
+++ b/analyzer/tests/functional/analyze_and_parse/test_analyze_and_parse.py
@@ -120,7 +120,7 @@ def teardown_class(cls):
         print("Removing: " + TEST_WORKSPACE)
         shutil.rmtree(TEST_WORKSPACE)
 
-    def teardown_method(self, method):
+    def teardown_method(self, _):
         """Restore environment after a particular test has run."""
         output_dir = AnalyzeParseTestCase.test_workspaces['OUTPUT']
         if os.path.isdir(output_dir):
@@ -241,15 +241,14 @@ def check_one_file(self, path, mode):
             # The replacement on this line will be the following:
             # [] - x.cpp contains misspelled ...
             sep = re.escape(os.sep)
-            line = re.sub(r'^(\[\w+\]\s)(?P<path>.+{0})'
-                          r'(.+\:\d+\:\d+\:\s.*\s\[.*\])$'.format(sep),
+            line = re.sub(rf'^(\[\w+\]\s)(?P<path>.+{sep})'
+                          r'(.+\:\d+\:\d+\:\s.*\s\[.*\])$',
                           r'\1\3', line)
-            line = re.sub(r'^\[\] - (?P<path>.+{0})'
-                          r'(.+ contains misspelled.+)'.format(sep),
+            line = re.sub(rf'^\[\] - (?P<path>.+{sep})'
+                          r'(.+ contains misspelled.+)',
                           r'[] - \2', line)
 
-            if not any([line.startswith(prefix) for prefix
-                        in skip_prefixes]):
+            if not any(line.startswith(prefix) for prefix in skip_prefixes):
                 post_processed_output.append(line)
 
         print(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Actual output below:")
@@ -258,7 +257,7 @@
         print(correct_output)
         print("Test output file: " + path)
 
-        self.maxDiff = None
+        self.maxDiff = None  # pylint: disable=invalid-name
         self.assertEqual(''.join(post_processed_output), correct_output)
 
     def test_json_output_for_macros(self):
@@ -346,9 +345,9 @@ def test_codeclimate_output(self):
 
     def test_gerrit_output(self):
         """ Test gerrit output of the parse command.
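
Dropping the square brackets inside `any(...)` in the `check_one_file` hunk above follows `use-a-generator` (R1729): with a generator expression, `any()` can short-circuit at the first match instead of first building the entire list. Sketch:

    lines = ["NOTE x", "WARN y", "ERROR z"] * 1000

    # Materialises a 3000-element list before any() starts:
    found = any([ln.startswith("ERROR") for ln in lines])

    # Stops at the first hit, with no intermediate list:
    found = any(ln.startswith("ERROR") for ln in lines)
    assert found
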
""" - env = self.env.copy() + environ = self.env.copy() report_url = "localhost:8080/index.html" - env["CC_REPORT_URL"] = report_url + environ["CC_REPORT_URL"] = report_url changed_file_path = os.path.join(self.test_dir, 'files_changed') @@ -362,17 +361,18 @@ def test_gerrit_output(self): "macros.cpp": {}} changed_file.write(json.dumps(changed_files)) - env["CC_CHANGED_FILES"] = changed_file_path + environ["CC_CHANGED_FILES"] = changed_file_path test_project_macros = os.path.join(self.test_workspaces['NORMAL'], "test_files", "macros") - env["CC_REPO_DIR"] = test_project_macros + environ["CC_REPO_DIR"] = test_project_macros extract_cmd = ['CodeChecker', 'parse', test_project_macros, '-e', 'gerrit'] print(" ".join(extract_cmd)) - out, _, result = call_command(extract_cmd, cwd=self.test_dir, env=env) + out, _, result = call_command( + extract_cmd, cwd=self.test_dir, env=environ) self.assertEqual(result, 2, "Parsing not found any issue.") print(out) @@ -383,7 +383,7 @@ def test_gerrit_output(self): self.assertEqual(lbls["Code-Review"], -1) self.assertEqual(review_data["message"], "CodeChecker found 1 issue(s) in the code. " - "See: {0}".format(report_url)) + f"See: {report_url}") self.assertEqual(review_data["tag"], "jenkins") # Because the CC_CHANGED_FILES is set we will see reports only for @@ -493,9 +493,9 @@ def test_gerrit_export_exit_code_when_all_skipped(self): are skipped. """ - env = self.env.copy() + environ = self.env.copy() report_url = "localhost:8080/index.html" - env["CC_REPORT_URL"] = report_url + environ["CC_REPORT_URL"] = report_url changed_file_path = os.path.join(self.test_dir, 'files_changed') @@ -509,11 +509,11 @@ def test_gerrit_export_exit_code_when_all_skipped(self): "macros.cpp": {}} changed_file.write(json.dumps(changed_files)) - env["CC_CHANGED_FILES"] = changed_file_path + environ["CC_CHANGED_FILES"] = changed_file_path test_project_macros = os.path.join(self.test_workspaces['NORMAL'], "test_files", "macros") - env["CC_REPO_DIR"] = test_project_macros + environ["CC_REPO_DIR"] = test_project_macros skip_file_path = os.path.join(self.test_dir, 'skipall.txt') extract_cmd = ['CodeChecker', 'parse', test_project_macros, @@ -521,7 +521,7 @@ def test_gerrit_export_exit_code_when_all_skipped(self): print(" ".join(extract_cmd)) standard_output, _, result = call_command( - extract_cmd, cwd=self.test_dir, env=env) + extract_cmd, cwd=self.test_dir, env=environ) os.remove(changed_file_path) self.assertEqual(result, 0, "Parsing should not found any issue.") self.assertIn( @@ -713,7 +713,7 @@ def test_html_output_for_empty_plist(self): self.assertFalse(err) self.assertTrue(f'No report data in {plist_file_path}' in out) - self.assertTrue(f'Html file was generated:' in out) + self.assertTrue('Html file was generated:' in out) self.assertTrue('Summary' in out) self.assertTrue('statistics.html' in out) self.assertTrue('index.html' in out) diff --git a/analyzer/tests/functional/cmdline/test_cmdline.py b/analyzer/tests/functional/cmdline/test_cmdline.py index a9ff1d17db..5c52f1108a 100644 --- a/analyzer/tests/functional/cmdline/test_cmdline.py +++ b/analyzer/tests/functional/cmdline/test_cmdline.py @@ -19,11 +19,11 @@ from libtest import env -def run_cmd(cmd, env=None): +def run_cmd(cmd, environ=None): print(cmd) proc = subprocess.Popen( cmd, - env=env, + env=environ, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding="utf-8", @@ -39,6 +39,11 @@ class TestCmdline(unittest.TestCase): Simple tests to check CodeChecker command line. 
""" + def __init__(self, methodName): + self.test_workspace = None + self._codechecker_cmd = None + super().__init__(methodName) + def setup_class(self): """Setup the environment for the tests.""" @@ -57,7 +62,7 @@ def teardown_class(self): print("Removing: " + TEST_WORKSPACE) shutil.rmtree(TEST_WORKSPACE) - def setup_method(self, method): + def setup_method(self, _): # TEST_WORKSPACE is automatically set by test package __init__.py . self.test_workspace = os.environ['TEST_WORKSPACE'] diff --git a/analyzer/tests/functional/config/test_config.py b/analyzer/tests/functional/config/test_config.py index 9bd9feac35..de571670e9 100644 --- a/analyzer/tests/functional/config/test_config.py +++ b/analyzer/tests/functional/config/test_config.py @@ -42,7 +42,7 @@ def teardown_class(self): print("Removing: " + TEST_WORKSPACE) shutil.rmtree(TEST_WORKSPACE) - def setup_method(self, method): + def setup_method(self, _): # TEST_WORKSPACE is automatically set by test package __init__.py . self.test_workspace = os.environ['TEST_WORKSPACE'] diff --git a/analyzer/tests/functional/ctu/test_ctu.py b/analyzer/tests/functional/ctu/test_ctu.py index b7ad388430..1092319c7a 100644 --- a/analyzer/tests/functional/ctu/test_ctu.py +++ b/analyzer/tests/functional/ctu/test_ctu.py @@ -55,11 +55,11 @@ def teardown_class(self): print('Removing: ' + TEST_WORKSPACE) shutil.rmtree(TEST_WORKSPACE) - def setup_method(self, method): + def setup_method(self, _): """ Set up workspace.""" - self.setUpWith('test_files_c', 'buildlog.json', 'reports_c') + self.setup_with('test_files_c', 'buildlog.json', 'reports_c') - def setUpWith(self, input_dir, buildlog_json, report_dir): + def setup_with(self, input_dir, buildlog_json, report_dir): """ Set up workspace with a given parameters. If called multiple times, teardown_method() must be called before this function. @@ -98,7 +98,7 @@ def setUpWith(self, input_dir, buildlog_json, report_dir): self.__old_pwd = os.getcwd() os.chdir(self.test_workspace) - def teardown_method(self, method): + def teardown_method(self, _): """ Tear down workspace.""" shutil.rmtree(self.report_dir, ignore_errors=True) @@ -111,17 +111,17 @@ def test_ctu_non_existing_dir(self): directory then analysis shouldn't fail in the pre-analysis phase of CTU. 
""" - with open(self.buildlog) as f: + with open(self.buildlog, encoding='utf-8') as f: buildlog = json.load(f) buildlog.append(buildlog[0]) buildlog[-1]['directory'] = 'non_existing' - with open(self.buildlog, 'w') as f: + with open(self.buildlog, 'w', encoding='utf-8') as f: json.dump(buildlog, f) cmd = [self._codechecker_cmd, 'analyze', '-o', self.report_dir, '--analyzers', 'clangsa', '--ctu', self.buildlog] - proc = run(cmd, cwd=self.test_dir, env=self.env, + proc = run(cmd, cwd=self.test_dir, env=self.env, check=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE) self.assertIn('Analysis finished', str(proc.stdout)) @@ -134,7 +134,8 @@ def test_ctu_loading_mode_requires_ctu_mode(self): self.buildlog] self.assertEqual(1, - run(cmd, cwd=self.test_dir, env=self.env).returncode) + run(cmd, cwd=self.test_dir, env=self.env, + check=False).returncode) @skipUnlessCTUCapable def test_system_headers_last(self): @@ -154,7 +155,7 @@ def test_system_headers_last(self): '--analyzers', 'clangsa', '--ctu', self.buildlog, '--verbose', 'debug'] out = run( - cmd, cwd=self.test_dir, env=self.env, + cmd, cwd=self.test_dir, env=self.env, check=False, stdout=subprocess.PIPE).stdout.decode() ast_cmd = next(filter( lambda line: 'Generating AST using' in line, out.splitlines())) @@ -244,14 +245,14 @@ def __test_ctu_analyze_cpp(self, on_demand=False): """ Test CTU analyze phase. """ self.teardown_method(self.__test_ctu_analyze_cpp) - self.setUpWith('test_files_cpp', 'buildlog.json', 'reports_cpp') + self.setup_with('test_files_cpp', 'buildlog.json', 'reports_cpp') self.__do_ctu_collect(on_demand=on_demand) # We specifically check whether spaces in the external function map # file work properly. The format of the file changed in between # clang-15 and clang-16, and this call checks whether clang is new # enough. - if not self.__is_externalFnDef_in_new_format(on_demand=on_demand): + if not self.__is_externalfndef_in_new_format(): return output = self.__do_ctu_analyze(on_demand=on_demand) self.__check_ctu_analyze_cpp(output) @@ -299,7 +300,7 @@ def __check_ctu_collect(self, on_demand): ast_dir = os.path.join(ctu_dir, arch, 'ast') self.assertTrue(os.path.isdir(ast_dir)) - def __is_externalFnDef_in_new_format(self, on_demand): + def __is_externalfndef_in_new_format(self): """ The format of the external function map file changed in between clang-15 and clang-16, check whether this is the updated format. @@ -307,16 +308,19 @@ def __is_externalFnDef_in_new_format(self, on_demand): ctu_dir = os.path.join(self.report_dir, 'ctu-dir') self.assertTrue(os.path.isdir(ctu_dir)) + for arch in glob.glob(os.path.join(ctu_dir, '*')): new_map_file = os.path.join(ctu_dir, arch, 'externalDefMap.txt') try: - fn = open(new_map_file, "r") + fn = open(new_map_file, "r", encoding='utf-8') line = fn.readline() return line[0].isdigit() except IOError: print("Error: File does not appear to exist.") + return False + def __do_ctu_analyze(self, on_demand): """ Execute CTU analyze phase. """ @@ -357,7 +361,8 @@ def __check_ctu_analyze(self, output): connections_file = connections_files[0] self.assertTrue(connections_file.startswith('main.c')) - with open(os.path.join(connections_dir, connections_file)) as f: + with open(os.path.join( + connections_dir, connections_file), encoding='utf-8') as f: self.assertTrue(f.readline().endswith('lib.c')) def __check_ctu_analyze_cpp(self, output): @@ -409,7 +414,7 @@ def test_ctu_ondemand_yaml_format(self): generated textual format. 
""" self.teardown_method(self.test_ctu_ondemand_yaml_format) - self.setUpWith('test_files_c', 'complex_buildlog.json', 'reports_c') + self.setup_with('test_files_c', 'complex_buildlog.json', 'reports_c') # Copy test files to a directory which file path will be longer than # 128 chars to test the yaml parser. @@ -456,5 +461,6 @@ def assert_no_linebreak(invocation_list_file: IO): self.assertRegex(line, '^ *[-/]') for invocation_list_path in invocation_list_paths: - with open(invocation_list_path) as invocation_list_file: + with open(invocation_list_path, encoding='utf-8') \ + as invocation_list_file: assert_no_linebreak(invocation_list_file) diff --git a/analyzer/tests/functional/ctu_failure/test_ctu_failure.py b/analyzer/tests/functional/ctu_failure/test_ctu_failure.py index 98a5a13a35..7cfd191961 100644 --- a/analyzer/tests/functional/ctu_failure/test_ctu_failure.py +++ b/analyzer/tests/functional/ctu_failure/test_ctu_failure.py @@ -55,7 +55,7 @@ def teardown_class(self): print('Removing: ' + TEST_WORKSPACE) shutil.rmtree(TEST_WORKSPACE) - def setup_method(self, method): + def setup_method(self, _): """ Set up workspace.""" # TEST_WORKSPACE is automatically set by test package __init__.py . @@ -85,7 +85,7 @@ def setup_method(self, method): setattr(self, DISPLAY_PROGRESS_ATTR, is_ctu_display_progress_capable( - self.__getClangSaPath())) + self.__get_clangsa_path())) print("Has display-ctu-progress=true? " + str(getattr(self, DISPLAY_PROGRESS_ATTR))) @@ -110,7 +110,7 @@ def __set_up_test_dir(self, project_path): encoding="utf-8", errors="ignore") as log_file: json.dump(build_json, log_file) - def teardown_method(self, method): + def teardown_method(self, _): """ Tear down workspace.""" shutil.rmtree(self.report_dir, ignore_errors=True) @@ -417,13 +417,15 @@ def __do_ctu_all(self, on_demand, extra_args=None): out, _, result = call_command(cmd, cwd=self.test_dir, env=self.env) return out, result - def __getClangSaPath(self): + def __get_clangsa_path(self): cmd = [self._codechecker_cmd, 'analyzers', '--details', '-o', 'json'] output, _, result = call_command(cmd, cwd=self.test_workspace, env=self.env) self.assertEqual(result, 0, "Failed to run analyzer.") json_data = json.loads(output) - for i in range(len(json_data)): - if json_data[i]["name"] == "clangsa": - return json_data[i]["path"] + for data in json_data: + if data["name"] == "clangsa": + return data["path"] + + return None diff --git a/analyzer/tests/functional/fixit/test_fixit.py b/analyzer/tests/functional/fixit/test_fixit.py index b98e0ff2c9..0e85ca2751 100644 --- a/analyzer/tests/functional/fixit/test_fixit.py +++ b/analyzer/tests/functional/fixit/test_fixit.py @@ -21,6 +21,7 @@ import time import unittest +# pylint: disable=deprecated-module from distutils.spawn import find_executable from libtest import env @@ -50,7 +51,7 @@ def teardown_class(self): print("Removing: " + TEST_WORKSPACE) shutil.rmtree(TEST_WORKSPACE) - def setup_method(self, method): + def setup_method(self, _): # TEST_WORKSPACE is automatically set by test package __init__.py . self.test_workspace = os.environ['TEST_WORKSPACE'] @@ -64,7 +65,7 @@ def setup_method(self, method): # Change working dir to testfile dir so CodeChecker can be run easily. 
self.__old_pwd = os.getcwd() - def teardown_method(self, method): + def teardown_method(self, _): """Restore environment after tests have run.""" os.chdir(self.__old_pwd) if os.path.isdir(self.report_dir): @@ -131,7 +132,8 @@ def test_fixit_list(self): yaml_files = os.listdir(fixit_dir) self.assertEqual(len(yaml_files), 1) - with open(os.path.join(fixit_dir, yaml_files[0])) as f: + with open(os.path.join(fixit_dir, yaml_files[0]), encoding='utf-8') \ + as f: content = f.read() self.assertIn("v.empty()", content) diff --git a/analyzer/tests/functional/host_check/test_host_check.py b/analyzer/tests/functional/host_check/test_host_check.py index f003c28dc9..a516b33a6a 100644 --- a/analyzer/tests/functional/host_check/test_host_check.py +++ b/analyzer/tests/functional/host_check/test_host_check.py @@ -15,7 +15,7 @@ import codechecker_analyzer.host_check as hc -class Test_has_analyzer_option(unittest.TestCase): +class TestHasAnalyzerOption(unittest.TestCase): def test_existing_option(self): self.assertEqual( hc.has_analyzer_option("clang", diff --git a/analyzer/tests/functional/skip/test_skip.py b/analyzer/tests/functional/skip/test_skip.py index 41d0c462d1..79950be4ad 100644 --- a/analyzer/tests/functional/skip/test_skip.py +++ b/analyzer/tests/functional/skip/test_skip.py @@ -51,7 +51,7 @@ def teardown_class(self): print("Removing: " + TEST_WORKSPACE) shutil.rmtree(TEST_WORKSPACE) - def setup_method(self, method): + def setup_method(self, _): # TEST_WORKSPACE is automatically set by test package __init__.py . self.test_workspace = os.environ['TEST_WORKSPACE'] @@ -233,10 +233,10 @@ def test_analyze_only_header(self): # Check that we analyzed all source files which depend on the header # file. - self.assertTrue(any(["a.cpp" in f - for f in report_dir_files])) - self.assertTrue(any(["b.cpp" in f - for f in report_dir_files])) + self.assertTrue(any("a.cpp" in f + for f in report_dir_files)) + self.assertTrue(any("b.cpp" in f + for f in report_dir_files)) # Get reports only from the header file. out, _, _ = self.__run_parse(["--file", "*/lib.h"]) @@ -307,14 +307,14 @@ def check_parse_out(out): shutil.copy(Path(self.test_workspace, "build.json"), build_json) compilation_commands = None - with open(build_json, 'r') as f: + with open(build_json, 'r', encoding='utf-8') as f: compilation_commands = json.load(f) for entry in compilation_commands: entry["directory"] = str(Path(self.test_workspace)) entry["file"] = str(Path("rel_simple", entry["file"])) - with open(build_json, 'w') as f: + with open(build_json, 'w', encoding='utf-8') as f: json.dump(compilation_commands, f) # Do the CodeChecker Analyze with --file @@ -329,7 +329,7 @@ def check_parse_out(out): for entry in compilation_commands: entry["directory"] = "." - with open(build_json, 'w') as f: + with open(build_json, 'w', encoding='utf-8') as f: json.dump(compilation_commands, f) # Do the CodeChecker Analyze with --file @@ -357,7 +357,7 @@ def test_analyze_header_with_file_option_and_intercept_json(self): # We used to crash when the build log contained 'arguments' fields in # place of 'command'. Test that we don't crash on it anymore by # manually changing 'command' to 'arguments' here. 
- with open(build_json) as f: + with open(build_json, encoding='utf-8') as f: build_actions = json.load(f) for ba in build_actions: ba['arguments'] = shlex.split(ba['command']) @@ -365,7 +365,7 @@ def test_analyze_header_with_file_option_and_intercept_json(self): build_json = os.path.join(self.test_workspace, "build_intercept.json") - with open(build_json, 'w') as f: + with open(build_json, 'w', encoding='utf-8') as f: json.dump(build_actions, f) header_file = os.path.join(self.test_dir, "simple", "skip.h") diff --git a/analyzer/tests/functional/suppress/test_suppress.py b/analyzer/tests/functional/suppress/test_suppress.py index b227c8d8b2..60eb9ff91c 100644 --- a/analyzer/tests/functional/suppress/test_suppress.py +++ b/analyzer/tests/functional/suppress/test_suppress.py @@ -126,7 +126,7 @@ def teardown_class(self): print("Removing: " + TEST_WORKSPACE) shutil.rmtree(TEST_WORKSPACE) - def setup_method(self, method): + def setup_method(self, _): self._test_workspace = os.environ['TEST_WORKSPACE'] self._testproject_data = env.setup_test_proj_cfg(self._test_workspace) @@ -171,7 +171,8 @@ def test_source_suppress_export(self): diff = set(expected_content).symmetric_difference( generated_content) print("difference") - {print(elem) for elem in diff} + for elem in diff: + print(elem) self.assertEqual(len(diff), 0, "The generated suppress file does not " diff --git a/analyzer/tests/functional/z3/test_z3.py b/analyzer/tests/functional/z3/test_z3.py index 75e0c0ffcb..5dd223b322 100644 --- a/analyzer/tests/functional/z3/test_z3.py +++ b/analyzer/tests/functional/z3/test_z3.py @@ -10,6 +10,7 @@ """ Z3 feature test. """ +# pylint: disable=deprecated-module from distutils import util import os import shutil @@ -91,7 +92,7 @@ def teardown_class(self): print("Removing: " + TEST_WORKSPACE) shutil.rmtree(TEST_WORKSPACE, ignore_errors=True) - def setup_method(self, method): + def setup_method(self, _): # TEST_WORKSPACE is automatically set by test package __init__.py . 
test_workspace = os.environ['TEST_WORKSPACE'] diff --git a/analyzer/tests/libtest/codechecker.py b/analyzer/tests/libtest/codechecker.py index 50a885bf56..31f485ebe6 100644 --- a/analyzer/tests/libtest/codechecker.py +++ b/analyzer/tests/libtest/codechecker.py @@ -14,6 +14,7 @@ import shlex import subprocess +# pylint: disable=deprecated-module from distutils import util from codechecker_analyzer import host_check diff --git a/analyzer/tests/libtest/ctu_decorators.py b/analyzer/tests/libtest/ctu_decorators.py index 425c416252..e1d7d0ecde 100644 --- a/analyzer/tests/libtest/ctu_decorators.py +++ b/analyzer/tests/libtest/ctu_decorators.py @@ -10,19 +10,19 @@ """ from functools import partial -from .decorators import makeSkipUnlessAttributeFound +from .decorators import make_skip_unless_attribute_found NO_CTU_MESSAGE = "CTU is not supported" NO_CTU_ON_DEMAND_MESSAGE = "CTU-on-demand is not supported" NO_CTU_DISPLAY_PROGRESS_MESSAGE = "CTU diplay progress is not supported" -makeSkipUnlessCTUCapable = partial(makeSkipUnlessAttributeFound, +makeSkipUnlessCTUCapable = partial(make_skip_unless_attribute_found, message=NO_CTU_MESSAGE) -makeSkipUnlessCTUOnDemandCapable = partial(makeSkipUnlessAttributeFound, +makeSkipUnlessCTUOnDemandCapable = partial(make_skip_unless_attribute_found, message=NO_CTU_ON_DEMAND_MESSAGE) makeSkipUnlessCTUDisplayCapable = partial( - makeSkipUnlessAttributeFound, + make_skip_unless_attribute_found, message=NO_CTU_DISPLAY_PROGRESS_MESSAGE) diff --git a/analyzer/tests/libtest/decorators.py b/analyzer/tests/libtest/decorators.py index c8422e2aa5..3483317544 100644 --- a/analyzer/tests/libtest/decorators.py +++ b/analyzer/tests/libtest/decorators.py @@ -12,7 +12,7 @@ from functools import wraps -def makeSkipUnlessAttributeFound(attribute, message): +def make_skip_unless_attribute_found(attribute, message): def decorator(original): @wraps(original) def wrapper(self, *args, **kwargs): diff --git a/analyzer/tests/projects/__init__.py b/analyzer/tests/projects/__init__.py new file mode 100644 index 0000000000..4259749345 --- /dev/null +++ b/analyzer/tests/projects/__init__.py @@ -0,0 +1,7 @@ +# ------------------------------------------------------------------------- +# +# Part of the CodeChecker project, under the Apache License v2.0 with +# LLVM Exceptions. See LICENSE for license information. 
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +# +# ------------------------------------------------------------------------- diff --git a/analyzer/tests/unit/test_checker_handling.py b/analyzer/tests/unit/test_checker_handling.py index d2c97615f7..d506cbe1a8 100644 --- a/analyzer/tests/unit/test_checker_handling.py +++ b/analyzer/tests/unit/test_checker_handling.py @@ -11,6 +11,7 @@ """ +# pylint: disable=deprecated-module from distutils import util import os import re @@ -36,25 +37,34 @@ class MockClangsaCheckerLabels: def checkers_by_labels(self, labels): if labels[0] == 'profile:default': return ['core', 'deadcode', 'security.FloatLoopCounter'] - elif labels[0] == 'profile:security': + + if labels[0] == 'profile:security': return ['alpha.security'] - elif labels[0] == 'guideline:sei-cert': + + if labels[0] == 'guideline:sei-cert': return ['alpha.core.CastSize', 'alpha.core.CastToStruct'] - elif labels[0] == 'severity:LOW': + + if labels[0] == 'severity:LOW': return ['security.insecureAPI.bcmp', 'alpha.llvm.Conventions'] + return [] + def get_description(self, label): if label == 'profile': return ['default', 'sensitive', 'security', 'portability', 'extreme'] + return [] def occurring_values(self, label): if label == 'guideline': return ['sei-cert'] - elif label == 'sei-cert': + + if label == 'sei-cert': return ['rule1', 'rule2'] - def checkers(self, analyzer=None): + return [] + + def checkers(self, _=None): return [] @@ -173,79 +183,79 @@ def f(checks, checkers): # "disabled" state. cfg_handler = ClangSA.construct_config_handler(args) cfg_handler.initialize_checkers(checkers) - self.assertTrue(all_with_status(CheckerState.enabled) + self.assertTrue(all_with_status(CheckerState.ENABLED) (cfg_handler.checks(), default_profile)) - self.assertTrue(all_with_status(CheckerState.disabled) + self.assertTrue(all_with_status(CheckerState.DISABLED) (cfg_handler.checks(), security_profile_alpha)) # "--enable-all" leaves alpha checkers in "disabled" state. Others # become enabled. cfg_handler = ClangSA.construct_config_handler(args) cfg_handler.initialize_checkers(checkers, enable_all=True) - self.assertTrue(all_with_status(CheckerState.disabled) + self.assertTrue(all_with_status(CheckerState.DISABLED) (cfg_handler.checks(), security_profile_alpha)) - self.assertTrue(all_with_status(CheckerState.enabled) + self.assertTrue(all_with_status(CheckerState.ENABLED) (cfg_handler.checks(), default_profile)) # Enable alpha checkers explicitly. cfg_handler = ClangSA.construct_config_handler(args) cfg_handler.initialize_checkers(checkers, [('alpha', True)]) - self.assertTrue(all_with_status(CheckerState.enabled) + self.assertTrue(all_with_status(CheckerState.ENABLED) (cfg_handler.checks(), security_profile_alpha)) - self.assertTrue(all_with_status(CheckerState.enabled) + self.assertTrue(all_with_status(CheckerState.ENABLED) (cfg_handler.checks(), default_profile)) # Enable "security" profile checkers. cfg_handler = ClangSA.construct_config_handler(args) cfg_handler.initialize_checkers(checkers, [('profile:security', True)]) - self.assertTrue(all_with_status(CheckerState.enabled) + self.assertTrue(all_with_status(CheckerState.ENABLED) (cfg_handler.checks(), security_profile_alpha)) - self.assertTrue(all_with_status(CheckerState.enabled) + self.assertTrue(all_with_status(CheckerState.ENABLED) (cfg_handler.checks(), default_profile)) # Enable "security" profile checkers without "profile:" prefix. 
        cfg_handler = ClangSA.construct_config_handler(args)
        cfg_handler.initialize_checkers(checkers,
                                        [('security', True)])
-        self.assertTrue(all_with_status(CheckerState.enabled)
+        self.assertTrue(all_with_status(CheckerState.ENABLED)
                        (cfg_handler.checks(), security_profile_alpha))
-        self.assertTrue(all_with_status(CheckerState.enabled)
+        self.assertTrue(all_with_status(CheckerState.ENABLED)
                        (cfg_handler.checks(), default_profile))

        # Enable "sei-cert" guideline checkers.
        cfg_handler = ClangSA.construct_config_handler(args)
        cfg_handler.initialize_checkers(checkers,
                                        [('guideline:sei-cert', True)])
-        self.assertTrue(all_with_status(CheckerState.enabled)
+        self.assertTrue(all_with_status(CheckerState.ENABLED)
                        (cfg_handler.checks(), cert_guideline))

        # Enable "sei-cert" guideline checkers.
        cfg_handler = ClangSA.construct_config_handler(args)
        cfg_handler.initialize_checkers(checkers,
                                        [('sei-cert', True)])
-        self.assertTrue(all_with_status(CheckerState.enabled)
+        self.assertTrue(all_with_status(CheckerState.ENABLED)
                        (cfg_handler.checks(), cert_guideline))

        # Disable "sei-cert" guideline checkers.
        cfg_handler = ClangSA.construct_config_handler(args)
        cfg_handler.initialize_checkers(checkers,
                                        [('guideline:sei-cert', False)])
-        self.assertTrue(all_with_status(CheckerState.disabled)
+        self.assertTrue(all_with_status(CheckerState.DISABLED)
                        (cfg_handler.checks(), cert_guideline))

        # Disable "sei-cert" guideline checkers.
        cfg_handler = ClangSA.construct_config_handler(args)
        cfg_handler.initialize_checkers(checkers,
                                        [('sei-cert', False)])
-        self.assertTrue(all_with_status(CheckerState.disabled)
+        self.assertTrue(all_with_status(CheckerState.DISABLED)
                        (cfg_handler.checks(), cert_guideline))

        # Enable "LOW" severity checkers.
        cfg_handler = ClangSA.construct_config_handler(args)
        cfg_handler.initialize_checkers(checkers,
                                        [('severity:LOW', True)])
-        self.assertTrue(all_with_status(CheckerState.enabled)
+        self.assertTrue(all_with_status(CheckerState.ENABLED)
                        (cfg_handler.checks(), low_severity))

        # Test if statisticsbased checkers are enabled by --stats flag
@@ -261,7 +271,7 @@ def f(checks, checkers):
        enabled_checkers = (
            checker for checker, (enabled, _)
            in cfg_handler.checks().items()
-            if enabled == CheckerState.enabled)
+            if enabled == CheckerState.ENABLED)

        for stat_checker in statisticsbased:
            self.assertTrue(
@@ -276,19 +286,28 @@ def checkers_by_labels(self, labels):
                    'bugprone-dangling-handle',
                    'bugprone-inaccurate-erase']

+        return []
+
    def get_description(self, label):
        if label == 'profile':
            return ['default', 'sensitive', 'security', 'portability',
                    'extreme']

+        return []
+
    def occurring_values(self, label):
        if label == 'guideline':
            return ['sei-cert']
        elif label == 'sei-cert':
            return ['rule1', 'rule2']

+        return []
+
+
+def create_analyzer_tidy(args=None):
+    if args is None:
+        args = []

-def create_analyzer_tidy(args=[]):
    cfg_handler = ClangTidy.construct_config_handler(args)

    action = {
@@ -314,7 +333,7 @@ def setUpClass(cls):
        analyzer = create_analyzer_tidy()
        result_handler = create_result_handler(analyzer)
        cls.cmd = analyzer.construct_analyzer_cmd(result_handler)
-        print('Analyzer command: %s' % cls.cmd)
+        print(f'Analyzer command: {cls.cmd}')

    def _enable_disable_pos(self, checker, checks_list):
        """
@@ -397,7 +416,7 @@ def test_disable_clangsa_checkers(self):

        self.assertEqual(
            analyzer.config_handler.checks()['Wreserved-id-macro'][0],
-            CheckerState.enabled)
+            CheckerState.ENABLED)

    def test_analyze_wrong_parameters(self):
        """
@@ -460,7 +479,8 @@ def test_enable_all_disable_warning(self):
            pos_disable = analyzer_cmd.index('-Wno-unused-variable')
            self.assertLess(pos_everything, pos_disable)
        except ValueError:
+            # pylint: disable=redundant-unittest-assert
            self.assertTrue(
                False,
                "-Weverything and -Wno-unused-variable should be in the "
                "analysis command.")
@@ -530,18 +550,24 @@ def checkers_by_labels(self, labels):
                'cppcheck-arrayIndexOutOfBounds',
                'cppcheck-assertWithSideEffect']

+        return []
+
    def get_description(self, label):
        if label == 'profile':
            return ['default', 'sensitive', 'security', 'portability',
                    'extreme']

+        return []
+
    def occurring_values(self, label):
        if label == 'guideline':
            return ['sei-cert']
        elif label == 'sei-cert':
            return ['rule1', 'rule2']

-    def checkers(self, analyzer):
+        return []
+
+    def checkers(self, _):
        return []

diff --git a/analyzer/tests/unit/test_checker_labels.py b/analyzer/tests/unit/test_checker_labels.py
index 22593e9254..e9ea02751b 100644
--- a/analyzer/tests/unit/test_checker_labels.py
+++ b/analyzer/tests/unit/test_checker_labels.py
@@ -43,8 +43,9 @@ def initialize_labels_dir(self):
            }
        }

-        with open(os.path.join(self.labels_dir.name,
-                               'descriptions.json'), 'w') as f:
+        with open(os.path.join(
+            self.labels_dir.name, 'descriptions.json'),
+                'w', encoding='utf-8') as f:
            json.dump(descriptions, f)

        os.mkdir(os.path.join(self.labels_dir.name, 'analyzers'))
@@ -77,7 +78,7 @@ def initialize_labels_dir(self):

        with open(os.path.join(self.labels_dir.name,
                               'analyzers',
-                               'clangsa.json'), 'w') as f:
+                               'clangsa.json'), 'w', encoding='utf-8') as f:
            json.dump(labels, f)

        labels = {
@@ -109,7 +110,7 @@ def initialize_labels_dir(self):

        with open(os.path.join(self.labels_dir.name,
                               'analyzers',
-                               'clang-tidy.json'), 'w') as f:
+                               'clang-tidy.json'), 'w', encoding='utf-8') as f:
            json.dump(labels, f)

    def test_checker_labels(self):
diff --git a/analyzer/tests/unit/test_checker_option_parsing.py b/analyzer/tests/unit/test_checker_option_parsing.py
index 01b012abe7..9ee275a416 100644
--- a/analyzer/tests/unit/test_checker_option_parsing.py
+++ b/analyzer/tests/unit/test_checker_option_parsing.py
@@ -25,7 +25,7 @@ def test_old_format(self):
        Test parsing of the output of 'clang-tidy -dump-config -checks=*'
        for clang-tidy up to LLVM 14.
        """
-        OLD_FORMAT_EXAMPLE = """
+        old_format_example = """
---
Checks:          'clang-diagnostic-*,clang-analyzer-*,clang-diagnostic-*,\
clang-analyzer-*,bugprone-*,-bugprone-easily-swappable-parameters,\
@@ -51,7 +51,7 @@ def test_old_format(self):
  - key:             bugprone-reserved-identifier.Invert
    value:           'false'
"""
-        result = clangtidy_parse_checker_config(OLD_FORMAT_EXAMPLE)
+        result = clangtidy_parse_checker_config(old_format_example)
        # The result can be an arbitrary iterable of pair-likes. To make
        # assertions about it easer, we first convert it to a list-of-lists.
        result = [[k, v] for (k, v) in result]
@@ -65,7 +65,7 @@ def test_new_format(self):
        Test parsing of the output of 'clang-tidy -dump-config -checks=*'
        for clang-tidy starting with LLVM 15.
        """
-        NEW_FORMAT_EXAMPLE = """
+        new_format_example = """
---
Checks:          'clang-diagnostic-*,clang-analyzer-*,*'
WarningsAsErrors: ''
@@ -80,7 +80,7 @@ def test_new_format(self):
  bugprone-reserved-identifier.Invert: 'false'
  cert-dcl16-c.IgnoreMacros: 'true'
"""
-        result = clangtidy_parse_checker_config(NEW_FORMAT_EXAMPLE)
+        result = clangtidy_parse_checker_config(new_format_example)
        # The result can be an arbitrary iterable of pair-likes. To make
        # assertions about it easer, we first convert it to a list-of-lists.
        result = [[k, v] for (k, v) in result]
diff --git a/analyzer/tests/unit/test_compilation_database.py b/analyzer/tests/unit/test_compilation_database.py
index 5a65783b26..c308a5a901 100644
--- a/analyzer/tests/unit/test_compilation_database.py
+++ b/analyzer/tests/unit/test_compilation_database.py
@@ -114,7 +114,7 @@ def test_gather_compilation_database(self):
        root.
        """
        def compile_commands(comp_db):
-            return set([comp_action["command"] for comp_action in comp_db])
+            return set(comp_action["command"] for comp_action in comp_db)

        # Check the assumption described in the function's documentation.
        self.assertTrue(
diff --git a/analyzer/tests/unit/test_env_var.py b/analyzer/tests/unit/test_env_var.py
index 17b2077ff4..2c1ed54bd9 100644
--- a/analyzer/tests/unit/test_env_var.py
+++ b/analyzer/tests/unit/test_env_var.py
@@ -33,18 +33,13 @@ def create_analyzer_gcc():

 class EnvVarTest(unittest.TestCase):

-    def setup_class(self):
-        context = analyzer_context.get_context()
-        self.__original_analyzer_env = context.analyzer_env
-
-    def teardown_method(self, method):
+    def teardown_method(self, _):
        # Reset the environment, and some initializer methods to hopefully
        # reset the state of the analyzer context.
        context = analyzer_context.get_context()
-        context.__analyzer_env = self.__original_analyzer_env
        context._Context__populate_analyzers()

-    def _get_analyzer_bin_for_CC_ANALYZER_BIN(self, analyzer_bin_conf: str):
+    def _get_analyzer_bin_for_cc_analyzer_bin(self, analyzer_bin_conf: str):
        """
        Set the CC_ANALYZER_BIN env variable, which is an
        "analyzer plugin" -> "path to binary"
@@ -59,7 +54,7 @@ def _get_analyzer_bin_for_CC_ANALYZER_BIN(self, analyzer_bin_conf: str):
        analyzer = create_analyzer_gcc()
        return analyzer.analyzer_binary()

-    def test_CC_ANALYZER_BIN(self):
+    def test_cc_analyzer_bin(self):
        """
        Test whether GCC runs the appropriate binary when CC_ANALYZER_BIN is
        set.
@@ -68,17 +63,17 @@ def test_CC_ANALYZER_BIN(self):
        respectively, and check whether the GCC analyzer points to them.
        Every machine is expected to run some version of gcc, so this should
        be OK.
        """
-        bin_gcc_var = self._get_analyzer_bin_for_CC_ANALYZER_BIN("gcc:gcc")
+        bin_gcc_var = self._get_analyzer_bin_for_cc_analyzer_bin("gcc:gcc")
        self.assertTrue(bin_gcc_var.endswith("gcc"))
        self.assertTrue(not bin_gcc_var.endswith("g++"))

-        bin_gpp_var = self._get_analyzer_bin_for_CC_ANALYZER_BIN("gcc:g++")
+        bin_gpp_var = self._get_analyzer_bin_for_cc_analyzer_bin("gcc:g++")
        self.assertTrue(bin_gpp_var.endswith("g++"))
        self.assertTrue(not bin_gpp_var.endswith("gcc"))

        self.assertNotEqual(bin_gcc_var, bin_gpp_var)

-    def test_CC_ANALYZER_BIN_overrides_CC_ANALYZERS_FROM_PATH(self):
+    def test_cc_analyzer_bin_overrides_cc_analyzers_from_path(self):
        """
        Check whether CC_ANALYZER_BIN overrides CC_ANALYZERS_FROM_PATH (which
        is what we want).
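# The CC_ANALYZER_BIN value exercised by the tests above is a mapping rendered
# as text: each entry is an "analyzer plugin" -> "path to binary" pair such as
# "gcc:g++". A minimal, hypothetical sketch of parsing such a value follows;
# the ';' separator for multiple entries is an assumption made here for
# illustration, the tests themselves only ever pass a single pair.

import os


def parse_analyzer_bin_var(raw: str) -> dict:
    """Split 'name:path' pairs, e.g. 'gcc:g++;clangsa:/usr/bin/clang'."""
    mapping = {}
    for entry in raw.split(';'):
        name, sep, path = entry.partition(':')
        if sep and name and path:
            mapping[name.strip()] = path.strip()
    return mapping


if __name__ == '__main__':
    # With CC_ANALYZER_BIN unset this falls back to the pair used in the test.
    print(parse_analyzer_bin_var(os.environ.get("CC_ANALYZER_BIN", "gcc:g++")))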
@@ -87,11 +82,11 @@ def test_CC_ANALYZER_BIN_overrides_CC_ANALYZERS_FROM_PATH(self): context = analyzer_context.get_context() context.analyzer_env["CC_ANALYZERS_FROM_PATH"] = '1' - bin_gcc_var = self._get_analyzer_bin_for_CC_ANALYZER_BIN("gcc:gcc") + bin_gcc_var = self._get_analyzer_bin_for_cc_analyzer_bin("gcc:gcc") self.assertTrue(bin_gcc_var.endswith("gcc")) self.assertTrue(not bin_gcc_var.endswith("g++")) - bin_gpp_var = self._get_analyzer_bin_for_CC_ANALYZER_BIN("gcc:g++") + bin_gpp_var = self._get_analyzer_bin_for_cc_analyzer_bin("gcc:g++") self.assertTrue(bin_gpp_var.endswith("g++")) self.assertTrue(not bin_gpp_var.endswith("gcc")) diff --git a/analyzer/tests/unit/test_log_parser.py b/analyzer/tests/unit/test_log_parser.py index 9a59b7ec70..acab5cb9f8 100644 --- a/analyzer/tests/unit/test_log_parser.py +++ b/analyzer/tests/unit/test_log_parser.py @@ -498,12 +498,11 @@ def test_response_file_simple(self): """ with open(self.compile_command_file_path, "w", encoding="utf-8", errors="ignore") as build_json: - build_json.write(json.dumps([dict( - directory=self.tmp_dir, - command="g++ {0} @{1}".format(self.src_file_path, - self.rsp_file_path), - file=self.src_file_path - )])) + build_json.write(json.dumps([{ + "directory": self.tmp_dir, + "command": f"g++ {self.src_file_path} @{self.rsp_file_path}", + "file": self.src_file_path + }])) with open(self.rsp_file_path, "w", encoding="utf-8", errors="ignore") as rsp_file: @@ -524,16 +523,15 @@ def test_response_file_contains_source_file(self): """ with open(self.compile_command_file_path, "w", encoding="utf-8", errors="ignore") as build_json: - build_json.write(json.dumps([dict( - directory=self.tmp_dir, - command="g++ @{0}".format(self.rsp_file_path), - file="@{0}".format(self.rsp_file_path) - )])) + build_json.write(json.dumps([{ + "directory": self.tmp_dir, + "command": f"g++ @{self.rsp_file_path}", + "file": f"@{self.rsp_file_path}" + }])) with open(self.rsp_file_path, "w", encoding="utf-8", errors="ignore") as rsp_file: - rsp_file.write("""-DVARIABLE="some value" {0}""".format( - self.src_file_path)) + rsp_file.write(f'-DVARIABLE="some value" {self.src_file_path}') logfile = os.path.join(self.compile_command_file_path) @@ -553,11 +551,11 @@ def test_response_file_contains_multiple_source_files(self): """ with open(self.compile_command_file_path, "w", encoding="utf-8", errors="ignore") as build_json: - build_json.write(json.dumps([dict( - directory=self.tmp_dir, - command="g++ @{0}".format(self.rsp_file_path), - file="@{0}".format(self.rsp_file_path) - )])) + build_json.write(json.dumps([{ + "directory": self.tmp_dir, + "command": f"g++ @{self.rsp_file_path}", + "file": f"@{self.rsp_file_path}" + }])) a_file_path = os.path.join(self.tmp_dir, "a.cpp") with open(a_file_path, "w", @@ -571,8 +569,8 @@ def test_response_file_contains_multiple_source_files(self): with open(self.rsp_file_path, "w", encoding="utf-8", errors="ignore") as rsp_file: - rsp_file.write("""-DVARIABLE="some value" {0} {1}""".format( - a_file_path, b_file_path)) + rsp_file.write( + f'-DVARIABLE="some value" {a_file_path} {b_file_path}') logfile = os.path.join(self.compile_command_file_path) @@ -605,11 +603,11 @@ def test_source_file_contains_at_sign(self): with open(self.compile_command_file_path, "w", encoding="utf-8", errors="ignore") as f: - f.write(json.dumps([dict( - directory=tmp_dir, - command=f"g++ {src_file_path}", - file=src_file_path - )])) + f.write(json.dumps([{ + "directory": tmp_dir, + "command": f"g++ {src_file_path}", + "file": src_file_path + }])) 
        build_actions, _ = log_parser.parse_unique_log(load_json(
            self.compile_command_file_path), self.__this_dir)

@@ -625,39 +623,39 @@ def test_symlink(self):
        even when symbolic links are present
        """
-        fileA = os.path.join(self.tmp_dir, "mainA.cpp")
-        fileB = os.path.join(self.tmp_dir, "mainB.cpp")
-        fileC = os.path.join(self.tmp_dir, "mainC.cpp")
+        file_a = os.path.join(self.tmp_dir, "mainA.cpp")
+        file_b = os.path.join(self.tmp_dir, "mainB.cpp")
+        file_c = os.path.join(self.tmp_dir, "mainC.cpp")

-        fileA_sym = os.path.join(self.tmp_dir, "mainA_sym.cpp")
-        fileB_sym = os.path.join(self.tmp_dir, "mainB_sym.cpp")
+        file_a_sym = os.path.join(self.tmp_dir, "mainA_sym.cpp")
+        file_b_sym = os.path.join(self.tmp_dir, "mainB_sym.cpp")

        tmp_symdir = tempfile.mkdtemp()
-        fileC_symdir = os.path.join(tmp_symdir, "mainC_sym.cpp")
+        file_c_symdir = os.path.join(tmp_symdir, "mainC_sym.cpp")

-        os.symlink(fileA, fileA_sym)
-        os.symlink(fileB, fileB_sym)
-        os.symlink(fileC, fileC_symdir)
+        os.symlink(file_a, file_a_sym)
+        os.symlink(file_b, file_b_sym)
+        os.symlink(file_c, file_c_symdir)

        compilation_cmd = [
            {"directory": self.tmp_dir,
-             "command": "g++ " + fileA,
-             "file": fileA},
+             "command": "g++ " + file_a,
+             "file": file_a},
            {"directory": self.tmp_dir,
-             "command": "g++ " + fileB,
-             "file": fileB},
+             "command": "g++ " + file_b,
+             "file": file_b},
            {"directory": tmp_symdir,
-             "command": "g++ " + fileC_symdir,
-             "file": fileC_symdir},
+             "command": "g++ " + file_c_symdir,
+             "file": file_c_symdir},
            {"directory": self.tmp_dir,
-             "command": "g++ " + fileC,
-             "file": fileC},
+             "command": "g++ " + file_c,
+             "file": file_c},
            {"directory": self.tmp_dir,
-             "command": "g++ " + fileA_sym,
-             "file": fileA_sym},
+             "command": "g++ " + file_a_sym,
+             "file": file_a_sym},
            {"directory": self.tmp_dir,
-             "command": "g++ " + fileB_sym,
-             "file": fileB_sym}]
+             "command": "g++ " + file_b_sym,
+             "file": file_b_sym}]

        build_actions, _ = log_parser.parse_unique_log(compilation_cmd,
                                                       self.__this_dir,
@@ -665,7 +663,7 @@ def test_symlink(self):

        build_action = build_actions[2]
        self.assertEqual(len(build_actions), 3)
-        self.assertEqual(build_action.source, fileC_symdir)
+        self.assertEqual(build_action.source, file_c_symdir)

    def test_get_log_env(self):
        """
diff --git a/analyzer/tests/unit/test_option_parser.py b/analyzer/tests/unit/test_option_parser.py
index bfed1431d8..142ebbf2e9 100644
--- a/analyzer/tests/unit/test_option_parser.py
+++ b/analyzer/tests/unit/test_option_parser.py
@@ -301,7 +301,7 @@ def test_ignore_flags_gcc(self):
                  "-mabi=spe", "-mabi=eabi", "-fext-numeric-literals"]
        action = {
            'file': 'main.cpp',
-            'command': "g++ {} main.cpp".format(' '.join(ignore)),
+            'command': f"g++ {' '.join(ignore)} main.cpp",
            'directory': ''}
        res = log_parser.parse_options(action)
        self.assertEqual(res.analyzer_options, ["-fsyntax-only"])
@@ -348,7 +348,7 @@ def test_target_parsing_clang(self):

    def test_ignore_xclang_flags_clang(self):
        """Skip some specific xclang constructs"""
-        def fake_clang_version(a, b):
+        def fake_clang_version(_a, _b):
            return True

        clang_flags = ["-std=gnu++14",
@@ -364,7 +364,7 @@ def fake_clang_version(a, b):
        xclang_skip = {
            "directory": "/tmp",
            "command":
-                "clang++ {} -c /tmp/a.cpp".format(' '.join(clang_flags)),
+                f"clang++ {' '.join(clang_flags)} -c /tmp/a.cpp",
            "file": "/tmp/a.cpp"}

        res = log_parser.parse_options(
@@ -392,7 +392,7 @@ class FakeClangVersion:
        log_parser.ImplicitCompilerInfo.compiler_versions["clang++"] =\
            fake_clang_version

-        def fake_clangsa_version_func(compiler, env):
+        def fake_clangsa_version_func(_compiler, _env):
            """Return always the fake compiler version"""
            return fake_clang_version

@@ -417,7 +417,7 @@ def test_keep_clang_flags(self):
                "--target=something"]
        action = {
            'file': 'main.cpp',
-            'command': "clang++ {} main.cpp".format(' '.join(keep)),
+            'command': f"clang++ {' '.join(keep)} main.cpp",
            'directory': ''}

        class FakeClangVersion:
@@ -429,7 +429,7 @@ class FakeClangVersion:
        log_parser.ImplicitCompilerInfo.compiler_versions["clang++"] =\
            fake_clang_version

-        def fake_clangsa_version_func(compiler, env):
+        def fake_clangsa_version_func(_compiler, _env):
            """Return always the fake compiler version"""
            return fake_clang_version

@@ -444,15 +444,15 @@ def test_preserve_flags(self):
        preserve = ['-nostdinc', '-nostdinc++', '-pedantic']
        action = {
            'file': 'main.cpp',
-            'command': "g++ {} main.cpp".format(' '.join(preserve)),
+            'command': f"g++ {' '.join(preserve)} main.cpp",
            'directory': ''}
        res = log_parser.parse_options(action)
        self.assertEqual(res.analyzer_options, preserve)

-    def is_compiler_executable_fun(self, compiler):
+    def is_compiler_executable_fun(self, _):
        return True

-    def is_compiler_executable_fun_false(self, compiler):
+    def is_compiler_executable_fun_false(self, _):
        return False

    def test_compiler_toolchain(self):
@@ -515,11 +515,11 @@ def test_compiler_gcc_implicit_includes(self):
        # directory among the implicit include paths. Otherwise this test may
        # fail.
        res = log_parser.parse_options(action, keep_gcc_include_fixed=False)
-        self.assertFalse(any([x.endswith('include-fixed')
-                              for x in res.compiler_includes]))
+        self.assertFalse(any(x.endswith('include-fixed')
+                             for x in res.compiler_includes))
        res = log_parser.parse_options(action, keep_gcc_include_fixed=True)
-        self.assertTrue(any([x.endswith('include-fixed')
-                             for x in res.compiler_includes]))
+        self.assertTrue(any(x.endswith('include-fixed')
+                            for x in res.compiler_includes))

    def test_compiler_intrin_headers(self):
        """ Include directories with *intrin.h files should be skipped."""
diff --git a/analyzer/tests/unit/test_result_handler.py b/analyzer/tests/unit/test_result_handler.py
index c36385cff7..eabe993c36 100644
--- a/analyzer/tests/unit/test_result_handler.py
+++ b/analyzer/tests/unit/test_result_handler.py
@@ -36,14 +36,14 @@ def random_string():
        rh = ResultHandler(ba, '/tmp/workspace')
        rh.analyzed_source_file = 'main.cpp'

-        ba.original_command = 'g++ main.cpp -o {} -o{}'.format(
-            random_string(), random_string())
+        ba.original_command = \
+            f'g++ main.cpp -o {random_string()} -o{random_string()}'
        self.assertEqual(
            rh.analyzer_action_str,
            'main.cpp_clangsa_b42298618a535959e9adc7807414763c')

-        ba.original_command = 'g++ main.cpp -o {} -o{} -W -O3'.format(
-            random_string(), random_string())
+        ba.original_command = \
+            f'g++ main.cpp -o {random_string()} -o{random_string()} -W -O3'
        self.assertEqual(
            rh.analyzer_action_str,
            'main.cpp_clangsa_193423e3c13026c10bc1457b7434a25a')
diff --git a/analyzer/tests/unit/test_review_status_config.py b/analyzer/tests/unit/test_review_status_config.py
index c276c644a4..843b180d17 100644
--- a/analyzer/tests/unit/test_review_status_config.py
+++ b/analyzer/tests/unit/test_review_status_config.py
@@ -6,8 +6,6 @@
 #
 # -------------------------------------------------------------------------

-"""TODO"""
-
 import os
 import unittest

@@ -21,28 +19,19 @@ class ReviewStatusHandlerTest(unittest.TestCase):
    Test the build command escaping and execution.
""" - @classmethod - def setup_class(self): + def __init__(self, methodName): global TEST_WORKSPACE TEST_WORKSPACE = env.get_workspace('review_status_config') - os.environ['TEST_WORKSPACE'] = TEST_WORKSPACE - self._test_workspace = os.environ['TEST_WORKSPACE'] - @classmethod - def teardown_class(self): - pass - - def setup_method(self, method): + self._test_workspace = os.environ['TEST_WORKSPACE'] self.rshandler = ReviewStatusHandler(None) - pass - def teardown_method(self, method): - pass + super().__init__(methodName) def __put_in_review_status_cfg_file(self, file_contents: str) -> str: rs_cfg = os.path.join(self._test_workspace, "review_status.yaml") - with open(rs_cfg, "w") as f: + with open(rs_cfg, "w", encoding='utf-8') as f: f.write(file_contents) return rs_cfg diff --git a/analyzer/tests/unit/test_subprocess_timeout.py b/analyzer/tests/unit/test_subprocess_timeout.py index a5eb46db90..8bd3fe4f3f 100644 --- a/analyzer/tests/unit/test_subprocess_timeout.py +++ b/analyzer/tests/unit/test_subprocess_timeout.py @@ -21,12 +21,12 @@ from codechecker_analyzer.analysis_manager import setup_process_timeout -class subprocess_timeoutTest(unittest.TestCase): +class SubprocessTimeoutTest(unittest.TestCase): """ Test the process timeout watcher functionality. """ - def testTimeoutWithProcessFinishing(self): + def test_timeout_with_process_finishing(self): """ Test if process timeout watcher recognises if a process ended gracefully before the timeout expired. @@ -39,7 +39,7 @@ def testTimeoutWithProcessFinishing(self): preexec_fn=os.setpgrp, encoding="utf-8", errors="ignore") - print("Started `echo` with PID {0}".format(proc.pid)) + print(f"Started `echo` with PID {proc.pid}") future = setup_process_timeout(proc, 5, signal.SIGKILL) @@ -52,7 +52,7 @@ def testTimeoutWithProcessFinishing(self): "Process timeout watcher said it killed the " "process, but it should have exited long beforehand.") - def testTimeoutWithLongRunning(self): + def test_timeout_with_long_running(self): """ Test if process timeout watcher kills the process that runs too long, and properly reports that it was killed. 
@@ -66,7 +66,7 @@ def testTimeoutWithLongRunning(self): preexec_fn=os.setpgrp, encoding="utf-8", errors="ignore") - print("Started `yes` with PID {0}".format(proc.pid)) + print(f"Started `yes` with PID {proc.pid}") future = setup_process_timeout(proc, 5) diff --git a/analyzer/tools/build-logger/requirements_py/dev/requirements.txt b/analyzer/tools/build-logger/requirements_py/dev/requirements.txt index ab2e19281b..9685d12ba7 100644 --- a/analyzer/tools/build-logger/requirements_py/dev/requirements.txt +++ b/analyzer/tools/build-logger/requirements_py/dev/requirements.txt @@ -1,3 +1,3 @@ pytest==7.3.1 pycodestyle==2.12.0 -pylint==2.8.2 +pylint==3.2.4 diff --git a/analyzer/tools/build-logger/tests/unit/__init__.py b/analyzer/tools/build-logger/tests/unit/__init__.py index 5c194a3b8b..bc442cbb15 100644 --- a/analyzer/tools/build-logger/tests/unit/__init__.py +++ b/analyzer/tools/build-logger/tests/unit/__init__.py @@ -69,7 +69,6 @@ def run_command( class BasicLoggerTest(unittest.TestCase): def setUp(self): - self.maxDiff = None fd, self.source_file = tempfile.mkstemp( suffix=".cpp", prefix="logger-test-source-", text=True ) diff --git a/analyzer/tools/merge_clang_extdef_mappings/requirements_py/dev/requirements.txt b/analyzer/tools/merge_clang_extdef_mappings/requirements_py/dev/requirements.txt index ab2e19281b..9685d12ba7 100644 --- a/analyzer/tools/merge_clang_extdef_mappings/requirements_py/dev/requirements.txt +++ b/analyzer/tools/merge_clang_extdef_mappings/requirements_py/dev/requirements.txt @@ -1,3 +1,3 @@ pytest==7.3.1 pycodestyle==2.12.0 -pylint==2.8.2 +pylint==3.2.4 diff --git a/analyzer/tools/statistics_collector/requirements_py/dev/requirements.txt b/analyzer/tools/statistics_collector/requirements_py/dev/requirements.txt index ab2e19281b..9685d12ba7 100644 --- a/analyzer/tools/statistics_collector/requirements_py/dev/requirements.txt +++ b/analyzer/tools/statistics_collector/requirements_py/dev/requirements.txt @@ -1,3 +1,3 @@ pytest==7.3.1 pycodestyle==2.12.0 -pylint==2.8.2 +pylint==3.2.4 diff --git a/bin/CodeChecker b/bin/CodeChecker index a0f1d785a0..ad820b8a05 100755 --- a/bin/CodeChecker +++ b/bin/CodeChecker @@ -6,11 +6,15 @@ # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception # # ------------------------------------------------------------------------- + """ Used to kickstart CodeChecker. Save original environment without modifications. Used to run the logging in the same env. """ +# This is for enabling CodeChecker as a filename (i.e. module name). 
+# pylint: disable=invalid-name +# pylint: enable=invalid-name import os import pickle @@ -20,7 +24,7 @@ import subprocess import sys import tempfile -proc_pid = None +PROC_PID = None def run_codechecker(checker_env, subcommand=None): @@ -56,8 +60,8 @@ def run_codechecker(checker_env, subcommand=None): encoding="utf=8", errors="ignore", env=checker_env) - global proc_pid - proc_pid = proc.pid + global PROC_PID + PROC_PID = proc.pid proc.wait() sys.exit(proc.returncode) @@ -90,10 +94,10 @@ def main(subcommand=None): print('Saving original build environment failed.') print(ex) - def signal_term_handler(signum, frame): - global proc_pid - if proc_pid and not sys.platform == "win32": - os.kill(proc_pid, signal.SIGINT) + def signal_term_handler(signum, _frame): + global PROC_PID + if PROC_PID and sys.platform != "win32": + os.kill(PROC_PID, signal.SIGINT) _remove_tmp() sys.exit(128 + signum) @@ -101,12 +105,12 @@ def main(subcommand=None): signal.signal(signal.SIGTERM, signal_term_handler) signal.signal(signal.SIGINT, signal_term_handler) - def signal_reload_handler(sig, frame): - global proc_pid - if proc_pid: - os.kill(proc_pid, signal.SIGHUP) + def signal_reload_handler(_sig, _frame): + global PROC_PID + if PROC_PID: + os.kill(PROC_PID, signal.SIGHUP) - if not sys.platform == "win32": + if sys.platform != "win32": signal.signal(signal.SIGHUP, signal_reload_handler) try: diff --git a/codechecker_common/checker_labels.py b/codechecker_common/checker_labels.py index 710cf7f493..b359ea74e6 100644 --- a/codechecker_common/checker_labels.py +++ b/codechecker_common/checker_labels.py @@ -116,12 +116,11 @@ def is_unique(labels: Iterable[str], label: str): if k == label: if found: return False - else: - found = True + found = True return True if not isinstance(data, dict): - raise ValueError(f'Top level element should be a JSON object.') + raise ValueError('Top level element should be a JSON object.') for _, checkers in data.items(): for checker, labels in checkers.items(): @@ -137,8 +136,8 @@ def is_unique(labels: Iterable[str], label: str): for unique_label in CheckerLabels.UNIQUE_LABELS: if not is_unique(labels, unique_label): raise ValueError( - f'Label "severity" should be unique for checker ' - '{checker}.') + 'Label "severity" should be unique for checker ' + f'{checker}.') def __get_analyzer_data( self, diff --git a/codechecker_common/cli.py b/codechecker_common/cli.py index 4e3113ee50..b7f697b45b 100755 --- a/codechecker_common/cli.py +++ b/codechecker_common/cli.py @@ -108,7 +108,7 @@ def main(): with open(commands_cfg, encoding="utf-8", errors="ignore") as cfg_file: subcommands = json.load(cfg_file) - def signal_handler(signum, frame): + def signal_handler(signum, _): """ Without this handler the PostgreSQL server does not terminate at signal. 
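# The signal handlers above exit with 128 + signum after forwarding the signal
# to the child process. That follows the shell convention where a process
# terminated by signal N reports exit status 128 + N. A stripped-down sketch
# of the same pattern, assuming a POSIX platform; CHILD_PID here is a
# hypothetical stand-in for the real PROC_PID bookkeeping.

import os
import signal
import sys

CHILD_PID = None  # Set once the child process has been spawned.


def forward_and_exit(signum, _frame):
    """Pass the signal on to the child, then exit as 128 + signum."""
    if CHILD_PID and sys.platform != "win32":
        os.kill(CHILD_PID, signal.SIGINT)
    sys.exit(128 + signum)


signal.signal(signal.SIGTERM, forward_and_exit)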
@@ -186,10 +186,9 @@ def signal_handler(signum, frame): logger.setup_logger( args.verbose if 'verbose' in args else None, 'stderr') - LOG = logger.get_logger('system') + log = logger.get_logger('system') - if len(sys.argv) > 1: - called_sub_command = sys.argv[1] + called_sub_command = sys.argv[1] cfg_args = args.func_process_config_file(args, called_sub_command) if cfg_args: @@ -203,7 +202,7 @@ def signal_handler(signum, frame): sys.argv[cfg_idx + 2:] args = parser.parse_args() - LOG.info("Full extended command: %s", ' '.join(sys.argv)) + log.info("Full extended command: %s", ' '.join(sys.argv)) if 'func' in args: sys.exit(args.func(args)) diff --git a/codechecker_common/cmd_config.py b/codechecker_common/cmd_config.py index 7d3d9f2348..b19a0393b6 100644 --- a/codechecker_common/cmd_config.py +++ b/codechecker_common/cmd_config.py @@ -84,6 +84,8 @@ def process_config_file(args, subcommand_name): return options + return {} + def check_config_file(args): """Check if a config file is set in the arguments and if the file exists. diff --git a/codechecker_common/compatibility/multiprocessing.py b/codechecker_common/compatibility/multiprocessing.py index 1504804d0c..14ef7ebebe 100644 --- a/codechecker_common/compatibility/multiprocessing.py +++ b/codechecker_common/compatibility/multiprocessing.py @@ -10,7 +10,8 @@ """ import sys -# pylint: disable=no-name-in-module,unused-import +# pylint: disable=no-name-in-module +# pylint: disable=unused-import if sys.platform in ["darwin", "win32"]: from multiprocess import Pool # type: ignore from multiprocess import cpu_count diff --git a/codechecker_common/logger.py b/codechecker_common/logger.py index 27898ec2b2..8c860dee6e 100644 --- a/codechecker_common/logger.py +++ b/codechecker_common/logger.py @@ -5,8 +5,6 @@ # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception # # ------------------------------------------------------------------------- -""" -""" import argparse @@ -32,9 +30,6 @@ class CCLogger(logging.Logger): - def __init__(self, name, level=NOTSET): - super(CCLogger, self).__init__(name, level) - def debug_analyzer(self, msg, *args, **kwargs): if self.isEnabledFor(logging.DEBUG_ANALYZER): self._log(logging.DEBUG_ANALYZER, msg, args, **kwargs) @@ -118,7 +113,7 @@ def validate_loglvl(log_level): return log_level -class LOG_CFG_SERVER: +class LogCfgServer: """ Initialize a log configuration server for dynamic log configuration. The log config server will only be started if the @@ -156,19 +151,19 @@ def setup_logger(log_level=None, stream=None, workspace=None): be given (stderr -> ext://sys.stderr, 'stdout' -> ext://sys.stdout). 
""" - LOG_CONFIG = json.loads(DEFAULT_LOG_CONFIG) + log_config = json.loads(DEFAULT_LOG_CONFIG) if log_level: log_level = validate_loglvl(log_level) - loggers = LOG_CONFIG.get("loggers", {}) + loggers = log_config.get("loggers", {}) for k in loggers.keys(): - LOG_CONFIG['loggers'][k]['level'] = log_level + log_config['loggers'][k]['level'] = log_level - handlers = LOG_CONFIG.get("handlers", {}) + handlers = log_config.get("handlers", {}) for k in handlers.keys(): - LOG_CONFIG['handlers'][k]['level'] = log_level - if log_level == 'DEBUG' or log_level == 'DEBUG_ANALYZER': - LOG_CONFIG['handlers'][k]['formatter'] = 'precise' + log_config['handlers'][k]['level'] = log_level + if log_level in ('DEBUG', 'DEBUG_ANALYZER'): + log_config['handlers'][k]['formatter'] = 'precise' if stream: if stream == 'stderr': @@ -176,9 +171,9 @@ def setup_logger(log_level=None, stream=None, workspace=None): elif stream == 'stdout': stream = 'ext://sys.stdout' - handlers = LOG_CONFIG.get("handlers", {}) + handlers = log_config.get("handlers", {}) for k in handlers.keys(): - handler = LOG_CONFIG['handlers'][k] + handler = log_config['handlers'][k] if 'stream' in handler: handler['stream'] = stream @@ -190,10 +185,10 @@ def setup_logger(log_level=None, stream=None, workspace=None): if workspace: # Add file_handler to store_time logger, # and add the handler to the config - loggers = LOG_CONFIG.get("loggers", {}) + loggers = log_config.get("loggers", {}) loggers["store_time"]["handlers"].append('store_time_file_handler') - handlers = LOG_CONFIG.get("handlers", {}) + handlers = log_config.get("handlers", {}) log_path = Path(workspace, "store_time.log") handlers["store_time_file_handler"] = {} store_time_handler = { @@ -204,4 +199,4 @@ def setup_logger(log_level=None, stream=None, workspace=None): 'interval': 7} handlers["store_time_file_handler"] = store_time_handler - config.dictConfig(LOG_CONFIG) + config.dictConfig(log_config) diff --git a/codechecker_common/review_status_handler.py b/codechecker_common/review_status_handler.py index a8fa901f40..20c40207a3 100644 --- a/codechecker_common/review_status_handler.py +++ b/codechecker_common/review_status_handler.py @@ -235,6 +235,7 @@ def set_review_status_config(self, config_file): try: self.__data = yaml.safe_load(f) except yaml.YAMLError as err: + # pylint: disable=raise-missing-from raise ValueError( f"Invalid YAML format in {self.__review_status_yaml}:\n" f"{err}") @@ -344,7 +345,8 @@ def get_review_status_from_source( message=message.encode('utf-8'), bug_hash=report.report_hash, in_source=True) - elif len(src_comment_data) > 1: + + if len(src_comment_data) > 1: raise ValueError( f"Multiple source code comments can be found for " f"'{report.checker_name}' checker in '{source_file_name}' " diff --git a/codechecker_common/skiplist_handler.py b/codechecker_common/skiplist_handler.py index 86f3fe43ed..f5c2c50bea 100644 --- a/codechecker_common/skiplist_handler.py +++ b/codechecker_common/skiplist_handler.py @@ -5,8 +5,6 @@ # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception # # ------------------------------------------------------------------------- -""" -""" import fnmatch diff --git a/codechecker_common/source_code_comment_handler.py b/codechecker_common/source_code_comment_handler.py index 3b69c8e62a..7536116099 100644 --- a/codechecker_common/source_code_comment_handler.py +++ b/codechecker_common/source_code_comment_handler.py @@ -44,7 +44,6 @@ def contains_codechecker_comment(fp): class SpellException(Exception): """Exception for the review comment spell 
errors.""" - pass class SourceCodeComment: @@ -77,8 +76,8 @@ def __eq__(self, other) -> bool: self.line == other.line raise NotImplementedError( - "Comparison SourceCodeComment object with '%s' is not supported", - type(other)) + f"Comparison SourceCodeComment object with '{type(other)}' is not " + "supported") def __repr__(self): return json.dumps(self.to_json()) diff --git a/codechecker_common/util.py b/codechecker_common/util.py index fdedb35322..3d0e88455b 100644 --- a/codechecker_common/util.py +++ b/codechecker_common/util.py @@ -25,8 +25,7 @@ def arg_match(options, args): that are present in parameter 'args'.""" matched_args = [] for option in options: - if any([arg if option.startswith(arg) else None - for arg in args]): + if any(arg if option.startswith(arg) else None for arg in args): matched_args.append(option) continue @@ -74,10 +73,6 @@ def load_json(path: str, default=None, lock=False, display_warning=True): if lock: portalocker.unlock(handle) - except IOError as ex: - if display_warning: - LOG.warning("Failed to open json file: %s", path) - LOG.warning(ex) except OSError as ex: if display_warning: LOG.warning("Failed to open json file: %s", path) diff --git a/scripts/build/create_commands.py b/scripts/build/create_commands.py index 5367756e82..28401f1a35 100755 --- a/scripts/build/create_commands.py +++ b/scripts/build/create_commands.py @@ -57,7 +57,7 @@ def copy_files(files, target_dir): shutil.copy2(f, target_dir) -if __name__ == "__main__": +def main(): description = '''CodeChecker copy entry point sub-commands''' parser = argparse.ArgumentParser( @@ -98,3 +98,7 @@ def copy_files(files, target_dir): copy_files( args['bin_file'], os.path.join(args['build_dir'], 'CodeChecker', 'bin')) + + +if __name__ == "__main__": + main() diff --git a/scripts/build/extend_version_file.py b/scripts/build/extend_version_file.py index 3dc4e8c08f..dbefaade73 100755 --- a/scripts/build/extend_version_file.py +++ b/scripts/build/extend_version_file.py @@ -34,11 +34,11 @@ def extend_with_git_information(repository_root, version_json_data): version = version_json_data['version'] version_string = str(version['major']) if int(version['minor']) != 0 or int(version['revision']) != 0: - version_string += ".{0}".format(version['minor']) + version_string += f".{version['minor']}" if int(version['revision']) != 0: - version_string += ".{0}".format(version['revision']) + version_string += f".{version['revision']}" if version['rc'] != '' and int(version['rc']) != 0: - version_string += "-rc{0}".format(version['rc']) + version_string += f"-rc{version['rc']}" LOG.info("This is CodeChecker v%s", version_string) @@ -123,7 +123,7 @@ def extend_version_file(repository_root, version_file): LOG.debug(json.dumps(version_json_data, sort_keys=True, indent=2)) -if __name__ == "__main__": +def main(): description = '''CodeChecker extend version file''' parser = argparse.ArgumentParser( @@ -154,3 +154,7 @@ def extend_version_file(repository_root, version_file): for version_file in args['versionfile']: LOG.info("Extending version file '%s'.", version_file) extend_version_file(args['repository'], version_file) + + +if __name__ == "__main__": + main() diff --git a/scripts/build/wrap_binary_in_venv.py b/scripts/build/wrap_binary_in_venv.py index 4944c590a6..ee73e3db37 100755 --- a/scripts/build/wrap_binary_in_venv.py +++ b/scripts/build/wrap_binary_in_venv.py @@ -11,7 +11,7 @@ from os.path import abspath, dirname, exists from stat import S_IXUSR, S_IXGRP, S_IXOTH from string import Template -from sys import exit +import 
sys LOG = getLogger('VirtualenvWrapper') @@ -57,10 +57,10 @@ def generate_content(binary_path, virtual_environment_path): def create_wrapper_file(path, content): path = abspath(path) - dir = dirname(path) + directory = dirname(path) - if not exists(dir): - makedirs(dir) + if not exists(directory): + makedirs(directory) with open(path, 'w', encoding="utf-8", errors="ignore") as wrapper_file: wrapper_file.write(content) @@ -91,7 +91,7 @@ def create_venv_wrapper(**args): if not exists(binary_path): LOG.error("Binary path '%s' does not exist!", binary_path) - exit(1) + sys.exit(1) add_executable_permission(binary_path) @@ -101,7 +101,7 @@ def create_venv_wrapper(**args): create_wrapper_file(output_path, wrapper_content) -if __name__ == "__main__": +def main(): parser = ArgumentParser( formatter_class=RawDescriptionHelpFormatter, description="""Wrap python binaries in virtualenv. This utility can be @@ -145,3 +145,7 @@ def create_venv_wrapper(**args): args['environment']) create_venv_wrapper(**args) + + +if __name__ == "__main__": + main() diff --git a/scripts/debug_tools/__init__.py b/scripts/debug_tools/__init__.py new file mode 100644 index 0000000000..4259749345 --- /dev/null +++ b/scripts/debug_tools/__init__.py @@ -0,0 +1,7 @@ +# ------------------------------------------------------------------------- +# +# Part of the CodeChecker project, under the Apache License v2.0 with +# LLVM Exceptions. See LICENSE for license information. +# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +# +# ------------------------------------------------------------------------- diff --git a/scripts/debug_tools/failure_lib.py b/scripts/debug_tools/failure_lib.py index b1bc262c7a..2e0a37874c 100644 --- a/scripts/debug_tools/failure_lib.py +++ b/scripts/debug_tools/failure_lib.py @@ -28,7 +28,7 @@ def find_path_end(string, path_begin): return path, path_end -def change_paths(string, pathModifierFun): +def change_paths(string, path_modifier_fun): """ Scan through the string and possibly replace all found paths. Returns the modified string. 
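# change_paths() hands every path it detects in the string to the supplied
# callback and splices the returned value back into the result. Any callable
# that takes and returns a path string fits; the IncludePathModifier used
# elsewhere in these scripts is one such callable. A small, hypothetical
# example of a compatible callback:

def prefix_with_sources_root(path, sources_root="./sources-root"):
    """Map an absolute path into the packed sources-root tree."""
    return sources_root + path if path.startswith('/') else path


if __name__ == '__main__':
    # e.g. change_paths(cmd, prefix_with_sources_root) would rewrite
    # "/usr/include" to "./sources-root/usr/include".
    print(prefix_with_sources_root("/usr/include"))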
@@ -47,7 +47,7 @@ def change_paths(string, pathModifierFun): out_dir = "./sources-root" + os.path.dirname(path) if not os.path.isdir(out_dir): os.makedirs(out_dir) - path = pathModifierFun(path) + path = path_modifier_fun(path) result += path i = path_end - 1 else: @@ -89,8 +89,8 @@ def get_resource_dir(clang_bin): if proc.returncode == 0: return out.decode("utf-8").rstrip() - else: - return None + + return None except OSError: print('Failed to run: "' + ' '.join(cmd) + '"') raise diff --git a/scripts/debug_tools/prepare_all_cmd_for_ctu.py b/scripts/debug_tools/prepare_all_cmd_for_ctu.py index 60afddf00e..f233ba916e 100755 --- a/scripts/debug_tools/prepare_all_cmd_for_ctu.py +++ b/scripts/debug_tools/prepare_all_cmd_for_ctu.py @@ -20,12 +20,12 @@ import prepare_analyzer_cmd -def execute(cmd, env): +def execute(cmd, environ): print("Executing command: " + ' '.join(cmd)) try: proc = subprocess.Popen( cmd, - env=env, + env=environ, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding="utf-8", @@ -37,7 +37,7 @@ def execute(cmd, env): if proc.returncode != 0: print('Unsuccessful run: "' + ' '.join(cmd) + '"') - raise Exception("Unsuccessful run of command.") + raise OSError("Unsuccessful run of command.") return out except OSError: print('Failed to run: "' + ' '.join(cmd) + '"') @@ -57,7 +57,7 @@ def get_triple_arch(analyze_command_file): return platform.machine() -if __name__ == '__main__': +def main(): parser = argparse.ArgumentParser( description='Prepare all commands ' 'to execute in local environmennt for debugging.') @@ -113,11 +113,11 @@ def get_triple_arch(analyze_command_file): env = os.environ env['PATH'] = f"{os.path.dirname(args.clang)}:{env['PATH']}" env['CC_ANALYZERS_FROM_PATH'] = 'yes' - out = execute(["CodeChecker", "analyze", "--ctu-collect", - compile_cmd_debug, - "--compiler-info-file", compiler_info_debug, - "-o", "report_debug", - "--verbose", "debug"], env) + execute(["CodeChecker", "analyze", "--ctu-collect", + compile_cmd_debug, + "--compiler-info-file", compiler_info_debug, + "-o", "report_debug", + "--verbose", "debug"], env) analyzer_command_debug = "analyzer-command_DEBUG" target = get_triple_arch('./analyzer-command') @@ -136,5 +136,8 @@ def get_triple_arch(analyze_command_file): print( "Preparation of files for debugging is done. " "Now you can execute the generated analyzer command. " - "E.g. $ bash %s" % - analyzer_command_debug) + f"E.g. 
$ bash {analyzer_command_debug}") + + +if __name__ == '__main__': + main() diff --git a/scripts/debug_tools/prepare_analyzer_cmd.py b/scripts/debug_tools/prepare_analyzer_cmd.py index 67d826d260..8e5c070b03 100755 --- a/scripts/debug_tools/prepare_analyzer_cmd.py +++ b/scripts/debug_tools/prepare_analyzer_cmd.py @@ -78,18 +78,18 @@ def __init__( self.ctu_dir = ctu_dir -def prepare(analyzer_command_file, pathOptions): +def prepare(analyzer_command_file, path_options): res = lib.change_paths(get_first_line_of_file(analyzer_command_file), - AnalyzerCommandPathModifier(pathOptions)) + AnalyzerCommandPathModifier(path_options)) if '-nobuiltininc' not in res: return res # Find Clang include path - clang_include_path = lib.get_resource_dir(pathOptions.clang) + '/include' + clang_include_path = lib.get_resource_dir(path_options.clang) + '/include' if clang_include_path is None: - clang_lib_path = os.path.dirname(pathOptions.clang) + '/../lib' + clang_lib_path = os.path.dirname(path_options.clang) + '/../lib' clang_include_path = '' for path, _, files in os.walk(clang_lib_path): if 'stddef.h' in files: @@ -103,7 +103,7 @@ def prepare(analyzer_command_file, pathOptions): '-nobuiltininc -isystem ' + clang_include_path) -if __name__ == '__main__': +def main(): parser = argparse.ArgumentParser(description='Prepare analyzer-command ' 'to execute in local environmennt.') parser.add_argument( @@ -148,5 +148,8 @@ def prepare(analyzer_command_file, pathOptions): print( "Preparation of files for debugging is done. " "Now you can execute the generated analyzer command. " - "E.g. $ bash %s" % - args.output) + f"E.g. $ bash {args.output}") + + +if __name__ == '__main__': + main() diff --git a/scripts/debug_tools/prepare_compile_cmd.py b/scripts/debug_tools/prepare_compile_cmd.py index b6dfe81490..4b88f41bb4 100755 --- a/scripts/debug_tools/prepare_compile_cmd.py +++ b/scripts/debug_tools/prepare_compile_cmd.py @@ -15,7 +15,7 @@ import failure_lib as lib -def existsInSourcesRoot(entry, sources_root): +def exists_in_source_root(entry, sources_root): """ Returns true if the given file in the compile commands really available in the sources-root dir @@ -43,7 +43,7 @@ def prepare(compile_command_json, sources_root): result_json = [] sources_root_abs = os.path.abspath(sources_root) for entry in json_data: - if not existsInSourcesRoot(entry, sources_root): + if not exists_in_source_root(entry, sources_root): continue entry['directory'] =\ @@ -59,9 +59,9 @@ def prepare(compile_command_json, sources_root): pass cmd = entry['command'] - compiler, compilerEnd = lib.find_path_end(cmd.lstrip(), 0) + compiler, compiler_end = lib.find_path_end(cmd.lstrip(), 0) entry['command'] = compiler +\ - lib.change_paths(cmd[compilerEnd:], + lib.change_paths(cmd[compiler_end:], lib.IncludePathModifier(sources_root_abs)) entry['file'] =\ diff --git a/scripts/debug_tools/prepare_compiler_info.py b/scripts/debug_tools/prepare_compiler_info.py index 5e5441fc95..0f0e34ddde 100755 --- a/scripts/debug_tools/prepare_compiler_info.py +++ b/scripts/debug_tools/prepare_compiler_info.py @@ -19,9 +19,9 @@ def _try_prepare_as_old_format(compiler_info_file, sources_root): json_data = lib.load_json_file(compiler_info_file) sources_root_abs = os.path.abspath(sources_root) - new_json_data = dict() + new_json_data = {} for compiler in json_data: - new_json_data[compiler] = dict() + new_json_data[compiler] = {} for language in json_data[compiler]: lines = json_data[compiler][language]['compiler_includes'] changed_lines = [] @@ -42,10 +42,10 @@ def 
 def _try_prepare_as_new_format(compiler_info_file, sources_root):
     json_data = lib.load_json_file(compiler_info_file)
     sources_root_abs = os.path.abspath(sources_root)
-    new_json_data = dict()
+    new_json_data = {}
 
     for compiler_id_string in json_data:
-        new_json_data[compiler_id_string] = dict()
+        new_json_data[compiler_id_string] = {}
 
         include_paths = json_data[compiler_id_string]['compiler_includes']
         changed_includes = [
             lib.change_paths(p, lib.IncludePathModifier(sources_root_abs))
@@ -63,7 +63,7 @@ def _try_prepare_as_new_format(compiler_info_file, sources_root):
 def prepare(compiler_info_file, sources_root):
     try:
         return _try_prepare_as_new_format(compiler_info_file, sources_root)
-    except:
+    except Exception:
         print(f"Failed to parse {compiler_info_file} in the 'new' "
               f"compiler_info format; falling back to the old format...")
         return _try_prepare_as_old_format(compiler_info_file, sources_root)
diff --git a/scripts/debug_tools/renew_info_files.py b/scripts/debug_tools/renew_info_files.py
index af51ed0208..027638cc62 100755
--- a/scripts/debug_tools/renew_info_files.py
+++ b/scripts/debug_tools/renew_info_files.py
@@ -138,12 +138,12 @@ def create_compiler_info_json(old_info, filepath):
                   (c) default compiler standard (string).
     filepath : Path to 'compiler_info.json' file that should be created.
     """
-    info = dict()
+    info = {}
     for compiler in old_info:
         include_paths = process_includes(old_info[compiler]['includes'])
         for idx, _ in enumerate(include_paths):
-            include_paths[idx] = "-isystem %s" % include_paths[idx]
+            include_paths[idx] = f"-isystem {include_paths[idx]}"
 
         compiler_data = {
             "includes": include_paths,
             "target": process_target(old_info[compiler]['target']),
@@ -163,7 +163,7 @@ def create_compiler_info_json(old_info, filepath):
         json.dump(info, dest)
 
 
-if __name__ == '__main__':
+def main():
     parser = argparse.ArgumentParser(
         description="Convert old compiler info or even older compiler "
                     "target and includes files into one info file that "
@@ -177,7 +177,7 @@ def create_compiler_info_json(old_info, filepath):
     args = parser.parse_args()
 
     if not os.path.isdir(args.dir):
-        LOG.error("%s is not a directory" % args.dir)
+        LOG.error("%s is not a directory", args.dir)
         sys.exit(1)
 
     target_file = os.path.join(args.dir, 'compiler_target.json')
@@ -185,7 +185,7 @@ def create_compiler_info_json(old_info, filepath):
 
     if not os.path.isfile(info_file) and not os.path.isfile(target_file):
         LOG.error("Neither an old-version 'compiler_info.json' nor a "
-                  "'compiler_target.json' could be found in '%s'." % args.dir)
+                  "'compiler_target.json' could be found in '%s'.", args.dir)
         sys.exit(2)
 
     if os.path.isfile(info_file):
@@ -222,7 +222,7 @@ def create_compiler_info_json(old_info, filepath):
 
     if not os.path.isfile(includes_file):
         # There is no 'compiler_includes.json' to match the target file.
-        LOG.error("'compiler_includes.json' not found in %s" % args.dir)
+        LOG.error("'compiler_includes.json' not found in %s", args.dir)
         sys.exit(5)
 
     LOG.info("'compiler_[includes/target].json' files detected.")
@@ -235,7 +235,7 @@ def create_compiler_info_json(old_info, filepath):
         target = json.loads(src.read())
 
     # Unify information from the two files.
-    old_info = dict()
+    old_info = {}
     for compiler in includes:
         old_info[compiler] = {"includes": includes[compiler],
                               "target": target[compiler],
@@ -249,3 +249,7 @@ def create_compiler_info_json(old_info, filepath):
 
     LOG.info("Old 'compiler_[includes/target].json' files removed.")
     LOG.info("New 'compiler_info.json' file created.")
+
+
+if __name__ == '__main__':
+    main()
diff --git a/scripts/gerrit_changed_files_to_skipfile.py b/scripts/gerrit_changed_files_to_skipfile.py
index 3ad21a332f..9d122dfd9a 100755
--- a/scripts/gerrit_changed_files_to_skipfile.py
+++ b/scripts/gerrit_changed_files_to_skipfile.py
@@ -24,7 +24,7 @@ def create_skipfile(files_changed, skipfile):
         for filename in json.loads(line):
             if "/COMMIT_MSG" in filename:
                 continue
-            skipfile.write("+*/%s\n" % filename)
+            skipfile.write(f"+*/{filename}\n")
 
     skipfile.write("-*\n")
diff --git a/scripts/labels/__init__.py b/scripts/labels/__init__.py
new file mode 100644
index 0000000000..4259749345
--- /dev/null
+++ b/scripts/labels/__init__.py
@@ -0,0 +1,7 @@
+# -------------------------------------------------------------------------
+#
+#  Part of the CodeChecker project, under the Apache License v2.0 with
+#  LLVM Exceptions. See LICENSE for license information.
+#  SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+#
+# -------------------------------------------------------------------------
diff --git a/scripts/labels/compiler_warnings.py b/scripts/labels/compiler_warnings.py
index fc7be230dd..1a128d2e90 100644
--- a/scripts/labels/compiler_warnings.py
+++ b/scripts/labels/compiler_warnings.py
@@ -63,7 +63,7 @@ def main():
             labels[checker_name].append(f"severity:{severity}")
 
     labels_data["labels"] = dict(sorted(labels.items()))
-    with open(args.label_file, 'w') as f:
+    with open(args.label_file, 'w', encoding='utf-8') as f:
         json.dump(labels_data, f, indent=2)
diff --git a/scripts/labels/doc_url_generate.py b/scripts/labels/doc_url_generate.py
index ddeaf1fafc..f149a987a8 100644
--- a/scripts/labels/doc_url_generate.py
+++ b/scripts/labels/doc_url_generate.py
@@ -18,12 +18,14 @@ def clangsa(label_file):
     for x in root.findall('.//{*}a[@title="Permalink to this headline"]'):
         checker_anchors.append(x.attrib['href'].lstrip('#'))
 
-    with open(label_file) as f:
+    with open(label_file, encoding='utf-8') as f:
         checkers = json.load(f)['labels'].keys()
 
     docs = {}
     for checker in checkers:
         c = checker.lower().replace('.', '-')
+        # next() evaluates the generator immediately.
+        # pylint: disable=cell-var-from-loop
         anchor = next(filter(
             lambda anchor: anchor.startswith(c), checker_anchors), None)
@@ -44,12 +46,14 @@ def clang_tidy(label_file):
     for x in root.findall('.//{*}a[@class="reference external"]'):
         checker_anchors.append(x.attrib['href'])
 
-    with open(label_file) as f:
+    with open(label_file, encoding='utf-8') as f:
         checkers = json.load(f)['labels'].keys()
 
     url = url[:url.rfind('/') + 1]
     docs = {}
     for checker in checkers:
+        # next() evaluates the generator immediately.
+        # pylint: disable=cell-var-from-loop
         anchor = next(filter(
             lambda anchor: anchor.startswith(checker), checker_anchors), None)
@@ -60,7 +64,7 @@ def clang_tidy(label_file):
 
 
 def get_labels_with_docs(label_file, docs):
-    with open(label_file) as f:
+    with open(label_file, encoding='utf-8') as f:
         labels = json.load(f, object_pairs_hook=OrderedDict)
 
     for checker, label in labels['labels'].items():
@@ -126,7 +130,7 @@ def main():
     if args.dry_run:
         print(json.dumps(labels, indent=2))
     else:
-        with open(args.label_file, 'w') as f:
+        with open(args.label_file, 'w', encoding='utf-8') as f:
             json.dump(labels, f, indent=2)
diff --git a/scripts/labels/label_tool/doc_url/verifiers/clang_diagnostic.py b/scripts/labels/label_tool/doc_url/verifiers/clang_diagnostic.py
index 8899f33ed7..69f06305b7 100644
--- a/scripts/labels/label_tool/doc_url/verifiers/clang_diagnostic.py
+++ b/scripts/labels/label_tool/doc_url/verifiers/clang_diagnostic.py
@@ -101,6 +101,7 @@ def _try_anchor(prefixes: Collection[Tuple[str, str]],
                 return urllib.parse.urlparse(url). \
                     _replace(fragment=other_anchor). \
                     geturl()
+            return None
 
         attempt = _try_anchor(self.anchor_prefixes, url)
         if attempt:
diff --git a/scripts/labels/label_tool/doc_url/verifiers/clang_tidy.py b/scripts/labels/label_tool/doc_url/verifiers/clang_tidy.py
index d25682b925..0d70fcdec5 100644
--- a/scripts/labels/label_tool/doc_url/verifiers/clang_tidy.py
+++ b/scripts/labels/label_tool/doc_url/verifiers/clang_tidy.py
@@ -59,11 +59,11 @@ def skip(self, checker: str, url: str) -> Status:
             return Status.MISSING
         return Status.OK
 
-    def reset(self, checker: str, url: str) -> Optional[str]:
+    def reset(self, checker: str, _url: str) -> Optional[str]:
         group, check = checker.split("-", 1)
         return self._reset_pattern(group=group, name=check)
 
-    def try_fix(self, checker: str, url: str) -> Optional[str]:
+    def try_fix(self, checker: str, _url: str) -> Optional[str]:
         group, check = checker.split("-", 1)
         older_release_url = self._release_fixer(
             lambda url_: self.verify(checker, url_)[0] == Status.OK,
@@ -71,3 +71,4 @@ def try_fix(self, checker: str, url: str) -> Optional[str]:
             name=check)
         if older_release_url:
             return older_release_url
+        return None
diff --git a/scripts/labels/label_tool/doc_url/verifiers/clangsa.py b/scripts/labels/label_tool/doc_url/verifiers/clangsa.py
index 394f29e83b..6610c01047 100644
--- a/scripts/labels/label_tool/doc_url/verifiers/clangsa.py
+++ b/scripts/labels/label_tool/doc_url/verifiers/clangsa.py
@@ -74,6 +74,7 @@ def _try_anchor(url: str) -> Optional[str]:
                 return urllib.parse.urlparse(url). \
                     _replace(fragment=other_anchor). \
                     geturl()
+            return None
 
         attempt = _try_anchor(url)
         if attempt:
diff --git a/scripts/labels/label_tool/doc_url/verifiers/generic.py b/scripts/labels/label_tool/doc_url/verifiers/generic.py
index e987048dec..4072c751a9 100644
--- a/scripts/labels/label_tool/doc_url/verifiers/generic.py
+++ b/scripts/labels/label_tool/doc_url/verifiers/generic.py
@@ -29,7 +29,7 @@ class Base:
     def __init__(self, analyser: str):
         self.analyser = analyser
 
-    def skip(self, checker: str, url: str) -> Status:
+    def skip(self, _checker: str, _url: str) -> Status:
         """
         Returns `Status.OK` if the current verifier is capable of verifying
         the `checker`. `Status.SKIP` is returned in case the `checker` is
@@ -38,7 +38,7 @@ def skip(self, checker: str, url: str) -> Status:
         """
         return Status.OK
 
-    def verify(self, checker: str, url: str) -> Outcome:
+    def verify(self, _checker: str, _url: str) -> Outcome:
         """
         Verifies that the documentation page at `url` is available and
         relevant for the `checker` checker.
@@ -47,7 +47,7 @@ def verify(self, checker: str, url: str) -> Outcome:
         """
         return Status.UNKNOWN, None
 
-    def reset(self, checker: str, url: str) -> Optional[str]:
+    def reset(self, _checker: str, _url: str) -> Optional[str]:
         """
         Attempts to reset a potentially fixed (e.g., downgraded)
         documentation URL to its rawest upstream version.
@@ -61,7 +61,7 @@ def reset(self, checker: str, url: str) -> Optional[str]:
         """
         return None
 
-    def try_fix(self, checker: str, url: str) -> Optional[str]:
+    def try_fix(self, _checker: str, _url: str) -> Optional[str]:
         """
         Attempts to fix the documentation supposedly available, but in fact
         missing from `url` for `checker` to some alternative version that is
@@ -156,7 +156,7 @@ def check_response(self, response: http.Response) -> Ternary:
                   status, response.reason)
         return None
 
-    def skip(self, checker: str, url: str) -> Status:
+    def skip(self, _checker: str, url: str) -> Status:
         return Status.MISSING if not url else Status.OK
 
     ResponseToVerifyStatus = {True: Status.OK,
@@ -223,7 +223,7 @@ def verify(self, checker: str, url: str) -> Outcome:
         try:
             trace("%s/%s -> %s ...", self.analyser, checker, url)
             response = self.get_url(page)
-        except lxml.etree.LxmlError:
+        except lxml.etree.LxmlError:  # pylint: disable=c-extension-no-member
             import traceback
             traceback.print_exc()
@@ -236,7 +236,7 @@ def verify(self, checker: str, url: str) -> Outcome:
             return self.ResponseToVerifyStatus[http_status], response
 
         dom = self.get_dom(page)
-        if not (dom is not None):
+        if dom is None:
             return Status.NOT_OK, response
         dom = cast(html.HtmlElement, dom)  # mypy does not recognise the if.
@@ -271,7 +271,7 @@ def find_anchors_for_text(self, url: str, text: str) -> Iterable[str]:
         """
         page, _ = self._http.split_anchor(url)
         dom = self.get_dom(page)
-        if not (dom is not None):
+        if dom is None:
             return iter(())
         dom = cast(html.HtmlElement, dom)
diff --git a/scripts/labels/label_tool/doc_url/verifiers/llvm/releases.py b/scripts/labels/label_tool/doc_url/verifiers/llvm/releases.py
index 3f01661205..59f6253d8c 100644
--- a/scripts/labels/label_tool/doc_url/verifiers/llvm/releases.py
+++ b/scripts/labels/label_tool/doc_url/verifiers/llvm/releases.py
@@ -41,7 +41,7 @@ def fetch_llvm_release_versions() -> Versions:
         traceback.print_exc()
         error("Failed to download or parse page '%s'!", url)
 
-        return list()
+        return []
     finally:
         try:
             del os.environ["MOZ_HEADLESS"]
diff --git a/scripts/labels/label_tool/doc_url/verifiers/status.py b/scripts/labels/label_tool/doc_url/verifiers/status.py
index dbdf8c7695..022a5a5b30 100644
--- a/scripts/labels/label_tool/doc_url/verifiers/status.py
+++ b/scripts/labels/label_tool/doc_url/verifiers/status.py
@@ -11,23 +11,23 @@
 class Status(Enum):
     """The outcome of an attempt at verifying a checker's documentation."""
 
-    """The result could not be determined."""
     UNKNOWN = Enumerator()
+    """The result could not be determined."""
 
+    SKIP = Enumerator()
     """
     The verifier engine skipped verifying the checker.
 
     This is an internal indicator used for "multi-pass" verifications, and
     it is not normally reported to the user.
""" - SKIP = Enumerator() + MISSING = Enumerator() """ The verification could not execute because the documentation data is empty. """ - MISSING = Enumerator() - """Successful.""" OK = Enumerator() + """Successful.""" - """Not successful. (Deterministic result.)""" NOT_OK = Enumerator() + """Not successful. (Deterministic result.)""" diff --git a/scripts/labels/label_tool/doc_url/verify_tool/__main__.py b/scripts/labels/label_tool/doc_url/verify_tool/__main__.py index 3fe70f4a2a..3717805e9d 100755 --- a/scripts/labels/label_tool/doc_url/verify_tool/__main__.py +++ b/scripts/labels/label_tool/doc_url/verify_tool/__main__.py @@ -59,13 +59,13 @@ """ f""" Having found checkers without a 'doc_url' label will set the bit -'{tool.ReturnFlags.HadMissing}'. +'{tool.ReturnFlags.HAD_MISSING}'. Having found checkers that have a "Not OK" label will set the bit -'{tool.ReturnFlags.HadNotOK}'. +'{tool.ReturnFlags.HAD_NOT_OK}'. Having found checkers that were "Not OK" but managed to obtain a fixed, -working URL will set the bit '{tool.ReturnFlags.HadFound}'. +working URL will set the bit '{tool.ReturnFlags.HAD_FOUND}'. Having found checkers that were "Not OK" and failed the attempted -automatic fixing routing will set the bit '{tool.ReturnFlags.HadGone}'. +automatic fixing routing will set the bit '{tool.ReturnFlags.HAD_GONE}'. """ ) epilogue: str = "" @@ -166,7 +166,7 @@ def default_checker_label_dir() -> Optional[pathlib.Path]: metavar="CHECKER", nargs='*', type=str, - help=""" + help=f""" Filter for only the specified checkers before executing the verification. This filter matches only for the checker's name (as present in the configuration file), and every checker of every candidate analyser is matched @@ -174,12 +174,13 @@ def default_checker_label_dir() -> Optional[pathlib.Path]: It is not an error to specify filters that do not match anything. It is possible to match entire patterns of names using the '?' and '*' wildcards, as understood by the 'fnmatch' library, see -"https://docs.python.org/%d.%d/library/fnmatch.html#fnmatch.fnmatchcase" +"https://docs.python.org/{sys.version_info[0]}.{sys.version_info[1]}/library/\ +fnmatch.html#fnmatch.fnmatchcase" for details. Depending on your shell, you might have to specify wildcards in single quotes, e.g., 'alpha.*', to prevent the shell from globbing first! If 'None' is given, automatically run for every checker. -""" % (sys.version_info[0], sys.version_info[1])) +""") output = parser.add_argument_group("output control arguments", """ These optional arguments allow enabling additional verbosity for the output @@ -234,45 +235,45 @@ def default_checker_label_dir() -> Optional[pathlib.Path]: return parser -def _handle_package_args(args: argparse.Namespace): - if not args.checker_label_dir: +def _handle_package_args(args_: argparse.Namespace): + if not args_.checker_label_dir: log("%sFATAL: Failed to find the checker label configuration " "directory, and it was not specified. 
" "Please specify!", emoji(":no_entry: ")) raise argparse.ArgumentError(None, "positional argument 'checker_label_dir'") - if args.jobs < 0: + if args_.jobs < 0: log("%sFATAL: There can not be a non-positive number of jobs.", emoji(":no_entry: ")) raise argparse.ArgumentError(None, "-j/--jobs") - OutputSettings.set_report_missing(args.report_missing or - args.verbose or - args.very_verbose) - OutputSettings.set_report_ok(args.report_ok or - args.verbose or - args.very_verbose) - GlobalOutputSettings.set_trace(args.verbose_debug or args.very_verbose) + OutputSettings.set_report_missing(args_.report_missing or + args_.verbose or + args_.very_verbose) + OutputSettings.set_report_ok(args_.report_ok or + args_.verbose or + args_.very_verbose) + GlobalOutputSettings.set_trace(args_.verbose_debug or args_.very_verbose) -def main(args: argparse.Namespace) -> Optional[int]: +def main(args_: argparse.Namespace) -> Optional[int]: try: - _handle_package_args(args) - except argparse.ArgumentError: + _handle_package_args(args_) + except argparse.ArgumentError as err: # Simulate argparse's return code of parse_args(). - raise SystemExit(2) + raise SystemExit(2) from err rc = 0 - statistics: List[tool.Statistics] = list() - trace("Checking checker labels from '%s'", args.checker_label_dir) + statistics: List[tool.Statistics] = [] + trace("Checking checker labels from '%s'", args_.checker_label_dir) - args.checker_label_dir = pathlib.Path(args.checker_label_dir) - if not args.checker_label_dir.is_dir(): - error("'%s' is not a directory!", args.checker_label_dir) + args_.checker_label_dir = pathlib.Path(args_.checker_label_dir) + if not args_.checker_label_dir.is_dir(): + error("'%s' is not a directory!", args_.checker_label_dir) return 1 # FIXME: pathlib.Path.walk() is only available Python >= 3.12. 
- for root, _, files in os.walk(args.checker_label_dir): + for root, _, files in os.walk(args_.checker_label_dir): root = pathlib.Path(root) for file in sorted(files): @@ -280,7 +281,7 @@ def main(args: argparse.Namespace) -> Optional[int]: if file.suffix != ".json": continue analyser = file.stem - if args.analysers and analyser not in args.analysers: + if args_.analysers and analyser not in args_.analysers: continue path = root / file @@ -297,22 +298,22 @@ def main(args: argparse.Namespace) -> Optional[int]: error("Failed to obtain checker labels for '%s'!", analyser) continue - if args.checkers: + if args_.checkers: labels = {checker: url for checker, url in labels.items() - for filter_ in args.checkers + for filter_ in args_.checkers if fnmatch.fnmatchcase(checker, filter_)} if not labels: - log("%sNo checkers are configured%s.", - emoji(":cup_with_straw: "), - " or match the \"--checkers\" %s" - % plural(args.checkers, "filter", "filters") - if args.checkers else "") + filt = " or match the \"--checkers\" %s" + \ + plural(args_.checkers, "filter", "filters") \ + if args_.checkers else "" + log(f'{emoji(":cup_with_straw: ")}' + f'No checkers are configured{filt}.') continue - process_count = clamp(1, args.jobs, len(labels)) \ - if len(labels) > 2 * args.jobs else 1 - fixes: SingleLabels = dict() + process_count = clamp(1, args_.jobs, len(labels)) \ + if len(labels) > 2 * args_.jobs else 1 + fixes: SingleLabels = {} conflicts: Set[str] = set() for verifier in analyser_selection.select_verifier(analyser, labels): @@ -325,8 +326,8 @@ def main(args: argparse.Namespace) -> Optional[int]: verifier, labels, process_count, - args.skip_fixes, - args.reset_to_upstream, + args_.skip_fixes, + args_.reset_to_upstream, ) statistics.append(statistic) rc = int(tool.ReturnFlags(rc) | status) @@ -347,10 +348,10 @@ def main(args: argparse.Namespace) -> Optional[int]: except KeyError: fixes[checker] = fix - if args.apply_fixes and fixes: + if args_.apply_fixes and fixes: log("%sUpdating %s %s for '%s'... 
                 log("%sUpdating %s %s for '%s'... ('%s')",
                     emoji(":writing_hand: "),
-                    coloured("%d" % len(fixes), "green"),
+                    coloured(len(fixes), "green"),
                     plural(fixes, "checker", "checkers"),
                     analyser,
                     path)
diff --git a/scripts/labels/label_tool/doc_url/verify_tool/action.py b/scripts/labels/label_tool/doc_url/verify_tool/action.py
index f3937fc4a3..99c3684345 100644
--- a/scripts/labels/label_tool/doc_url/verify_tool/action.py
+++ b/scripts/labels/label_tool/doc_url/verify_tool/action.py
@@ -103,10 +103,10 @@ def verify_checker(verifier: HTTPStatusCodeVerifier,
 
 def run_verification(pool: Pool, urls: SingleLabels) \
         -> Tuple[List[str], int, List[str], List[str]]:
-    ok: List[str] = list()
+    ok: List[str] = []
     skip = 0
-    not_ok: List[str] = list()
-    missing: List[str] = list()
+    not_ok: List[str] = []
+    missing: List[str] = []
 
     def _consume_result(checker: str, s: Status):
         if s == Status.OK:
@@ -149,7 +149,7 @@ def reset_checker(verifier: HTTPStatusCodeVerifier,
 
 def run_reset(pool: Pool, urls: SingleLabels) -> Tuple[int, SingleLabels]:
     attempted = 0
-    new_urls: SingleLabels = dict()
+    new_urls: SingleLabels = {}
 
     def _consume_result(checker: str,
                         was_attempted: bool,
@@ -195,8 +195,8 @@ def try_fix_checker(verifier: HTTPStatusCodeVerifier,
 
 def run_fixes(pool: Pool, urls: SingleLabels) \
         -> Tuple[SingleLabels, SingleLabels]:
-    found: SingleLabels = dict()
-    gone: SingleLabels = dict()
+    found: SingleLabels = {}
+    gone: SingleLabels = {}
 
     def _consume_result(checker: str,
                         old_url: Optional[str],
diff --git a/scripts/labels/label_tool/doc_url/verify_tool/report.py b/scripts/labels/label_tool/doc_url/verify_tool/report.py
index f6f061c869..d938c0d8fd 100644
--- a/scripts/labels/label_tool/doc_url/verify_tool/report.py
+++ b/scripts/labels/label_tool/doc_url/verify_tool/report.py
@@ -25,7 +25,7 @@ def print_verifications(analyser: str,
             emoji(":magnifying_glass_tilted_left:"
                   ":magnifying_glass_tilted_right: "),
             analyser,
-            coloured("%d" % len(missing), "yellow"),
+            coloured(len(missing), "yellow"),
             plural(missing, "checker", "checkers"),
             plural(missing, "does", "do"),
             )
@@ -41,16 +41,16 @@ def print_verifications(analyser: str,
         log("%s%s: All %s %s successfully verified.",
             emoji(":magnifying_glass_tilted_left::check_mark_button: "),
             analyser,
-            coloured("%d" % len(ok), "green"),
+            coloured(len(ok), "green"),
             plural(ok, "checker", "checkers"),
             )
     else:
         log("%s%s: %s %s failed documentation verification. (%s succeeded.)",
             emoji(":magnifying_glass_tilted_left::warning: "),
             analyser,
-            coloured("%d" % len(not_ok), "red"),
+            coloured(len(not_ok), "red"),
             plural(not_ok, "checker", "checkers"),
-            coloured("%d" % len(ok), "green")
+            coloured(len(ok), "green")
             if ok else coloured("0", "red"),
             )
@@ -79,9 +79,9 @@ def print_resets(analyser: str,
     log("%s%s: Tried to reset %s %s documentation URL. %s changed.",
         emoji(":magnifying_glass_tilted_left::right_arrow_curving_left: "),
         analyser,
-        coloured("%d" % attempted, "magenta"),
+        coloured(attempted, "magenta"),
        plural(attempted, "checker's", "checkers'"),
-        coloured("%d" % len(new_urls), "cyan")
+        coloured(len(new_urls), "cyan")
        if new_urls else coloured("0", "red"),
        )
     deque((log("    %s· %s [%s]",
@@ -93,7 +93,7 @@ def print_resets(analyser: str,
 
 
 def print_fixes(analyser: str,
-                urls: SingleLabels,
+                _urls: SingleLabels,
                 found: SingleLabels,
                 gone: SingleLabels):
     if not gone:
@@ -101,7 +101,7 @@ def print_fixes(analyser: str,
         log("%s%s: Found new documentation for all %s %s.",
             emoji(":magnifying_glass_tilted_left::telescope: "),
             analyser,
-            coloured("%d" % len(found), "green"),
+            coloured(len(found), "green"),
             plural(len(found), "checker", "checkers"),
             )
     else:
@@ -109,16 +109,16 @@ def print_fixes(analyser: str,
             log("%s%s: All %s %s gone.",
                 emoji(":magnifying_glass_tilted_left::headstone: "),
                 analyser,
-                coloured("%d" % len(gone), "red"),
+                coloured(len(gone), "red"),
                 plural(len(gone), "checker", "checkers"),
                 )
         else:
             log("%s%s: %s %s gone. (Found %s.)",
                 emoji(":magnifying_glass_tilted_left::bar_chart: "),
                 analyser,
-                coloured("%d" % len(gone), "red"),
+                coloured(len(gone), "red"),
                 plural(len(gone), "checker", "checkers"),
-                coloured("%d" % len(found), "green")
+                coloured(len(found), "green")
                 if found else coloured("0", "red")
                 )
diff --git a/scripts/labels/label_tool/doc_url/verify_tool/tool.py b/scripts/labels/label_tool/doc_url/verify_tool/tool.py
index fc0a27c4f2..dce24b1567 100644
--- a/scripts/labels/label_tool/doc_url/verify_tool/tool.py
+++ b/scripts/labels/label_tool/doc_url/verify_tool/tool.py
@@ -43,13 +43,13 @@ class ReturnFlags(IntFlag):
     # Zero indicates an all-success, but `Enumerator()` starts from 1.
 
     # Reserved flags used for other purposes external to the tool.
-    GeneralError = Enumerator()
-    ConfigurationOrArgsError = Enumerator()
+    GENERAL_ERROR = Enumerator()
+    CONFIGURATION_OR_ARGS_ERROR = Enumerator()
 
-    HadMissing = Enumerator()
-    HadNotOK = Enumerator()
-    HadFound = Enumerator()
-    HadGone = Enumerator()
+    HAD_MISSING = Enumerator()
+    HAD_NOT_OK = Enumerator()
+    HAD_FOUND = Enumerator()
+    HAD_GONE = Enumerator()
 
 
 def execute(analyser: str,
@@ -88,7 +88,7 @@ def execute(analyser: str,
         stats = stats._replace(Reset=len(new_urls) if new_urls else None,
                                )
 
-    urls_to_save: SingleLabels = dict()
+    urls_to_save: SingleLabels = {}
     ok, skip, not_ok, missing = action.run_verification(pool, labels)
     report.print_verifications(analyser, labels, ok, not_ok, missing)
     urls_to_save.update({checker: labels[checker] for checker in ok})
@@ -99,10 +99,10 @@ def execute(analyser: str,
                            OK=len(ok) if ok else None,
                            Not_OK=len(not_ok) if not_ok else None,
                            )
-    status = status | (ReturnFlags.HadMissing if missing else 0)
+    status = status | (ReturnFlags.HAD_MISSING if missing else 0)
 
     if not_ok:
-        status |= ReturnFlags.HadNotOK
+        status |= ReturnFlags.HAD_NOT_OK
         if not skip_fixes:
             found, gone = action.run_fixes(
                 pool, {checker: labels[checker] for checker
@@ -113,7 +113,7 @@ def execute(analyser: str,
             stats = stats._replace(Found=len(found) if found else None,
                                    Gone=len(gone) if gone else None,
                                    )
-            status = status | (ReturnFlags.HadFound if found else 0) \
-                | (ReturnFlags.HadGone if gone else 0)
+            status = status | (ReturnFlags.HAD_FOUND if found else 0) \
+                | (ReturnFlags.HAD_GONE if gone else 0)
 
     return status, urls_to_save, stats
diff --git a/scripts/labels/label_tool/http_.py b/scripts/labels/label_tool/http_.py
index 1cbef1f384..ceaa93c23c 100644
--- a/scripts/labels/label_tool/http_.py
+++ b/scripts/labels/label_tool/http_.py
@@ -18,7 +18,7 @@
 Response = urllib3.response.BaseHTTPResponse
 
-URL = Union[str, urllib.parse.ParseResult]
+Url = Union[str, urllib.parse.ParseResult]
 
 
 class HTMLAcquirer:
@@ -43,7 +43,7 @@ def _get_url_raw(self, url: str) -> Response:
         trace("HTTP GET '%s'", url)
         return self._pool.request("GET", url)
 
-    def get_url(self, url: URL) -> Response:
+    def get_url(self, url: Url) -> Response:
         """
         Downloads the content of `url` and returns the raw HTTP response.
         """
@@ -61,7 +61,7 @@ def _get_dom_raw(self, url: str) -> Optional[html.HtmlElement]:
         dom = html.fromstring(response.data) if response.data else None
         return dom
 
-    def get_dom(self, url: URL) -> Optional[html.HtmlElement]:
+    def get_dom(self, url: Url) -> Optional[html.HtmlElement]:
         """
         Downloads the content of `url`.
         If the download is successful, parses the obtained HTML and returns the
@@ -71,7 +71,7 @@ def get_dom(self, url: URL) -> Optional[html.HtmlElement]:
             url = url.geturl()
         return self._get_dom_raw(url)
 
-    def split_anchor(self, url: URL) -> Tuple[str, str]:
+    def split_anchor(self, url: Url) -> Tuple[str, str]:
         if isinstance(url, str) and '#' not in url:
             return url, ""
 
@@ -97,10 +97,10 @@ class CachingHTMLAcquirer(HTMLAcquirer):
     def __init__(self, cache_size: int = DefaultCacheSize):
         super().__init__()
         self._cache_capacity = cache_size
-        self._cache: Dict[str, CachingHTMLAcquirer.CacheType] = dict()
-        self._cache_lru: Dict[str, datetime.datetime] = dict()
+        self._cache: Dict[str, CachingHTMLAcquirer.CacheType] = {}
+        self._cache_lru: Dict[str, datetime.datetime] = {}
 
-    def get_url(self, url: URL) -> Response:
+    def get_url(self, url: Url) -> Response:
         """
         Downloads the content of `url` after stripping the HTML anchor off
         of the request, and returns the raw HTTP response.
@@ -115,7 +115,7 @@ def get_url(self, url: URL) -> Response:
             response, _ = cached
         return response
 
-    def get_dom(self, url: URL) -> Optional[html.HtmlElement]:
+    def get_dom(self, url: Url) -> Optional[html.HtmlElement]:
         """
         Downloads the content of `url` after stripping the HTML anchor off
         of the request.
diff --git a/scripts/labels/label_tool/output.py b/scripts/labels/label_tool/output.py
index 32a69e011c..78071ebeae 100644
--- a/scripts/labels/label_tool/output.py
+++ b/scripts/labels/label_tool/output.py
@@ -62,18 +62,19 @@ def log(fmt: str, *args, **kwargs):
 
 def _log_with_prefix(prefix: str, fmt: str, *args, **kwargs):
     """Logging stub."""
-    fmt = "%s: %s" % (prefix, fmt)
+    fmt = f"{prefix}: {fmt}"
     return log(fmt, *args, **kwargs)
 
 
 def error(fmt: str, *args, **kwargs):
     """Logging stub."""
-    return _log_with_prefix("%sERROR" % emoji(":warning: "),
+    return _log_with_prefix(f"{emoji(':warning: ')}ERROR",
                             fmt, *args, **kwargs)
 
 
 def trace(fmt: str, *args, **kwargs):
     """Logging stub."""
     if Settings.trace():
-        return _log_with_prefix("%sTRACE" % emoji(":speech_balloon: "),
+        return _log_with_prefix(f'{emoji(":speech_balloon: ")}TRACE',
                                 fmt, *args, **kwargs)
+    return None
diff --git a/scripts/labels/label_tool/transformer.py b/scripts/labels/label_tool/transformer.py
index ea634b2e89..9eac6dd9fc 100644
--- a/scripts/labels/label_tool/transformer.py
+++ b/scripts/labels/label_tool/transformer.py
@@ -76,7 +76,7 @@ def __init__(self, releases: LazyVersions):
         self._rules: Dict[Union[Version,
                                 packaging.version.InfinityType,
                                 packaging.version.NegativeInfinityType],
-                          Optional[Callable]] = dict()
+                          Optional[Callable]] = {}
         self._rules[packaging.version.NegativeInfinity] = None
         self._rules[packaging.version.Infinity] = None
diff --git a/scripts/labels/label_tool/util.py b/scripts/labels/label_tool/util.py
index 5fd448fdda..9fa1d3f482 100644
--- a/scripts/labels/label_tool/util.py
+++ b/scripts/labels/label_tool/util.py
@@ -25,7 +25,7 @@ class _Singleton:
     without having to serialise a complex state object over the communication
     channel between the manager and the processes.
     """
-    _instances: Dict[Type, Any] = dict()
+    _instances: Dict[Type, Any] = {}
 
     def __new__(cls, *args, **kwargs) -> object:
         if cls not in cls._instances:
diff --git a/scripts/labels/mdl.py b/scripts/labels/mdl.py
index b1ac7ec701..4b93e683de 100644
--- a/scripts/labels/mdl.py
+++ b/scripts/labels/mdl.py
@@ -2,7 +2,6 @@
 import json
 import re
 import urllib3
-import xml.etree.ElementTree as ET
 
 
 def cli_args():
@@ -45,7 +44,7 @@ def main():
             "severity:STYLE"
         ]
 
-    with open(args.label_file, 'w') as f:
+    with open(args.label_file, 'w', encoding='utf-8') as f:
         json.dump({
             "analyzer": "mdl",
             "labels": dict(sorted(labels.items()))
diff --git a/scripts/resources/__init__.py b/scripts/resources/__init__.py
new file mode 100644
index 0000000000..4259749345
--- /dev/null
+++ b/scripts/resources/__init__.py
@@ -0,0 +1,7 @@
+# -------------------------------------------------------------------------
+#
+#  Part of the CodeChecker project, under the Apache License v2.0 with
+#  LLVM Exceptions. See LICENSE for license information.
+#  SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+#
+# -------------------------------------------------------------------------
diff --git a/scripts/send_log_config.py b/scripts/send_log_config.py
index 3665c01eb3..63a1c76462 100755
--- a/scripts/send_log_config.py
+++ b/scripts/send_log_config.py
@@ -14,51 +14,56 @@
 import struct
 
 
-parser = argparse.ArgumentParser(
-    description="""
-Send a python log config (json) to a port where a logging process listens.
+def main():
+    parser = argparse.ArgumentParser(
+        description="""
+    Send a python log config (json) to a port where a logging process listens.
 
-Further details about the log configuration format and usage can be found here:
-https://docs.python.org/2/library/logging.config.html """,
-    formatter_class=argparse.RawTextHelpFormatter)
+    Further details about the log configuration format and usage can be found
+    here:
+    https://docs.python.org/2/library/logging.config.html """,
+        formatter_class=argparse.RawTextHelpFormatter)
 
-parser.add_argument('-c', action="store", required="True",
-                    dest="config_file",
-                    help="Log configuration in json format.")
+    parser.add_argument('-c', action="store", required="True",
+                        dest="config_file",
+                        help="Log configuration in json format.")
 
-parser.add_argument('-p', action="store", required="True",
-                    dest="port", type=int,
-                    help="Port number of the logger server.")
+    parser.add_argument('-p', action="store", required="True",
+                        dest="port", type=int,
+                        help="Port number of the logger server.")
 
-args = parser.parse_args()
+    args = parser.parse_args()
 
-try:
-    with open(args.config_file, 'rb') as cf:
-        log_config = cf.read()
+    try:
+        with open(args.config_file, 'rb') as cf:
+            log_config = cf.read()
 
-    # Just a simple check for valid json before sending
-    json.loads(log_config)
+        # Just a simple check for valid json before sending
+        json.loads(log_config)
 
-    data_to_send = log_config
+        data_to_send = log_config
 
-    host = 'localhost'
-    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+        host = 'localhost'
+        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
 
-    print('Connecting to {0}:{1} ...'.format(host, args.port))
-    s.connect((host, args.port))
-    print('Connection done.')
+        print(f'Connecting to {host}:{args.port} ...')
+        s.connect((host, args.port))
+        print('Connection done.')
 
-    print('Sending config ...')
-    s.send(struct.pack('>L', len(data_to_send)))
-    s.send(data_to_send)
-    s.close()
-    print('Sending config done.')
+        print('Sending config ...')
+        s.send(struct.pack('>L', len(data_to_send)))
+        s.send(data_to_send)
+        s.close()
+        print('Sending config done.')
 
-except OSError as ex:
-    print("Failed to read config file" + args.config_file)
-    print(ex)
-    print(ex.strerror)
+    except OSError as ex:
+        print("Failed to read config file " + args.config_file)
+        print(ex)
+        print(ex.strerror)
 
-except ValueError as ex:
-    print("Wrong config file format.")
-    print(ex)
+    except ValueError as ex:
+        print("Wrong config file format.")
+        print(ex)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/scripts/test/__init__.py b/scripts/test/__init__.py
new file mode 100644
index 0000000000..4259749345
--- /dev/null
+++ b/scripts/test/__init__.py
@@ -0,0 +1,7 @@
+# -------------------------------------------------------------------------
+#
+#  Part of the CodeChecker project, under the Apache License v2.0 with
+#  LLVM Exceptions. See LICENSE for license information.
+#  SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+#
+# -------------------------------------------------------------------------
diff --git a/scripts/test/func_template/__init__.py b/scripts/test/func_template/__init__.py
new file mode 100644
index 0000000000..4259749345
--- /dev/null
+++ b/scripts/test/func_template/__init__.py
@@ -0,0 +1,7 @@
+# -------------------------------------------------------------------------
+#
+#  Part of the CodeChecker project, under the Apache License v2.0 with
+#  LLVM Exceptions. See LICENSE for license information.
+#  SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+#
+# -------------------------------------------------------------------------
diff --git a/scripts/test/func_template/template_test.py b/scripts/test/func_template/template_test.py
index 0019432d0f..9903edc310 100644
--- a/scripts/test/func_template/template_test.py
+++ b/scripts/test/func_template/template_test.py
@@ -18,6 +18,7 @@
 
 import os
 import shutil
+import sys
 import unittest
 import uuid
@@ -26,6 +27,9 @@
 from libtest import project
 
 
+# This is a test skeleton. The unused variables will probably be used in the
+# actual tests.
+# pylint: disable=unused-variable,unused-argument
 class TestSkeleton(unittest.TestCase):
 
     _ccClient = None
@@ -166,4 +170,3 @@ def test_skel(self):
         """
         Test some feature.
         """
-        pass
diff --git a/scripts/test/run_server_performance_test.py b/scripts/test/run_server_performance_test.py
index 73d39a13fb..578d565cae 100644
--- a/scripts/test/run_server_performance_test.py
+++ b/scripts/test/run_server_performance_test.py
@@ -62,9 +62,9 @@ def print_process_output(message, stdout, stderr):
         return
 
     LOG.info(message)
-    LOG.info('-' * 20 + 'stdout' + '-' * 20)
+    LOG.info('%sstdout%s', '-' * 20, '-' * 20)
     print(stdout)
-    LOG.info('-' * 20 + 'stderr' + '-' * 20)
+    LOG.info('%sstderr%s', '-' * 20, '-' * 20)
     print(stderr)
     LOG.info('-' * (40 + len('stdout')))
@@ -179,7 +179,7 @@ class UserSimulator:
     def __init__(self, stat, beta):
         UserSimulator._counter += 1
         self._id = UserSimulator._counter
-        self._actions = list()
+        self._actions = []
         self._stat = stat
         self._beta = beta
@@ -209,7 +209,7 @@ def play(self):
 
             # The exit code of some commands (e.g. CodeChecker cmd diff) can be
             # 2 if some reports were found. We consider this exit code normal.
-            if ret != 0 and ret != 2:
+            if ret not in (0, 2):
                 LOG.error("'%s' job has failed with '%d' error code!",
                           name, ret)
@@ -407,7 +407,7 @@ def main():
     stat = StatManager()
     timer = None
 
-    def finish_test(signum, frame):
+    def finish_test(signum, _):
         LOG.error('-----> Performance test stops. '
                   'Please wait for stopping all subprocesses. <-----')
diff --git a/scripts/thrift/__init__.py b/scripts/thrift/__init__.py
new file mode 100644
index 0000000000..4259749345
--- /dev/null
+++ b/scripts/thrift/__init__.py
@@ -0,0 +1,7 @@
+# -------------------------------------------------------------------------
+#
+#  Part of the CodeChecker project, under the Apache License v2.0 with
+#  LLVM Exceptions. See LICENSE for license information.
+#  SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+#
+# -------------------------------------------------------------------------
diff --git a/scripts/thrift/client.py b/scripts/thrift/client.py
index 685005cf4a..337a9f3881 100644
--- a/scripts/thrift/client.py
+++ b/scripts/thrift/client.py
@@ -27,7 +27,7 @@
     from thrift.transport import THttpClient
     from thrift.protocol import TJSONProtocol
     from thrift.Thrift import TApplicationException
-except:
+except Exception:
     print("'thrift' package (https://pypi.org/project/thrift/) is not "
           "available in your environment. Please install it before you run "
           "this script again.")
@@ -46,21 +46,24 @@
         serverInfoService as ServerInfoAPI_v6
     from codechecker_api_shared.ttypes import RequestFailed
-except:
+except Exception:
     print("'codechecker_api' and 'codechecker_api_shared' must be available "
           "in your environment to run this script. Please install it before "
           "you run this script again:")
-    print(" - https://github.com/Ericsson/codechecker/blob/master/web/api/py/codechecker_api/dist/codechecker_api.tar.gz")
-    print(" - https://github.com/Ericsson/codechecker/blob/master/web/api/py/codechecker_api_shared/dist/codechecker_api_shared.tar.gz")
+    print(" - https://github.com/Ericsson/codechecker/blob/master/web/api/py"
+          "/codechecker_api/dist/codechecker_api.tar.gz")
+    print(" - https://github.com/Ericsson/codechecker/blob/master/web/api/py"
+          "/codechecker_api_shared/dist/codechecker_api_shared.tar.gz")
     sys.exit(1)
 
 
 def get_client_api_version() -> str:
     """ Get client api version from the installed codechecker package. """
     p = subprocess.run([
-        "pip3", "show", "codechecker_api"], stdout=subprocess.PIPE)
+        "pip3", "show", "codechecker_api"], stdout=subprocess.PIPE,
+        check=False)
     ver = p.stdout.decode('utf-8').strip().split('\n')[1]
-    res = re.search('^Version:\ (.*)$', ver)
+    res = re.search('^Version: (.*)$', ver)
     return res.group(1)
@@ -91,8 +94,16 @@ def create_client(
     return cls.Client(protocol)
 
 
-def main(args):
+def main():
     """ Send multiple Thrift API requests to the server. """
+    parser = argparse.ArgumentParser(
+        prog="client",
+        description="""
+Python client to communicate with a CodeChecker server.""",
+        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
+    __add_arguments_to_parser(parser)
+    args = parser.parse_args()
+
     # Get server info.
     cli_server_info = create_client(args, ServerInfoAPI_v6, "ServerInfo")
     package_version = cli_server_info.getPackageVersion()
@@ -176,12 +187,4 @@ def __add_arguments_to_parser(parser):
                         help="Password.")
 
 if __name__ == "__main__":
-    parser = argparse.ArgumentParser(
-        prog="client",
-        description="""
-Python client to communicate with a CodeChecker server.""",
-        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
-    __add_arguments_to_parser(parser)
-    args = parser.parse_args()
-
-    main(args)
+    main()
diff --git a/tools/bazel/requirements_py/dev/requirements.txt b/tools/bazel/requirements_py/dev/requirements.txt
index f0492af5b4..20cbbe3930 100644
--- a/tools/bazel/requirements_py/dev/requirements.txt
+++ b/tools/bazel/requirements_py/dev/requirements.txt
@@ -1,4 +1,4 @@
 pytest==7.3.1
 pycodestyle==2.12.0
-pylint==2.8.2
+pylint==3.2.4
 mypy==1.7.1
diff --git a/tools/report-converter/codechecker_report_converter/analyzers/analyzer_result.py b/tools/report-converter/codechecker_report_converter/analyzers/analyzer_result.py
index 2c6a439050..942f61da0f 100644
--- a/tools/report-converter/codechecker_report_converter/analyzers/analyzer_result.py
+++ b/tools/report-converter/codechecker_report_converter/analyzers/analyzer_result.py
@@ -86,7 +86,7 @@ def transform(
         return bool(all_reports)
 
     @abstractmethod
-    def get_reports(self, analyzer_result_file_path: str) -> List[Report]:
+    def get_reports(self, file_path: str) -> List[Report]:
         """ Get reports from the given analyzer result. """
         raise NotImplementedError("Subclasses should implement this!")
diff --git a/tools/report-converter/codechecker_report_converter/analyzers/clang_tidy/parser.py b/tools/report-converter/codechecker_report_converter/analyzers/clang_tidy/parser.py
index 646712527a..fabe84f9ad 100644
--- a/tools/report-converter/codechecker_report_converter/analyzers/clang_tidy/parser.py
+++ b/tools/report-converter/codechecker_report_converter/analyzers/clang_tidy/parser.py
@@ -25,7 +25,7 @@ class Parser(BaseParser):
     """ Parser for clang-tidy console output. """
 
     def __init__(self):
-        super(Parser, self).__init__()
+        super().__init__()
 
         # Regex for parsing a clang-tidy message.
         self.message_line_re = re.compile(
@@ -58,7 +58,7 @@ def __init__(self):
         # Matches pre Clang 17 fix-its:
         #     "  fixit-text"
         self.fixit_old_re = re.compile(
-            r'^\s+(?P<message>\S.*)')
+            r'^\s*(?P<message>\S.*)')
 
         # Matches post clang 17 fix-its
         #     "  28 | fixit-text"
@@ -111,7 +111,7 @@ def _parse_line(
 
                 reports.append(r)
 
-            return reports, line 
+            return reports, line
 
     def _get_category(self, checker_name: str) -> str:
""" diff --git a/tools/report-converter/codechecker_report_converter/analyzers/coccinelle/parser.py b/tools/report-converter/codechecker_report_converter/analyzers/coccinelle/parser.py index c1cd5890eb..d0f3763b55 100644 --- a/tools/report-converter/codechecker_report_converter/analyzers/coccinelle/parser.py +++ b/tools/report-converter/codechecker_report_converter/analyzers/coccinelle/parser.py @@ -25,7 +25,7 @@ class Parser(BaseParser): """ def __init__(self, analyzer_result: str): - super(Parser, self).__init__() + super().__init__() self.analyzer_result = analyzer_result self.checker_name: str = '' diff --git a/tools/report-converter/codechecker_report_converter/analyzers/cppcheck/analyzer_result.py b/tools/report-converter/codechecker_report_converter/analyzers/cppcheck/analyzer_result.py index fbfae3cf05..9f4a294d30 100644 --- a/tools/report-converter/codechecker_report_converter/analyzers/cppcheck/analyzer_result.py +++ b/tools/report-converter/codechecker_report_converter/analyzers/cppcheck/analyzer_result.py @@ -29,17 +29,15 @@ class AnalyzerResult(AnalyzerResultBase): NAME = 'Cppcheck' URL = 'http://cppcheck.sourceforge.net' - def get_reports(self, analyzer_result_path: str) -> List[Report]: + def get_reports(self, file_path: str) -> List[Report]: """ Get reports from the given analyzer result. """ reports: List[Report] = [] plist_files = [] - if os.path.isdir(analyzer_result_path): - plist_files = glob.glob(os.path.join( - analyzer_result_path, "*.plist")) - elif os.path.isfile(analyzer_result_path) and \ - analyzer_result_path.endswith(".plist"): - plist_files = [analyzer_result_path] + if os.path.isdir(file_path): + plist_files = glob.glob(os.path.join(file_path, "*.plist")) + elif os.path.isfile(file_path) and file_path.endswith(".plist"): + plist_files = [file_path] else: LOG.error("The given input should be an existing CppCheck result " "directory or a plist file.") diff --git a/tools/report-converter/codechecker_report_converter/analyzers/cpplint/parser.py b/tools/report-converter/codechecker_report_converter/analyzers/cpplint/parser.py index 46fd30d4df..923136a829 100644 --- a/tools/report-converter/codechecker_report_converter/analyzers/cpplint/parser.py +++ b/tools/report-converter/codechecker_report_converter/analyzers/cpplint/parser.py @@ -24,7 +24,7 @@ class Parser(BaseParser): """ def __init__(self, analyzer_result): - super(Parser, self).__init__() + super().__init__() self.analyzer_result = analyzer_result diff --git a/tools/report-converter/codechecker_report_converter/analyzers/gcc/analyzer_result.py b/tools/report-converter/codechecker_report_converter/analyzers/gcc/analyzer_result.py index 16a9f9af0b..4d1bb8b18e 100644 --- a/tools/report-converter/codechecker_report_converter/analyzers/gcc/analyzer_result.py +++ b/tools/report-converter/codechecker_report_converter/analyzers/gcc/analyzer_result.py @@ -25,10 +25,7 @@ class AnalyzerResult(AnalyzerResultBase): NAME = 'GNU Compiler Collection Static Analyzer' URL = 'https://gcc.gnu.org/wiki/StaticAnalyzer' - def __init__(self): - super(AnalyzerResult, self).__init__() - - def get_reports(self, result_file_path: str) -> List[Report]: + def get_reports(self, file_path: str) -> List[Report]: """ Get reports from the given analyzer result file. 
""" - return sarif.Parser().get_reports(result_file_path) + return sarif.Parser().get_reports(file_path) diff --git a/tools/report-converter/codechecker_report_converter/analyzers/golint/parser.py b/tools/report-converter/codechecker_report_converter/analyzers/golint/parser.py index 28053ed222..5b3c3e82c1 100644 --- a/tools/report-converter/codechecker_report_converter/analyzers/golint/parser.py +++ b/tools/report-converter/codechecker_report_converter/analyzers/golint/parser.py @@ -23,7 +23,7 @@ class Parser(BaseParser): """ Parser for Golint output. """ def __init__(self, analyzer_result): - super(Parser, self).__init__() + super().__init__() self.analyzer_result = analyzer_result diff --git a/tools/report-converter/codechecker_report_converter/analyzers/infer/analyzer_result.py b/tools/report-converter/codechecker_report_converter/analyzers/infer/analyzer_result.py index 115d5ffff3..f351d91f3b 100644 --- a/tools/report-converter/codechecker_report_converter/analyzers/infer/analyzer_result.py +++ b/tools/report-converter/codechecker_report_converter/analyzers/infer/analyzer_result.py @@ -29,21 +29,21 @@ class AnalyzerResult(AnalyzerResultBase): URL = 'https://fbinfer.com' def __init__(self): - super(AnalyzerResult, self).__init__() + super().__init__() self.__infer_out_parent_dir = None self.__file_cache: Dict[str, File] = {} - def get_reports(self, result_file_path: str) -> List[Report]: + def get_reports(self, file_path: str) -> List[Report]: """ Parse the given analyzer result. """ reports: List[Report] = [] - if os.path.isdir(result_file_path): - report_file = os.path.join(result_file_path, "report.json") - self.__infer_out_parent_dir = os.path.dirname(result_file_path) + if os.path.isdir(file_path): + report_file = os.path.join(file_path, "report.json") + self.__infer_out_parent_dir = os.path.dirname(file_path) else: - report_file = result_file_path + report_file = file_path self.__infer_out_parent_dir = os.path.dirname( - os.path.dirname(result_file_path)) + os.path.dirname(file_path)) if not os.path.exists(report_file): LOG.error("Report file does not exist: %s", report_file) @@ -56,7 +56,7 @@ def get_reports(self, result_file_path: str) -> List[Report]: except IOError: LOG.error("Failed to parse the given analyzer result '%s'. Please " "give an infer output directory which contains a valid " - "'report.json' file.", result_file_path) + "'report.json' file.", file_path) return reports for bug in bugs: @@ -79,6 +79,7 @@ def __get_abs_path(self, source_path): return full_path LOG.warning("No source file found: %s", source_path) + return None def __parse_report(self, bug) -> Optional[Report]: """ Parse the given report and create a message from them. 
""" @@ -87,9 +88,7 @@ def __parse_report(self, bug) -> Optional[Report]: message = bug['qualifier'] line = int(bug['line']) - col = int(bug['column']) - if col < 0: - col = 0 + col = max(int(bug['column']), 0) source_path = self.__get_abs_path(bug['file']) if not source_path: @@ -121,9 +120,7 @@ def __parse_bug_trace(self, bug_trace) -> Optional[BugPathEvent]: message = bug_trace['description'] line = int(bug_trace['line_number']) - col = int(bug_trace['column_number']) - if col < 0: - col = 0 + col = max(int(bug_trace['column_number']), 0) return BugPathEvent( message, diff --git a/tools/report-converter/codechecker_report_converter/analyzers/kerneldoc/parser.py b/tools/report-converter/codechecker_report_converter/analyzers/kerneldoc/parser.py index 9adfcc242f..e16ef0aa6a 100644 --- a/tools/report-converter/codechecker_report_converter/analyzers/kerneldoc/parser.py +++ b/tools/report-converter/codechecker_report_converter/analyzers/kerneldoc/parser.py @@ -24,7 +24,7 @@ class Parser(BaseParser): """ def __init__(self, analyzer_result: str): - super(Parser, self).__init__() + super().__init__() self.analyzer_result = analyzer_result diff --git a/tools/report-converter/codechecker_report_converter/analyzers/markdownlint/parser.py b/tools/report-converter/codechecker_report_converter/analyzers/markdownlint/parser.py index f0cf4b10b7..96f92a39c0 100644 --- a/tools/report-converter/codechecker_report_converter/analyzers/markdownlint/parser.py +++ b/tools/report-converter/codechecker_report_converter/analyzers/markdownlint/parser.py @@ -23,7 +23,7 @@ class Parser(BaseParser): """ Parser for Markdownlint output. """ def __init__(self, analyzer_result): - super(Parser, self).__init__() + super().__init__() self.analyzer_result = analyzer_result diff --git a/tools/report-converter/codechecker_report_converter/analyzers/pyflakes/parser.py b/tools/report-converter/codechecker_report_converter/analyzers/pyflakes/parser.py index d250a1d3d2..c5ebce7d30 100644 --- a/tools/report-converter/codechecker_report_converter/analyzers/pyflakes/parser.py +++ b/tools/report-converter/codechecker_report_converter/analyzers/pyflakes/parser.py @@ -23,7 +23,7 @@ class Parser(BaseParser): """ Parser for Pyflakes output. """ def __init__(self, analyzer_result): - super(Parser, self).__init__() + super().__init__() self.analyzer_result = analyzer_result diff --git a/tools/report-converter/codechecker_report_converter/analyzers/pylint/analyzer_result.py b/tools/report-converter/codechecker_report_converter/analyzers/pylint/analyzer_result.py index 60b1a8c21c..b278bfbf04 100644 --- a/tools/report-converter/codechecker_report_converter/analyzers/pylint/analyzer_result.py +++ b/tools/report-converter/codechecker_report_converter/analyzers/pylint/analyzer_result.py @@ -28,34 +28,33 @@ class AnalyzerResult(AnalyzerResultBase): NAME = 'Pylint' URL = 'https://www.pylint.org' - def get_reports(self, result_file_path: str) -> List[Report]: + def get_reports(self, file_path: str) -> List[Report]: """ Get reports from the given analyzer result. """ reports: List[Report] = [] - if not os.path.exists(result_file_path): - LOG.error("Report file does not exist: %s", result_file_path) + if not os.path.exists(file_path): + LOG.error("Report file does not exist: %s", file_path) return reports try: - with open(result_file_path, 'r', + with open(file_path, 'r', encoding="utf-8", errors="ignore") as f: bugs = json.load(f) except (IOError, json.decoder.JSONDecodeError): LOG.error("Failed to parse the given analyzer result '%s'. 
Please " "give a valid json file generated by Pylint.", - result_file_path) + file_path) return reports file_cache: Dict[str, File] = {} for bug in bugs: - file_path = os.path.join( - os.path.dirname(result_file_path), bug.get('path')) - if not os.path.exists(file_path): - LOG.warning("Source file does not exists: %s", file_path) + fp = os.path.join(os.path.dirname(file_path), bug.get('path')) + if not os.path.exists(fp): + LOG.warning("Source file does not exists: %s", fp) continue reports.append(Report( - get_or_create_file(os.path.abspath(file_path), file_cache), + get_or_create_file(os.path.abspath(fp), file_cache), int(bug['line']), int(bug['column']), bug['message'], diff --git a/tools/report-converter/codechecker_report_converter/analyzers/roslynator/analyzer_result.py b/tools/report-converter/codechecker_report_converter/analyzers/roslynator/analyzer_result.py index 31becc1c7d..3afc5625a8 100644 --- a/tools/report-converter/codechecker_report_converter/analyzers/roslynator/analyzer_result.py +++ b/tools/report-converter/codechecker_report_converter/analyzers/roslynator/analyzer_result.py @@ -30,7 +30,7 @@ class AnalyzerResult(AnalyzerResultBase): + '#roslynator-command-line-tool-' def __init__(self): - super(AnalyzerResult, self).__init__() + super().__init__() self.__file_cache: Dict[str, File] = {} def get_reports( @@ -82,15 +82,15 @@ def __parse_diag( Returns the Report from the parsed diagnostic or None if something goes wrong. """ - id = diag.attrib.get('Id') + diag_id = diag.attrib.get('Id') - filePathElement = diag.find('FilePath') - if filePathElement is None: - LOG.warning("Diagnostic does not belong to a file: %s", id) + file_path_element = diag.find('FilePath') + if file_path_element is None: + LOG.warning("Diagnostic does not belong to a file: %s", diag_id) return None source_file_path = os.path.join(os.path.dirname(input_file_path), - filePathElement.text) + file_path_element.text) if not os.path.exists(source_file_path): LOG.warning("Source file does not exist: %s", source_file_path) return None @@ -105,5 +105,5 @@ def __parse_diag( int(line), int(column), message, - id + diag_id ) diff --git a/tools/report-converter/codechecker_report_converter/analyzers/sanitizers/parser.py b/tools/report-converter/codechecker_report_converter/analyzers/sanitizers/parser.py index 2d63d0ea24..f87be15505 100644 --- a/tools/report-converter/codechecker_report_converter/analyzers/sanitizers/parser.py +++ b/tools/report-converter/codechecker_report_converter/analyzers/sanitizers/parser.py @@ -32,7 +32,7 @@ class SANParser(BaseParser): line_re = re.compile(r'') def __init__(self): - super(SANParser, self).__init__() + super().__init__() # Regex for parsing stack trace line. 
        # It has the following format:
diff --git a/tools/report-converter/codechecker_report_converter/analyzers/smatch/parser.py b/tools/report-converter/codechecker_report_converter/analyzers/smatch/parser.py
index 8c3b5ca4d6..595f414102 100644
--- a/tools/report-converter/codechecker_report_converter/analyzers/smatch/parser.py
+++ b/tools/report-converter/codechecker_report_converter/analyzers/smatch/parser.py
@@ -25,7 +25,7 @@ class Parser(BaseParser):
     """
 
     def __init__(self, analyzer_result):
-        super(Parser, self).__init__()
+        super().__init__()
 
         self.analyzer_result = analyzer_result
diff --git a/tools/report-converter/codechecker_report_converter/analyzers/sparse/parser.py b/tools/report-converter/codechecker_report_converter/analyzers/sparse/parser.py
index 59894446b8..6a59ddd08e 100644
--- a/tools/report-converter/codechecker_report_converter/analyzers/sparse/parser.py
+++ b/tools/report-converter/codechecker_report_converter/analyzers/sparse/parser.py
@@ -26,7 +26,7 @@ class Parser(BaseParser):
     """
 
     def __init__(self, analyzer_result):
-        super(Parser, self).__init__()
+        super().__init__()
 
         self.analyzer_result = analyzer_result
@@ -59,7 +59,7 @@ def _parse_line(
         """ Parse the given line. """
         match = self.message_line_re.match(line)
-        if (match is None):
+        if match is None:
             return [], next(it)
 
         file_path = os.path.normpath(
@@ -100,4 +100,4 @@ def _parse_line(
                 report.line,
                 report.column))
 
-        return [report], line
+        return [report], line
diff --git a/tools/report-converter/codechecker_report_converter/analyzers/sphinx/parser.py b/tools/report-converter/codechecker_report_converter/analyzers/sphinx/parser.py
index 816aa0aa15..29325b50e9 100644
--- a/tools/report-converter/codechecker_report_converter/analyzers/sphinx/parser.py
+++ b/tools/report-converter/codechecker_report_converter/analyzers/sphinx/parser.py
@@ -25,7 +25,7 @@ class Parser(BaseParser):
     """
 
     def __init__(self, analyzer_result):
-        super(Parser, self).__init__()
+        super().__init__()
 
         self.analyzer_result = analyzer_result
diff --git a/tools/report-converter/codechecker_report_converter/analyzers/spotbugs/analyzer_result.py b/tools/report-converter/codechecker_report_converter/analyzers/spotbugs/analyzer_result.py
index e5fb6d5a73..6dc0e4427e 100644
--- a/tools/report-converter/codechecker_report_converter/analyzers/spotbugs/analyzer_result.py
+++ b/tools/report-converter/codechecker_report_converter/analyzers/spotbugs/analyzer_result.py
@@ -29,7 +29,7 @@ class AnalyzerResult(AnalyzerResultBase):
     URL = 'https://spotbugs.github.io'
 
     def __init__(self):
-        super(AnalyzerResult, self).__init__()
+        super().__init__()
         self.__project_paths = []
         self.__file_cache: Dict[str, File] = {}
 
@@ -74,6 +74,7 @@ def __get_abs_path(self, source_line):
             return full_path
 
         LOG.warning("No source file found: %s", source_path)
+        return None
 
     def __parse_analyzer_result(self, analyzer_result: str):
         """ Parse the given analyzer result xml file.
@@ -91,6 +92,8 @@ def __parse_analyzer_result(self, analyzer_result: str):
                       "give a valid xml file with messages generated by "
                       "SpotBugs.", analyzer_result)
 
+        return None
+
     def __get_project_paths(self, root):
         """ Get project paths from the bug collection.
""" paths = [] @@ -116,7 +119,7 @@ def __parse_bug(self, bug): source_line = bug.find('SourceLine') source_path = self.__get_abs_path(source_line) if not source_path: - return + return None line = source_line.attrib.get('start') col = 0 diff --git a/tools/report-converter/codechecker_report_converter/analyzers/tslint/analyzer_result.py b/tools/report-converter/codechecker_report_converter/analyzers/tslint/analyzer_result.py index 7f2a45cc54..79dca3db26 100644 --- a/tools/report-converter/codechecker_report_converter/analyzers/tslint/analyzer_result.py +++ b/tools/report-converter/codechecker_report_converter/analyzers/tslint/analyzer_result.py @@ -28,31 +28,30 @@ class AnalyzerResult(AnalyzerResultBase): NAME = 'TSLint' URL = 'https://palantir.github.io/tslint' - def get_reports(self, result_file_path: str) -> List[Report]: + def get_reports(self, file_path: str) -> List[Report]: """ Parse the given analyzer result. """ reports: List[Report] = [] - if not os.path.exists(result_file_path): - LOG.error("Report file does not exist: %s", result_file_path) + if not os.path.exists(file_path): + LOG.error("Report file does not exist: %s", file_path) return reports try: - with open(result_file_path, 'r', + with open(file_path, 'r', encoding="utf-8", errors="ignore") as report_f: bugs = json.load(report_f) except (IOError, json.decoder.JSONDecodeError): LOG.error("Failed to parse the given analyzer result '%s'. Please " "give a valid json file generated by TSLint.", - result_file_path) + file_path) return reports file_cache: Dict[str, File] = {} for bug in bugs: - file_path = os.path.join( - os.path.dirname(result_file_path), bug.get('name')) + fp = os.path.join(os.path.dirname(file_path), bug.get('name')) - if not os.path.exists(file_path): - LOG.warning("Source file does not exists: %s", file_path) + if not os.path.exists(fp): + LOG.warning("Source file does not exists: %s", fp) continue end_pos = bug['startPosition'] @@ -60,7 +59,7 @@ def get_reports(self, result_file_path: str) -> List[Report]: col = int(end_pos['character'] + 1) reports.append(Report( - get_or_create_file(os.path.abspath(file_path), file_cache), + get_or_create_file(os.path.abspath(fp), file_cache), line, col, bug['failure'], bug['ruleName'] )) diff --git a/tools/report-converter/codechecker_report_converter/cli.py b/tools/report-converter/codechecker_report_converter/cli.py index 782a9f07c6..fe73a71cc4 100755 --- a/tools/report-converter/codechecker_report_converter/cli.py +++ b/tools/report-converter/codechecker_report_converter/cli.py @@ -9,7 +9,6 @@ import argparse -from collections.abc import Iterable, Sequence import glob import importlib import logging @@ -31,6 +30,8 @@ sys.path.insert(0, os.path.dirname(current_dir)) +# The following imports must come after the previous sys.path.insert() section. +# pylint: disable=wrong-import-position from codechecker_report_converter.report.report_file import \ SUPPORTED_ANALYZER_TYPES, SUPPORTED_ANALYZER_EXTENSIONS from codechecker_report_converter.report.parser import plist @@ -50,7 +51,6 @@ class RawDescriptionDefaultHelpFormatter( ): """ Adds default values to argument help and retains any formatting in descriptions. """ - pass # Load supported converters dynamically. 
@@ -61,26 +61,23 @@ class RawDescriptionDefaultHelpFormatter(
 analyzers = sorted(glob.glob(os.path.join(
     analyzers_dir_path, '**', 'analyzer_result.py'), recursive=True))
 for analyzer_path in analyzers:
-    analyzer_module = '.'.join(os.path.relpath(
+    ANALYZER_MODULE = '.'.join(os.path.relpath(
         os.path.splitext(analyzer_path)[0],
         analyzers_dir_path).split(os.path.sep))
-    module_name = f"codechecker_report_converter.analyzers.{analyzer_module}"
+    module_name = f"codechecker_report_converter.analyzers.{ANALYZER_MODULE}"
 
-    try:
-        module = importlib.import_module(module_name)
+    module = importlib.import_module(module_name)
 
-        if hasattr(module, "AnalyzerResult"):
-            analyzer_result = getattr(module, "AnalyzerResult")
-            supported_converters[analyzer_result.TOOL_NAME] = analyzer_result
-    except ModuleNotFoundError:
-        raise
+    if hasattr(module, "AnalyzerResult"):
+        analyzer_result = getattr(module, "AnalyzerResult")
+        supported_converters[analyzer_result.TOOL_NAME] = analyzer_result
 
 supported_metadata_keys = ["analyzer_command", "analyzer_version"]
 
 
 def transform_output(
-    analyzer_result: Iterable[str],
+    analyzer_results: Iterable[str],
     parser_type: str,
     output_dir: str,
     file_name: str,
@@ -99,7 +96,7 @@ def transform_output(
     parser = supported_converters[parser_type]()
 
     parser.transform(
-        analyzer_result, output_dir, export_type, file_name, metadata)
+        analyzer_results, output_dir, export_type, file_name, metadata)
 
 
 def process_metadata(metadata) -> Tuple[Dict[str, str], Dict[str, str]]:
@@ -154,7 +151,7 @@ def __call__(
         had_nonexistent_path = False
         for path in values:
             if not os.path.exists(path):
-                LOG.error(f"File or directory '{path}' does not exist!")
+                LOG.error("File or directory '%s' does not exist!", path)
                 had_nonexistent_path = True
             elif os.path.isfile(path):
                 all_files.append(path)
@@ -216,7 +213,7 @@ def __add_arguments_to_parser(parser):
                              "directory will be stored to a running "
                              "CodeChecker server. It has the following "
                              "format: key=value. Valid key values are: "
-                             "{0}.".format(', '.join(supported_metadata_keys)))
+                             f"{', '.join(supported_metadata_keys)}.")
 
     parser.add_argument('--filename',
                         type=str,
@@ -260,10 +257,9 @@ def main():
 which can be stored to a CodeChecker web server.""",
         epilog="""
 Supported analyzers:
-{0}""".format('\n'.join(["  {0} - {1}, {2}".format(
-    tool_name,
-    supported_converters[tool_name].NAME,
-    supported_converters[tool_name].URL)
+{0}""".format('\n'.join([f"  {tool_name} - "
+                         f"{supported_converters[tool_name].NAME}, "
+                         f"{supported_converters[tool_name].URL}"
     for tool_name in sorted(supported_converters)])),
         formatter_class=RawDescriptionDefaultHelpFormatter
     )
diff --git a/tools/report-converter/codechecker_report_converter/report/__init__.py b/tools/report-converter/codechecker_report_converter/report/__init__.py
index 712659af7e..db34797059 100644
--- a/tools/report-converter/codechecker_report_converter/report/__init__.py
+++ b/tools/report-converter/codechecker_report_converter/report/__init__.py
@@ -180,10 +180,10 @@ class BugPathPosition:
     def __init__(
         self,
         file: File,
-        range: Optional[Range]
+        file_range: Optional[Range]
     ):
         self.file = file
-        self.range = range
+        self.range = file_range
 
     def to_json(self) -> Dict:
         """ Creates a JSON dictionary.
""" @@ -212,9 +212,9 @@ def __init__( file: File, line: int, column: int, - range: Optional[Range] = None + file_range: Optional[Range] = None ): - super(BugPathEvent, self).__init__(file, range) + super().__init__(file, file_range) # Range can provide more precise location information than line and # column. Use that instead of these fields. @@ -257,18 +257,16 @@ def __init__( file: File, line: int, column: int, - range: Optional[Range] = None + file_range: Optional[Range] = None ): - super(MacroExpansion, self).__init__( - message, file, line, column, range) - + super().__init__(message, file, line, column, file_range) self.name = name def to_json(self) -> Dict: """ Creates a JSON dictionary. """ return { "name": self.name, - **super(MacroExpansion, self).to_json() + **super().to_json() } def __repr__(self): @@ -309,7 +307,7 @@ def __init__( report_hash: Optional[str] = None, analyzer_name: Optional[str] = None, category: Optional[str] = None, - type: Optional[str] = None, + type: Optional[str] = None, # pylint: disable=redefined-builtin analyzer_result_file_path: Optional[str] = None, source_line: Optional[str] = None, bug_path_events: Optional[List[BugPathEvent]] = None, diff --git a/tools/report-converter/codechecker_report_converter/report/hash.py b/tools/report-converter/codechecker_report_converter/report/hash.py index c7682b66cd..a5f4854f03 100644 --- a/tools/report-converter/codechecker_report_converter/report/hash.py +++ b/tools/report-converter/codechecker_report_converter/report/hash.py @@ -182,7 +182,7 @@ def get_report_hash(report: Report, hash_type: HashType) -> str: elif hash_type == HashType.DIAGNOSTIC_MESSAGE: hash_content = __get_report_hash_diagnostic_message(report) else: - raise Exception("Invalid report hash type: " + str(hash_type)) + raise ValueError("Invalid report hash type: " + str(hash_type)) return __str_to_hash('|||'.join(hash_content)) diff --git a/tools/report-converter/codechecker_report_converter/report/output/gerrit.py b/tools/report-converter/codechecker_report_converter/report/output/gerrit.py index 470cdb8446..e0461489dc 100644 --- a/tools/report-converter/codechecker_report_converter/report/output/gerrit.py +++ b/tools/report-converter/codechecker_report_converter/report/output/gerrit.py @@ -97,7 +97,7 @@ def __convert_reports(reports: List[Report], # Skip the report if it is not in the changed files. if changed_file_path and not \ - any([file_name.endswith(c) for c in changed_files]): + any(file_name.endswith(c) for c in changed_files): report_messages_in_unchanged_files.append(review_comment_msg) continue diff --git a/tools/report-converter/codechecker_report_converter/report/output/html/cli.py b/tools/report-converter/codechecker_report_converter/report/output/html/cli.py index c53d3b33fe..2104fd8205 100644 --- a/tools/report-converter/codechecker_report_converter/report/output/html/cli.py +++ b/tools/report-converter/codechecker_report_converter/report/output/html/cli.py @@ -18,6 +18,8 @@ sys.path.insert(0, os.path.abspath( os.path.join(__file__, *[os.path.pardir] * 4))) +# The following imports must come after the previous sys.path.insert() section. 
+# pylint: disable=wrong-import-position
 from codechecker_report_converter.report.output.html.html import HtmlBuilder, \
     parse
 
@@ -71,17 +73,17 @@ def main():
     html_builder.create_index_html(args.output_dir)
     html_builder.create_statistics_html(args.output_dir)
 
-    print('\nTo view statistics in a browser run:\n> firefox {0}'.format(
-        os.path.join(args.output_dir, 'statistics.html')))
+    print('\nTo view statistics in a browser run:\n> firefox '
+          f"{os.path.join(args.output_dir, 'statistics.html')}")
 
-    print('\nTo view the results in a browser run:\n> firefox {0}'.format(
-        os.path.join(args.output_dir, 'index.html')))
+    print('\nTo view the results in a browser run:\n> firefox '
+          f"{os.path.join(args.output_dir, 'index.html')}")
 
     if changed_source_files:
         changed_files = '\n'.join([' - ' + f for f in changed_source_files])
         print("\nThe following source file contents changed since the "
-              "latest analysis:\n{0}\nPlease analyze your project again to "
-              "update the reports!".format(changed_files))
+              f"latest analysis:\n{changed_files}\nPlease analyze your project "
+              "again to update the reports!")
 
 
 if __name__ == "__main__":
diff --git a/tools/report-converter/codechecker_report_converter/report/output/html/html.py b/tools/report-converter/codechecker_report_converter/report/output/html/html.py
index a7784bf2d8..c102199cfd 100644
--- a/tools/report-converter/codechecker_report_converter/report/output/html/html.py
+++ b/tools/report-converter/codechecker_report_converter/report/output/html/html.py
@@ -151,9 +151,8 @@ def __init__(
 
         # Get the content of the HTML layout dependencies.
         self._tag_contents = {}
-        for tag in self._layout_tag_files:
-            self._tag_contents[tag] = get_file_content(
-                self._layout_tag_files[tag])
+        for tag, filepath in self._layout_tag_files.items():
+            self._tag_contents[tag] = get_file_content(filepath)
 
     def get_severity(self, checker_name: str) -> str:
         """ Returns severity level for the given checker name.
""" @@ -349,12 +348,12 @@ def severity_order(severity: str) -> int: num_of_analyzer_result_files = len(self.generated_html_reports) num_of_reports = 0 - for html_file in self.generated_html_reports: - num_of_reports += len(self.generated_html_reports[html_file]) + for reports in self.generated_html_reports.values(): + num_of_reports += len(reports) checker_statistics: Dict[str, int] = defaultdict(int) - for html_file in self.generated_html_reports: - for report in self.generated_html_reports[html_file]: + for reports in self.generated_html_reports.values(): + for report in reports: checker = report['checker']['name'] checker_statistics[checker] += 1 @@ -364,16 +363,15 @@ def severity_order(severity: str) -> int: with io.StringIO() as string: for checker_name in sorted(checker_statistics): severity = self.get_severity(checker_name) - string.write(''' - - {0} - - - - {2} - - '''.format(checker_name, severity.lower(), - checker_statistics[checker_name])) + string.write(f''' + + {checker_name} + + + + {checker_statistics[checker_name]} + +''') checker_rows.append([checker_name, severity, str(checker_statistics[checker_name])]) severity_statistics[severity] += \ @@ -385,14 +383,14 @@ def severity_order(severity: str) -> int: with io.StringIO() as string: for severity in sorted(severity_statistics, key=severity_order): num = severity_statistics[severity] - string.write(''' - - - - - {1} - - '''.format(severity.lower(), num)) + string.write(f''' + + + + + {num} + +''') severity_rows.append([severity, str(num)]) severity_statistics_content = string.getvalue() @@ -436,7 +434,7 @@ def convert( file content change. """ if not reports: - LOG.info(f'No report data in {file_path} file.') + LOG.info('No report data in %s file.', file_path) return set() html_filename = f"{os.path.basename(file_path)}.html" @@ -447,7 +445,7 @@ def convert( if changed_files: return changed_files - LOG.info(f"Html file was generated: {html_output_path}") + LOG.info("Html file was generated: %s", html_output_path) return changed_files @@ -494,7 +492,7 @@ def parse( "analyzer result file.", file_path) continue - LOG.info(f"\nParsing input file '%s'", file_path) + LOG.info("\nParsing input file '%s'", file_path) reports = report_file.get_reports(file_path) changed_source = convert(file_path, reports, output_path, html_builder) diff --git a/tools/report-converter/codechecker_report_converter/report/output/plaintext.py b/tools/report-converter/codechecker_report_converter/report/output/plaintext.py index c013e13314..9d1fd37887 100644 --- a/tools/report-converter/codechecker_report_converter/report/output/plaintext.py +++ b/tools/report-converter/codechecker_report_converter/report/output/plaintext.py @@ -137,8 +137,11 @@ def get_file_report_map( source_file = __get_source_file_for_analyzer_result_file( input_file_path, metadata) - # Add source file to the map if it doesn't exists. + # Add source file to the map if it doesn't exist. if source_file: + # This statement is not pointless. The key should be inserted only + # if no value belongs to it from earlier. 
+            # pylint: disable=pointless-statement
             file_report_map[source_file]
 
     return file_report_map
diff --git a/tools/report-converter/codechecker_report_converter/report/parser/base.py b/tools/report-converter/codechecker_report_converter/report/parser/base.py
index bb4de98f99..84cebcab56 100644
--- a/tools/report-converter/codechecker_report_converter/report/parser/base.py
+++ b/tools/report-converter/codechecker_report_converter/report/parser/base.py
@@ -35,9 +35,6 @@ def load_json(path: str):
     try:
         with open(path, 'r', encoding='utf-8', errors='ignore') as handle:
             ret = json.load(handle)
-    except IOError as ex:
-        LOG.warning("Failed to open json file: %s", path)
-        LOG.warning(ex)
     except OSError as ex:
         LOG.warning("Failed to open json file: %s", path)
         LOG.warning(ex)
diff --git a/tools/report-converter/codechecker_report_converter/report/parser/plist.py b/tools/report-converter/codechecker_report_converter/report/parser/plist.py
index 2a02f88eb5..3f79878f06 100644
--- a/tools/report-converter/codechecker_report_converter/report/parser/plist.py
+++ b/tools/report-converter/codechecker_report_converter/report/parser/plist.py
@@ -20,11 +20,7 @@
 from typing import Any, BinaryIO, Dict, List, Optional, Tuple
 from xml.parsers.expat import ExpatError
 
-
-if sys.version_info >= (3, 8):
-    from typing import TypedDict  # pylint: disable=no-name-in-module
-else:
-    from mypy_extensions import TypedDict
+import lxml.etree
 
 from codechecker_report_converter.report import \
     BugPathEvent, BugPathPosition, \
@@ -37,10 +33,14 @@
 from codechecker_report_converter.report.parser.base import AnalyzerInfo, \
     BaseParser, get_tool_info
 
+if sys.version_info >= (3, 8):
+    from typing import TypedDict  # pylint: disable=no-name-in-module
+else:
+    from mypy_extensions import TypedDict
+
 
 LOG = logging.getLogger('report-converter')
 
-
 EXTENSION = 'plist'
 
 PlistItem = Any
@@ -99,14 +99,12 @@ def __init__(self, dict_type=dict):
         self.parser = XMLParser(target=self.event_handler)
 
     def parse(self, fileobj):
-        # pylint: disable=no-name-in-module
-        from lxml.etree import parse, XMLSyntaxError
-
         try:
-            parse(fileobj, self.parser)
-        except XMLSyntaxError as ex:
+            # pylint: disable=c-extension-no-member
+            lxml.etree.parse(fileobj, self.parser)
+        except lxml.etree.XMLSyntaxError as ex:
             LOG.error("Invalid plist file '%s': %s", fileobj.name, ex)
-            return
+            return None
 
         return self.root
@@ -158,7 +156,7 @@ def parse(fp: BinaryIO):
     except (ExpatError, TypeError, AttributeError) as err:
         LOG.warning('Invalid plist file')
         LOG.warning(err)
-        return
+        return None
     except ImportError:
         LOG.debug("lxml library is not available. Use plistlib to parse plist "
                  "files.")
@@ -169,7 +167,8 @@ def parse(fp: BinaryIO):
             plistlib.InvalidFileException) as err:
         LOG.warning('Invalid plist file')
         LOG.warning(err)
-        return
+
+    return None
 
 
 def get_file_index_map(
@@ -237,8 +236,8 @@ def get_reports(
             traceback.print_exc()
             LOG.warning(type(ex))
             LOG.warning(ex)
-        finally:
-            return reports
+
+        return reports
 
     def __create_report(
         self,
@@ -319,7 +318,7 @@ def __get_bug_path_events(
                     file=files[location['file']],
                     line=location['line'],
                     column=location['col'],
-                    range=Range(
+                    file_range=Range(
                         start_loc['line'], start_loc['col'],
                         end_loc['line'], end_loc['col'])))
@@ -358,13 +357,13 @@ def __get_bug_path_positions(
             if edge:
                 bug_path_positions.append(BugPathPosition(
                     file=files[edge[1]['file']],
-                    range=Range(
+                    file_range=Range(
                         edge[0]['line'], edge[0]['col'],
                         edge[1]['line'], edge[1]['col'])))
 
         bug_path_positions.append(BugPathPosition(
             file=files[edges['end'][1]['file']],
-            range=Range(
+            file_range=Range(
                 edges['end'][0]['line'], edges['end'][0]['col'],
                 edges['end'][1]['line'], edges['end'][1]['col'])))
@@ -393,7 +392,7 @@ def __get_notes(
                 file=files[location['file']],
                 line=location['line'],
                 column=location['col'],
-                range=Range(
+                file_range=Range(
                     start_loc['line'], start_loc['col'],
                     end_loc['line'], end_loc['col'])))
@@ -419,7 +418,7 @@ def __get_macro_expansions(
                 file=files[location['file']],
                 line=location['line'],
                 column=location['col'],
-                range=Range(
+                file_range=Range(
                     start_loc['line'], start_loc['col'],
                     end_loc['line'], end_loc['col'])))
@@ -520,7 +519,7 @@ def convert(
                     macro_expansion, file_index_map))
 
         if report.annotations:
-            diagnostic["report-annotation"] = dict()
+            diagnostic["report-annotation"] = {}
             for key, value in report.annotations.items():
                 diagnostic["report-annotation"][key] = value
@@ -536,7 +535,6 @@ def write(self, data: Any, output_file_path: str):
         except TypeError as err:
             LOG.error('Failed to write plist file: %s', output_file_path)
             LOG.error(err)
-            import traceback
             traceback.print_exc()
@@ -614,13 +612,15 @@ def _create_note(
 
     def _create_range(
         self,
-        range: Range,
+        file_range: Range,
         file_idx: int
     ) -> List:
         """ Creates a range. """
         return [
-            self._create_location(range.start_line, range.start_col, file_idx),
-            self._create_location(range.end_line, range.end_col, file_idx)]
+            self._create_location(
+                file_range.start_line, file_range.start_col, file_idx),
+            self._create_location(
+                file_range.end_line, file_range.end_col, file_idx)]
 
     def _create_macro_expansion(
         self,
@@ -637,20 +637,21 @@ def _create_macro_expansion(
 
     def replace_report_hash(
         self,
-        plist_file_path: str,
+        analyzer_result_file_path: str,
         hash_type=HashType.CONTEXT_FREE
     ):
         """ Override hash in the given file by using the given version hash.
""" try: - with open(plist_file_path, 'rb+') as f: + with open(analyzer_result_file_path, 'rb+') as f: plist = plistlib.load(f) f.seek(0) f.truncate() metadata = plist.get('metadata') - analyzer_result_dir_path = os.path.dirname(plist_file_path) + analyzer_result_dir_path = \ + os.path.dirname(analyzer_result_file_path) file_cache: Dict[str, File] = {} files = get_file_index_map( @@ -658,7 +659,7 @@ def replace_report_hash( for diag in plist['diagnostics']: report = self.__create_report( - plist_file_path, diag, files, metadata) + analyzer_result_file_path, diag, files, metadata) diag['issue_hash_content_of_line_in_context'] = \ get_report_hash(report, hash_type) @@ -666,18 +667,18 @@ def replace_report_hash( except (TypeError, AttributeError, plistlib.InvalidFileException) as err: LOG.warning('Failed to process plist file: %s wrong file format?', - plist_file_path) + analyzer_result_file_path) LOG.warning(err) except IndexError as iex: LOG.warning('Indexing error during processing plist file %s', - plist_file_path) + analyzer_result_file_path) LOG.warning(type(iex)) LOG.warning(repr(iex)) _, _, exc_traceback = sys.exc_info() traceback.print_tb(exc_traceback, limit=1, file=sys.stdout) except Exception as ex: LOG.warning('Error during processing reports from the plist ' - 'file: %s', plist_file_path) + 'file: %s', analyzer_result_file_path) traceback.print_exc() LOG.warning(type(ex)) LOG.warning(ex) diff --git a/tools/report-converter/codechecker_report_converter/report/parser/sarif.py b/tools/report-converter/codechecker_report_converter/report/parser/sarif.py index 14f4bbf234..856bf544cf 100644 --- a/tools/report-converter/codechecker_report_converter/report/parser/sarif.py +++ b/tools/report-converter/codechecker_report_converter/report/parser/sarif.py @@ -12,10 +12,9 @@ from sarif import loader -from typing import Dict, List, Optional +from typing import Any, Dict, List, Optional, Tuple from urllib.parse import urlparse -from typing import Any, List, Optional, Tuple from codechecker_report_converter.report import BugPathEvent, \ BugPathPosition, File, MacroExpansion, get_or_create_file, Range, Report @@ -48,19 +47,19 @@ def __init__(self): class Parser(BaseParser): def get_reports( self, - result_file_path: str, - source_dir_path: Optional[str] = None + analyzer_result_file_path: str, + _: Optional[str] = None ) -> List[Report]: """ Get reports from the given analyzer result file. """ - if not self.has_any_runs(result_file_path): + if not self.has_any_runs(analyzer_result_file_path): return [] reports: List[Report] = [] - self.result_file_path = result_file_path + self.result_file_path = analyzer_result_file_path self.had_error = False - data = loader.load_sarif_file(result_file_path) + data = loader.load_sarif_file(analyzer_result_file_path) for run in data.runs: rules = self._get_rules(run.run_data) @@ -93,7 +92,7 @@ def get_reports( report = Report( file, rng.start_line, rng.start_col, message, rule_id, # TODO: Add severity. 
-                        analyzer_result_file_path=result_file_path,
+                        analyzer_result_file_path=analyzer_result_file_path,
                         bug_path_events=bug_path_events,
                         bug_path_positions=thread_flow_info.bug_path_positions,
                         notes=thread_flow_info.notes,
@@ -215,16 +214,17 @@ def _resolve_uri_base_id(self, uri_base_id: str, postfix: str):
 
         if not self.original_uri_base_ids:
             LOG.error("Missing 'originalUriBaseIds' (sarif v2.1.0 §3.14.14) "
-                      f"in '{self.result_file_path}'.")
+                      "in '%s'.", self.result_file_path)
             LOG.error(error_str)
             self.had_error = True
             return ""
 
         original = self.original_uri_base_ids.get(uri_base_id)
         if not original:
-            LOG.error(f"Missing entry for '{uri_base_id} in"
-                      "'originalUriBaseIds' (sarif v2.1.0 §3.14.14)"
-                      f"in '{self.result_file_path}'.")
+            LOG.error("Missing entry for '%s' in "
+                      "'originalUriBaseIds' (sarif v2.1.0 §3.14.14) in '%s'.",
+                      uri_base_id,
+                      self.result_file_path)
             LOG.error(error_str)
             self.had_error = True
             return ""
@@ -232,8 +232,10 @@ def _resolve_uri_base_id(self, uri_base_id: str, postfix: str):
         abs_uri_prefix = original.get("uri")
         if not abs_uri_prefix:
             LOG.warning("Missing 'uri' (sarif v2.1.0 §3.4.3) for "
-                        f"'{uri_base_id} in 'originalUriBaseIds' (sarif "
-                        f"v2.1.0 §3.14.14) in '{self.result_file_path}'.")
+                        "'%s' in 'originalUriBaseIds' (sarif "
+                        "v2.1.0 §3.14.14) in '%s'.",
+                        uri_base_id,
+                        self.result_file_path)
             LOG.error(error_str)
             self.had_error = True
             return ""
@@ -265,7 +267,7 @@ def _get_file(
 
         uri_parsed = urlparse(uri)
         if uri_parsed is None:
-            LOG.warning(f"Failed to urlparse {uri}!")
+            LOG.warning("Failed to urlparse %s!", uri)
             return None
 
         file_path = os.path.join(uri_parsed.netloc, uri_parsed.path)
@@ -449,4 +451,3 @@ def replace_report_hash(
         """ Override hash in the given file by using the given version hash.
         """
-        pass
diff --git a/tools/report-converter/codechecker_report_converter/report/report_file.py b/tools/report-converter/codechecker_report_converter/report/report_file.py
index a10d7ef8d1..41fa9c6db5 100644
--- a/tools/report-converter/codechecker_report_converter/report/report_file.py
+++ b/tools/report-converter/codechecker_report_converter/report/report_file.py
@@ -25,7 +25,7 @@
 
 SUPPORTED_ANALYZER_EXTENSIONS = \
-    tuple([f".{ext}" for ext in SUPPORTED_ANALYZER_TYPES])
+    tuple(f".{ext}" for ext in SUPPORTED_ANALYZER_TYPES)
 
 
 def is_supported(analyzer_result_file_path: str) -> bool:
@@ -47,6 +47,8 @@ def get_parser(
     if analyzer_result_file_path.endswith(sarif.EXTENSION):
         return sarif.Parser(checker_labels, file_cache)
 
+    assert False, f"Unknown extension for file {analyzer_result_file_path}"
+
 
 def get_reports(
     analyzer_result_file_path: str,
@@ -59,10 +61,11 @@ def get_reports(
 
     if parser:
         return parser.get_reports(analyzer_result_file_path, source_dir_path)
-    else:
-        LOG.error(f"Found no parsers to parse {analyzer_result_file_path}! "
-                  "Supported file extension types are "
-                  f"{SUPPORTED_ANALYZER_EXTENSIONS}.")
+
+    LOG.error("Found no parsers to parse %s! "
+              "Supported file extension types are %s.",
+              analyzer_result_file_path,
+              SUPPORTED_ANALYZER_EXTENSIONS)
     return []
diff --git a/tools/report-converter/codechecker_report_converter/report/reports.py b/tools/report-converter/codechecker_report_converter/report/reports.py
index 0eeff2f9e3..65b0bf1c72 100644
--- a/tools/report-converter/codechecker_report_converter/report/reports.py
+++ b/tools/report-converter/codechecker_report_converter/report/reports.py
@@ -44,7 +44,7 @@ def get_changed_files(reports: List[Report]):
 
 def dump_changed_files(changed_files: Set[str]):
     """ Dump changed files.
""" if not changed_files: - return None + return file_paths = '\n'.join([' - ' + f for f in changed_files]) LOG.warning("The following source file contents changed or missing since " diff --git a/tools/report-converter/codechecker_report_converter/report/statistics.py b/tools/report-converter/codechecker_report_converter/report/statistics.py index c995179282..2cd1e4f3d7 100644 --- a/tools/report-converter/codechecker_report_converter/report/statistics.py +++ b/tools/report-converter/codechecker_report_converter/report/statistics.py @@ -34,7 +34,7 @@ def __init__(self): def _write_severity_statistics(self, out=sys.stdout): """ Print severity statistics if it's available. """ if not self.severity_statistics: - return None + return out.write("\n----==== Severity Statistics ====----\n") header = ["Severity", "Number of reports"] @@ -45,7 +45,7 @@ def _write_severity_statistics(self, out=sys.stdout): def _write_checker_statistics(self, out=sys.stdout): """ Print checker statistics if it's available. """ if not self.checker_statistics: - return None + return out.write("\n----==== Checker Statistics ====----\n") header = ["Checker name", "Severity", "Number of reports"] @@ -57,7 +57,7 @@ def _write_checker_statistics(self, out=sys.stdout): def _write_file_statistics(self, out=sys.stdout): """ Print file statistics if it's available. """ if not self.file_statistics: - return None + return out.write("\n----==== File Statistics ====----\n") header = ["File name", "Number of reports"] @@ -76,7 +76,7 @@ def _write_summary(self, out=sys.stdout): out.write(twodim.to_table(statistics_rows, False)) out.write("\n----=================----\n") - def write(self, out=sys.stdout): + def write(self, _=sys.stdout): """ Print statistics. """ self._write_severity_statistics() self._write_checker_statistics() diff --git a/tools/report-converter/codechecker_report_converter/twodim.py b/tools/report-converter/codechecker_report_converter/twodim.py index 22ed74fbac..d22403d3ba 100644 --- a/tools/report-converter/codechecker_report_converter/twodim.py +++ b/tools/report-converter/codechecker_report_converter/twodim.py @@ -37,7 +37,7 @@ def to_str( if format_name == 'rows': return to_rows(rows) - elif format_name == 'table' or format_name == 'plaintext': + elif format_name in ('table', 'plaintext'): # TODO: 'plaintext' for now to support the 'CodeChecker cmd' interface. return to_table(all_rows, True, separate_footer) elif format_name == 'csv': @@ -87,6 +87,7 @@ def to_rows(lines: Iterable[str]) -> str: try: str_parts.append(print_string.format(*line)) except IndexError: + # pylint: disable=raise-missing-from raise TypeError("One of the rows have a different number of " "columns than the others") @@ -135,6 +136,7 @@ def to_table( try: str_parts.append(print_string.format(*line)) except IndexError: + # pylint: disable=raise-missing-from raise TypeError("One of the rows have a different number of " "columns than the others") if i == 0 and separate_head: @@ -158,10 +160,7 @@ def to_csv(lines: Iterable[str]) -> str: ['' if e is None else e for e in line] for line in lines] # Count the columns. 
-    columns = 0
-    for line in lns:
-        if len(line) > columns:
-            columns = len(line)
+    columns = 0 if len(lns) == 0 else max(map(len, lns))
 
     print_string = ""
     for i in range(columns):
@@ -177,6 +176,7 @@ def to_csv(lines: Iterable[str]) -> str:
         try:
             str_parts.append(print_string.format(*line))
         except IndexError:
+            # pylint: disable=raise-missing-from
             raise TypeError("One of the rows have a different number of "
                             "columns than the others")
@@ -196,6 +196,6 @@ def to_dictlist(key_list, lines):
     res = []
     for line in lines:
-        res.append({key: value for (key, value) in zip(key_list, line)})
+        res.append(dict(zip(key_list, line)))
 
     return res
diff --git a/tools/report-converter/requirements_py/dev/requirements.txt b/tools/report-converter/requirements_py/dev/requirements.txt
index b24aab957a..187a14503d 100644
--- a/tools/report-converter/requirements_py/dev/requirements.txt
+++ b/tools/report-converter/requirements_py/dev/requirements.txt
@@ -1,6 +1,6 @@
 pytest==7.3.1
 sarif-tools==1.0.0
 pycodestyle==2.12.0
-pylint==2.8.2
+pylint==3.2.4
 portalocker==2.2.1
 mypy==1.7.1
diff --git a/tools/report-converter/tests/unit/analyzers/test_gcc_parser.py b/tools/report-converter/tests/unit/analyzers/test_gcc_parser.py
index caacd71c62..f9fbe9fc65 100644
--- a/tools/report-converter/tests/unit/analyzers/test_gcc_parser.py
+++ b/tools/report-converter/tests/unit/analyzers/test_gcc_parser.py
@@ -103,5 +103,4 @@ def test_gcc_transform_single_file(self):
         with open(plist_file, mode='rb') as pfile:
             exp = plistlib.load(pfile)
 
-        self.maxDiff = None
         self.assertEqual(res, exp)
diff --git a/tools/report-converter/tests/unit/analyzers/test_ubsan_parser.py b/tools/report-converter/tests/unit/analyzers/test_ubsan_parser.py
index becbee9de9..d28b061754 100644
--- a/tools/report-converter/tests/unit/analyzers/test_ubsan_parser.py
+++ b/tools/report-converter/tests/unit/analyzers/test_ubsan_parser.py
@@ -96,7 +96,6 @@ def test_ubsan1_nonmatching_msg(self):
         Test for the test_ubsan1_nonmatching_msg.plist file, where the
         reported error message doesn't match any of the checkers we
         recognize.
""" - self.maxDiff = None self.__check_analyzer_result( 'ubsan1_nonmatching_msg.out', 'ubsan1.cpp_ubsan.plist', ['files/ubsan1.cpp'], 'ubsan1_nonmatching_msg.plist') diff --git a/tools/report-converter/tests/unit/parser/plist/plist_test_files/empty_file b/tools/report-converter/tests/unit/parser/plist/plist_test_files/empty_file.plist similarity index 100% rename from tools/report-converter/tests/unit/parser/plist/plist_test_files/empty_file rename to tools/report-converter/tests/unit/parser/plist/plist_test_files/empty_file.plist diff --git a/tools/report-converter/tests/unit/parser/plist/test_plist_parser.py b/tools/report-converter/tests/unit/parser/plist/test_plist_parser.py index a4c07d6bbf..57666eeacb 100644 --- a/tools/report-converter/tests/unit/parser/plist/test_plist_parser.py +++ b/tools/report-converter/tests/unit/parser/plist/test_plist_parser.py @@ -146,7 +146,7 @@ def setup_class(cls): def test_empty_file(self): """Plist file is empty.""" - empty_plist = os.path.join(self.__plist_test_files, 'empty_file') + empty_plist = os.path.join(self.__plist_test_files, 'empty_file.plist') reports = report_file.get_reports(empty_plist) self.assertEqual(reports, []) diff --git a/tools/tu_collector/requirements_py/dev/requirements.txt b/tools/tu_collector/requirements_py/dev/requirements.txt index f0492af5b4..20cbbe3930 100644 --- a/tools/tu_collector/requirements_py/dev/requirements.txt +++ b/tools/tu_collector/requirements_py/dev/requirements.txt @@ -1,4 +1,4 @@ pytest==7.3.1 pycodestyle==2.12.0 -pylint==2.8.2 +pylint==3.2.4 mypy==1.7.1 diff --git a/web/client/codechecker_client/blame_info.py b/web/client/codechecker_client/blame_info.py index 0c338fd4cd..ed52db27f9 100644 --- a/web/client/codechecker_client/blame_info.py +++ b/web/client/codechecker_client/blame_info.py @@ -41,13 +41,13 @@ def __get_blame_info(file_path: str): try: repo = Repo(file_path, search_parent_directories=True) if repo.ignored(file_path): - LOG.debug(f"File {file_path} is an ignored file") - return + LOG.debug("File %s is an ignored file", file_path) + return None except InvalidGitRepositoryError: - return + return None except GitCommandError as ex: - LOG.debug(f"Failed to get blame information for {file_path}: {ex}") - return + LOG.debug("Failed to get blame information for %s: %s", file_path, ex) + return None tracking_branch = __get_tracking_branch(repo) @@ -92,6 +92,8 @@ def __get_blame_info(file_path: str): except Exception as ex: LOG.debug("Failed to get blame information for %s: %s", file_path, ex) + return None + def __collect_blame_info_for_files( file_paths: Iterable[str], diff --git a/web/client/codechecker_client/client.py b/web/client/codechecker_client/client.py index e4f3a744a5..730a83446b 100644 --- a/web/client/codechecker_client/client.py +++ b/web/client/codechecker_client/client.py @@ -116,7 +116,7 @@ def login_user(protocol, host, port, username, login=False): else: LOG.info("Logging in using credentials from command line...") pwd = getpass.getpass( - "Please provide password for user '{}': ".format(username)) + f"Please provide password for user '{username}': ") LOG.debug("Trying to login as %s to %s:%d", username, host, port) try: @@ -186,6 +186,8 @@ def perform_auth_for_handler(auth_client, host, port, manager): "cmd login'.") sys.exit(1) + return None + def setup_product_client(protocol, host, port, auth_client=None, product_name=None, diff --git a/web/client/codechecker_client/cmd/cmd.py b/web/client/codechecker_client/cmd/cmd.py index 7ec585877e..6be5de36b6 100644 --- 
+++ b/web/client/codechecker_client/cmd/cmd.py
@@ -770,12 +770,12 @@ def __register_add(parser):
 
     dbmodes = dbmodes.add_mutually_exclusive_group(required=False)
 
-    SQLITE_PRODUCT_ENDPOINT_DEFAULT_VAR = '<ENDPOINT>.sqlite'
+    sqlite_product_endpoint_default_var = '<ENDPOINT>.sqlite'
     dbmodes.add_argument('--sqlite',
                          type=str,
                          dest="sqlite",
                          metavar='SQLITE_FILE',
-                         default=SQLITE_PRODUCT_ENDPOINT_DEFAULT_VAR,
+                         default=sqlite_product_endpoint_default_var,
                          required=False,
                          help="Path of the SQLite database file to use. "
                               "Not absolute paths will be relative to "
@@ -791,7 +791,7 @@ def __register_add(parser):
                               "\"PostgreSQL arguments\" section on how "
                               "to configure the database connection.")
 
-    PGSQL_PRODUCT_ENDPOINT_DEFAULT_VAR = '<ENDPOINT>'
+    pgsql_product_endpoint_default_var = '<ENDPOINT>'
     pgsql = parser.add_argument_group(
         "PostgreSQL arguments",
         "Values of these arguments are ignored, unless '--postgresql' is "
@@ -819,7 +819,7 @@ def __register_add(parser):
     pgsql.add_argument('--dbusername', '--db-username',
                        type=str,
                        dest="dbusername",
-                       default=PGSQL_PRODUCT_ENDPOINT_DEFAULT_VAR,
+                       default=pgsql_product_endpoint_default_var,
                        required=False,
                        help="Username to use for connection.")
@@ -834,7 +834,7 @@ def __register_add(parser):
     pgsql.add_argument('--dbname', '--db-name',
                        type=str,
                        dest="dbname",
-                       default=PGSQL_PRODUCT_ENDPOINT_DEFAULT_VAR,
+                       default=pgsql_product_endpoint_default_var,
                        required=False,
                        help="Name of the database to use.")
@@ -847,8 +847,8 @@ def arg_match(options):
         that are present in the invocation argv."""
         matched_args = []
         for option in options:
-            if any([arg if option.startswith(arg) else None
-                    for arg in sys.argv[1:]]):
+            if any(arg if option.startswith(arg) else None
+                   for arg in sys.argv[1:]):
                 matched_args.append(option)
                 continue
@@ -863,21 +863,21 @@ def arg_match(options):
         psql_args_matching = arg_match(options)
         if any(psql_args_matching) and \
                 'postgresql' not in args:
-            first_matching_arg = next(iter([match for match
-                                            in psql_args_matching]))
-            parser.error("argument {0}: not allowed without argument "
-                         "--postgresql".format(first_matching_arg))
+            first_matching_arg = \
+                next(match for match in psql_args_matching)
+            parser.error(f"argument {first_matching_arg}: not allowed "
+                         "without argument --postgresql")
             # parser.error() terminates with return code 2.
 
         # Some arguments get a dynamic default value that depends on the
         # value of another argument.
-        if args.sqlite == SQLITE_PRODUCT_ENDPOINT_DEFAULT_VAR:
+        if args.sqlite == sqlite_product_endpoint_default_var:
             args.sqlite = args.endpoint + '.sqlite'
 
-        if args.dbusername == PGSQL_PRODUCT_ENDPOINT_DEFAULT_VAR:
+        if args.dbusername == pgsql_product_endpoint_default_var:
             args.dbusername = args.endpoint
 
-        if args.dbname == PGSQL_PRODUCT_ENDPOINT_DEFAULT_VAR:
+        if args.dbname == pgsql_product_endpoint_default_var:
             args.dbname = args.endpoint
 
         if 'postgresql' not in args:
diff --git a/web/client/codechecker_client/cmd/store.py b/web/client/codechecker_client/cmd/store.py
index 3aae8845fb..58e7f307a9 100644
--- a/web/client/codechecker_client/cmd/store.py
+++ b/web/client/codechecker_client/cmd/store.py
@@ -41,6 +41,16 @@
     get_report_path_hash
 from codechecker_report_converter.report.parser.base import AnalyzerInfo
 
+try:
+    from codechecker_client.blame_info import assemble_blame_info
+except ImportError:
+    def assemble_blame_info(_, __) -> int:
+        """
+        Shim for cases where Git blame info is not gatherable due to
+        missing libraries.
+ """ + raise NotImplementedError() + from codechecker_client import client as libclient from codechecker_client import product from codechecker_common import arg, logger, cmd_config @@ -53,16 +63,6 @@ from codechecker_web.shared import webserver_context, host_check from codechecker_web.shared.env import get_default_workspace -try: - from codechecker_client.blame_info import assemble_blame_info -except ImportError: - def assemble_blame_info(_, __) -> int: - """ - Shim for cases where Git blame info is not gatherable due to - missing libraries. - """ - raise NotImplementedError() - LOG = logger.get_logger('system') @@ -143,9 +143,9 @@ def sizeof_fmt(num, suffix='B'): """ for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']: if abs(num) < 1024.0: - return "%3.1f%s%s" % (num, unit, suffix) + return f"{num:3.1f}{unit}{suffix}" num /= 1024.0 - return "%.1f%s%s" % (num, 'Yi', suffix) + return f"{num:.1f}Yi{suffix}" def get_file_content_hash(file_path): @@ -309,7 +309,7 @@ def add_arguments_to_parser(parser): def __get_run_name(input_list): """Create a runname for the stored analysis from the input list.""" - # Try to create a name from the metada JSON(s). + # Try to create a name from the metadata JSON(s). names = set() for input_path in input_list: metafile = os.path.join(input_path, "metadata.json") @@ -333,8 +333,8 @@ def __get_run_name(input_list): return name elif len(names) > 1: return "multiple projects: " + ', '.join(names) - else: - return False + + return False def scan_for_review_comment(job: Tuple[str, Iterable[int]]): @@ -451,7 +451,7 @@ def assemble_zip(inputs, skip_file_path = os.path.join(dir_path, 'skip_file') if os.path.exists(skip_file_path): - with open(skip_file_path, 'r') as f: + with open(skip_file_path, 'r', encoding='utf-8') as f: LOG.info("Found skip file %s with the following content:\n%s", skip_file_path, f.read()) @@ -746,7 +746,7 @@ def storing_analysis_statistics(client, inputs, run_name): if not statistics_files: LOG.debug("No analyzer statistics information can be found in the " "report directory.") - return False + return None # Write statistics files to the ZIP file. with zipfile.ZipFile(zip_file, 'a', allowZip64=True) as zipf: @@ -776,6 +776,8 @@ def storing_analysis_statistics(client, inputs, run_name): os.close(zip_file_handle) os.remove(zip_file) + return None + class WatchdogError(Exception): """ @@ -814,12 +816,13 @@ def _timeout_watchdog(timeout: timedelta, trap: int): "pollable asynchronous store" feature is implemented, see: http://github.com/Ericsson/codechecker/issues/3672 """ - def _signal_handler(sig: int, frame): + def _signal_handler(sig: int, _): if sig == trap: signal.signal(trap, signal.SIG_DFL) - raise WatchdogError(timeout, - "Timeout watchdog hit %d seconds (%s)" - % (timeout.total_seconds(), str(timeout))) + raise WatchdogError( + timeout, + f"Timeout watchdog hit {timeout.total_seconds()} " + f"seconds ({str(timeout)})") pid = os.getpid() timer = Timer(timeout.total_seconds(), @@ -856,7 +859,7 @@ def main(args): sys.exit(1) if not host_check.check_zlib(): - raise Exception("zlib is not available on the system!") + raise ModuleNotFoundError("zlib is not available on the system!") # To ensure the help message prints the default folder properly, # the 'default' for 'args.input' is a string, not a list. 
diff --git a/web/client/codechecker_client/cmd_line_client.py b/web/client/codechecker_client/cmd_line_client.py
index debfbfc178..7b526e0662 100644
--- a/web/client/codechecker_client/cmd_line_client.py
+++ b/web/client/codechecker_client/cmd_line_client.py
@@ -19,7 +19,7 @@
 import sys
 import shutil
 import time
-from typing import Dict, Iterable, List, Optional, Set, Tuple, Union
+from typing import Dict, Iterable, List, Optional, Set, Tuple
 
 from codechecker_api.codeCheckerDBAccess_v6 import constants, ttypes
 from codechecker_api_shared.ttypes import RequestFailed
@@ -96,6 +96,8 @@ def run_sort_type_str(value: ttypes.RunSortType) -> Optional[str]:
     elif value == ttypes.RunSortType.CC_VERSION:
         return 'codechecker_version'
 
+    assert False, f"Unknown ttypes.RunSortType: {value}"
+
 
 def run_sort_type_enum(value: str) -> Optional[ttypes.RunSortType]:
     """ Returns the given run sort type Thrift enum value. """
@@ -110,21 +112,21 @@ def run_sort_type_enum(value: str) -> Optional[ttypes.RunSortType]:
     elif value == 'codechecker_version':
         return ttypes.RunSortType.CC_VERSION
 
+    assert False, f"Unknown ttypes.RunSortType value: {value}"
+
 
-def get_diff_type(args) -> Union[ttypes.DiffType, None]:
+def get_diff_type(args) -> ttypes.DiffType:
     """ Returns Thrift DiffType value by processing the arguments. """
     if 'new' in args:
         return ttypes.DiffType.NEW
-
-    if 'unresolved' in args:
+    elif 'unresolved' in args:
         return ttypes.DiffType.UNRESOLVED
-
-    if 'resolved' in args:
+    elif 'resolved' in args:
         return ttypes.DiffType.RESOLVED
 
-    return None
+    assert False, f"Unknown ttypes.DiffType: {args}"
 
 
 def get_run_tag(client, run_ids: List[int], tag_name: str):
@@ -261,22 +263,22 @@ def skip_report_dir_result(
     if report_filter.checkerName:
         checker_name = report.checker_name
-        if not any([re.match(r'^' + c.replace("*", ".*") + '$',
-                             checker_name, re.IGNORECASE)
-                    for c in report_filter.checkerName]):
+        if not any(re.match(r'^' + c.replace("*", ".*") + '$',
+                            checker_name, re.IGNORECASE)
+                   for c in report_filter.checkerName):
             return True
 
     if report_filter.filepath:
-        if not any([re.match(r'^' + f.replace("*", ".*") + '$',
-                             report.file.path, re.IGNORECASE)
-                    for f in report_filter.filepath]):
+        if not any(re.match(r'^' + f.replace("*", ".*") + '$',
+                            report.file.path, re.IGNORECASE)
+                   for f in report_filter.filepath):
            return True
 
     if report_filter.checkerMsg:
         checker_msg = report.message
-        if not any([re.match(r'^' + c.replace("*", ".*") + '$',
-                             checker_msg, re.IGNORECASE)
-                    for c in report_filter.checkerMsg]):
+        if not any(re.match(r'^' + c.replace("*", ".*") + '$',
+                            checker_msg, re.IGNORECASE)
+                   for c in report_filter.checkerMsg):
             return True
 
     return False
@@ -875,7 +877,7 @@ def get_diff_local_dir_remote_run(
     run_ids, run_names, tag_ids = \
         process_run_args(client, remote_run_names)
-    local_report_hashes = set([r.report_hash for r in report_dir_results])
+    local_report_hashes = set(r.report_hash for r in report_dir_results)
 
     review_status_filter = ttypes.ReviewStatusRuleFilter()
     review_status_filter.reportHashes = local_report_hashes
@@ -972,7 +974,7 @@ def get_diff_remote_run_local_dir(
     run_ids, run_names, tag_ids = \
         process_run_args(client, remote_run_names)
-    local_report_hashes = set([r.report_hash for r in report_dir_results])
+    local_report_hashes = set(r.report_hash for r in report_dir_results)
 
     review_status_filter = ttypes.ReviewStatusRuleFilter()
     review_status_filter.reportHashes = local_report_hashes
@@ -1064,8 +1066,7 @@ def get_diff_remote_runs(
     default_detection_status = [ttypes.DetectionStatus.NEW,
                                ttypes.DetectionStatus.REOPENED,
                                ttypes.DetectionStatus.UNRESOLVED]
-    if report_filter.detectionStatus != default_detection_status and \
-            report_filter.detectionStatus != []:
+    if report_filter.detectionStatus not in (default_detection_status, []):
         LOG.warning("--detection-status is ignored when comparing tags, "
                     "showing reports regardless of detection status.")
@@ -1111,8 +1112,8 @@ def get_diff_local_dirs(
             new_results = [res for res in new_results
                            if res.review_status.status in statuses_str]
 
-    base_hashes = set([res.report_hash for res in base_results])
-    new_hashes = set([res.report_hash for res in new_results])
+    base_hashes = set(res.report_hash for res in base_results)
+    new_hashes = set(res.report_hash for res in new_results)
 
     # Add hashes from the baseline files.
     base_hashes.update(baseline.get_report_hashes(baseline_files))
@@ -1275,8 +1276,8 @@ def handle_diff_results(args):
         check_deprecated_arg_usage(args)
 
     if 'clean' in args and os.path.isdir(output_dir):
-        print("Previous analysis results in '{0}' have been removed, "
-              "overwriting with current results.".format(output_dir))
+        print(f"Previous analysis results in '{output_dir}' have been "
+              "removed, overwriting with current results.")
         shutil.rmtree(output_dir)
 
     if output_dir and not os.path.exists(output_dir):
@@ -1489,11 +1490,11 @@ def checker_count(checker_dict, key):
     severities = []
     severity_total = 0
-    for key, count in sorted(list(sev_count.items()),
-                             reverse=True):
-        severities.append(dict(
-            severity=ttypes.Severity._VALUES_TO_NAMES[key],
-            reports=count))
+    for key, count in sorted(list(sev_count.items()), reverse=True):
+        severities.append({
+            "severity": ttypes.Severity._VALUES_TO_NAMES[key],
+            "reports": count
+        })
         severity_total += count
 
     all_results = []
@@ -1501,15 +1502,16 @@ def checker_count(checker_dict, key):
     for key, checker_data in sorted(list(all_checkers_dict.items()),
                                     key=lambda x: x[1].severity,
                                     reverse=True):
-        all_results.append(dict(
-            checker=key,
-            severity=ttypes.Severity._VALUES_TO_NAMES[checker_data.severity],
-            reports=checker_data.count,
-            unreviewed=checker_count(unrev_checkers, key),
-            confirmed=checker_count(confirmed_checkers, key),
-            false_positive=checker_count(false_checkers, key),
-            intentional=checker_count(intentional_checkers, key)
-        ))
+        all_results.append({
+            "checker": key,
+            "severity":
+                ttypes.Severity._VALUES_TO_NAMES[checker_data.severity],
+            "reports": checker_data.count,
+            "unreviewed": checker_count(unrev_checkers, key),
+            "confirmed": checker_count(confirmed_checkers, key),
+            "false_positive": checker_count(false_checkers, key),
+            "intentional": checker_count(intentional_checkers, key)
+        })
         total['total_reports'] += checker_data.count
         total['total_unreviewed'] += checker_count(unrev_checkers, key)
         total['total_confirmed'] += checker_count(confirmed_checkers, key)
@@ -1739,10 +1741,11 @@ def handle_import(args):
                 author=review['author'],
                 date=review['date'])
 
-    exportData = ttypes.ExportData(comments=comment_data_list,
-                                   reviewData=review_data_list)
+    export_data = ttypes.ExportData(
+        comments=comment_data_list,
+        reviewData=review_data_list)
 
-    status = client.importData(exportData)
+    status = client.importData(export_data)
     if not status:
         LOG.error("Failed to import data!")
         sys.exit(1)
diff --git a/web/client/codechecker_client/credential_manager.py b/web/client/codechecker_client/credential_manager.py
index 2946463c84..d6b700d8a6 100644
--- a/web/client/codechecker_client/credential_manager.py
+++ b/web/client/codechecker_client/credential_manager.py
@@ -48,7 +48,7 @@ def simplify_credentials(credentials):
        host = match.group('host')
        port = match.group('port')
-        host_port = '{0}:{1}'.format(host, port) if port else host
+        host_port = f'{host}:{port}' if port else host
 
        ret[host_port] = auth_string
@@ -124,14 +124,14 @@ def is_autologin_enabled(self):
         return self.__autologin
 
     def get_token(self, host, port):
-        return self.__tokens.get("{0}:{1}".format(host, port))
+        return self.__tokens.get(f"{host}:{port}")
 
     def get_auth_string(self, host, port):
-        ret = self.__save['credentials'].get('{0}:{1}'.format(host, port))
+        ret = self.__save['credentials'].get(f'{host}:{port}')
         if not ret:
             ret = self.__save['credentials'].get(host)
         if not ret:
-            ret = self.__save['credentials'].get('*:{0}'.format(port))
+            ret = self.__save['credentials'].get(f'*:{port}')
         if not ret:
             ret = self.__save['credentials'].get('*')
@@ -139,9 +139,9 @@ def get_auth_string(self, host, port):
 
     def save_token(self, host, port, token, destroy=False):
         if destroy:
-            self.__tokens.pop('{0}:{1}'.format(host, port), None)
+            self.__tokens.pop(f'{host}:{port}', None)
         else:
-            self.__tokens['{0}:{1}'.format(host, port)] = token
+            self.__tokens[f'{host}:{port}'] = token
 
         with open(self.token_file, 'w',
                   encoding="utf-8", errors="ignore") as scfg:
diff --git a/web/client/codechecker_client/helpers/authentication.py b/web/client/codechecker_client/helpers/authentication.py
index b0403fe0b9..f222cea661 100644
--- a/web/client/codechecker_client/helpers/authentication.py
+++ b/web/client/codechecker_client/helpers/authentication.py
@@ -11,76 +11,79 @@
 
 from codechecker_api.Authentication_v6 import codeCheckerAuthentication
 
-from codechecker_client.thrift_call import ThriftClientCall
+from codechecker_client.thrift_call import thrift_client_call
 from .base import BaseClientHelper
 
 
+# These names are inherited from Thrift stubs.
+# pylint: disable=invalid-name class ThriftAuthHelper(BaseClientHelper): def __init__(self, protocol, host, port, uri, session_token=None): super().__init__(protocol, host, port, uri, session_token) self.client = codeCheckerAuthentication.Client(self.protocol) - @ThriftClientCall + @thrift_client_call def checkAPIVersion(self): pass # ============= Authentication and session handling ============= - @ThriftClientCall + @thrift_client_call def getAuthParameters(self): pass - @ThriftClientCall + @thrift_client_call def getAcceptedAuthMethods(self): pass - @ThriftClientCall + @thrift_client_call def getAccessControl(self): pass - @ThriftClientCall + @thrift_client_call def performLogin(self, auth_method, auth_string): pass - @ThriftClientCall + @thrift_client_call def destroySession(self): pass # ============= Authorization, permission management ============= - @ThriftClientCall + @thrift_client_call def getPermissions(self, scope): pass - @ThriftClientCall + # pylint: disable=redefined-builtin + @thrift_client_call def getPermissionsForUser(self, scope, extra_params, filter): pass - @ThriftClientCall + @thrift_client_call def getAuthorisedNames(self, permission, extra_params): pass - @ThriftClientCall + @thrift_client_call def addPermission(self, permission, auth_name, is_group, extra_params): pass - @ThriftClientCall + @thrift_client_call def removePermission(self, permission, auth_name, is_group, extra_params): pass - @ThriftClientCall + @thrift_client_call def hasPermission(self, permission, extra_params): pass # ============= Token management ============= - @ThriftClientCall + @thrift_client_call def newToken(self, description): pass - @ThriftClientCall + @thrift_client_call def removeToken(self, token): pass - @ThriftClientCall + @thrift_client_call def getTokens(self): pass diff --git a/web/client/codechecker_client/helpers/base.py b/web/client/codechecker_client/helpers/base.py index a0da3166c8..f304f72e2f 100644 --- a/web/client/codechecker_client/helpers/base.py +++ b/web/client/codechecker_client/helpers/base.py @@ -28,8 +28,6 @@ def __init__(self, protocol, host, port, uri, session_token=None, """ @param get_new_token: a function which can generate a new token. """ - self.__host = host - self.__port = port url = create_product_url(protocol, host, port, uri) self.transport = None diff --git a/web/client/codechecker_client/helpers/configuration.py b/web/client/codechecker_client/helpers/configuration.py index af2b227807..09136e1661 100644 --- a/web/client/codechecker_client/helpers/configuration.py +++ b/web/client/codechecker_client/helpers/configuration.py @@ -11,10 +11,12 @@ from codechecker_api.Configuration_v6 import configurationService -from codechecker_client.thrift_call import ThriftClientCall +from codechecker_client.thrift_call import thrift_client_call from .base import BaseClientHelper +# These names are inherited from Thrift stubs. 
+# pylint: disable=invalid-name class ThriftConfigHelper(BaseClientHelper): def __init__(self, protocol, host, port, uri, session_token=None): super().__init__(protocol, host, port, uri, session_token) @@ -22,6 +24,6 @@ def __init__(self, protocol, host, port, uri, session_token=None): self.client = configurationService.Client(self.protocol) # ----------------------------------------------------------------------- - @ThriftClientCall + @thrift_client_call def getNotificationBannerText(self): pass diff --git a/web/client/codechecker_client/helpers/product.py b/web/client/codechecker_client/helpers/product.py index 0f81569598..86f4fdd4c3 100644 --- a/web/client/codechecker_client/helpers/product.py +++ b/web/client/codechecker_client/helpers/product.py @@ -11,10 +11,12 @@ from codechecker_api.ProductManagement_v6 import codeCheckerProductService -from codechecker_client.thrift_call import ThriftClientCall +from codechecker_client.thrift_call import thrift_client_call from .base import BaseClientHelper +# These names are inherited from Thrift stubs. +# pylint: disable=invalid-name class ThriftProductHelper(BaseClientHelper): def __init__(self, protocol, host, port, uri, session_token=None, get_new_token=None): @@ -27,28 +29,28 @@ def __init__(self, protocol, host, port, uri, session_token=None, self.client = codeCheckerProductService.Client(self.protocol) # ----------------------------------------------------------------------- - @ThriftClientCall + @thrift_client_call def getPackageVersion(self): pass # ----------------------------------------------------------------------- - @ThriftClientCall + @thrift_client_call def getProducts(self, product_endpoint_filter, product_name_filter): pass - @ThriftClientCall + @thrift_client_call def getCurrentProduct(self): pass - @ThriftClientCall + @thrift_client_call def getProductConfiguration(self, product_id): pass # ----------------------------------------------------------------------- - @ThriftClientCall + @thrift_client_call def addProduct(self, product): pass - @ThriftClientCall + @thrift_client_call def removeProduct(self, product_id): pass diff --git a/web/client/codechecker_client/helpers/results.py b/web/client/codechecker_client/helpers/results.py index 23c3d683d2..c558cfe040 100644 --- a/web/client/codechecker_client/helpers/results.py +++ b/web/client/codechecker_client/helpers/results.py @@ -11,10 +11,12 @@ from codechecker_api.codeCheckerDBAccess_v6 import codeCheckerDBAccess -from codechecker_client.thrift_call import ThriftClientCall +from codechecker_client.thrift_call import thrift_client_call from .base import BaseClientHelper +# These names are inherited from Thrift stubs. 
+# pylint: disable=invalid-name class ThriftResultsHelper(BaseClientHelper): def __init__(self, protocol, host, port, uri, session_token=None, @@ -27,169 +29,170 @@ def __init__(self, protocol, host, port, uri, session_token=None, self.client = codeCheckerDBAccess.Client(self.protocol) - @ThriftClientCall + @thrift_client_call def getRunData(self, run_name_filter, limit, offset, sort_mode): pass - @ThriftClientCall + @thrift_client_call def getRunHistory(self, run_ids, limit, offset, run_history_filter): pass - @ThriftClientCall + @thrift_client_call def getReportDetails(self, reportId): pass - @ThriftClientCall + @thrift_client_call def getSourceFileData(self, fileId, fileContent, encoding): pass - @ThriftClientCall + @thrift_client_call def getLinesInSourceFileContents(self, lines_in_files_requested, encoding): pass - @ThriftClientCall + @thrift_client_call def getDiffResultsHash(self, run_ids, report_hashes, diff_type, skip_detection_statuses, tag_ids): pass - @ThriftClientCall + @thrift_client_call def getRunResultTypes(self, runId, reportFilters): pass - @ThriftClientCall + @thrift_client_call def removeRunResults(self, run_ids): pass - @ThriftClientCall + @thrift_client_call def removeRunReports(self, run_ids, report_filter, cmp_data): pass - @ThriftClientCall + @thrift_client_call def removeRun(self, run_id, run_filter): pass - @ThriftClientCall + @thrift_client_call def updateRunData(self, run_id, new_run_name): pass - @ThriftClientCall + @thrift_client_call def getSuppressedBugs(self, run_id): pass - @ThriftClientCall + @thrift_client_call def getNewResults(self, base_run_id, new_run_id, limit, offset, sortType, reportFilters): pass - @ThriftClientCall + @thrift_client_call def getUnresolvedResults(self, base_run_id, new_run_id, limit, offset, sortType, reportFilters): pass - @ThriftClientCall + @thrift_client_call def getResolvedResults(self, base_run_id, new_run_id, limit, offset, sortType, reportFilters): pass - @ThriftClientCall + @thrift_client_call def changeReviewStatus(self, report_id, status, message): pass - @ThriftClientCall + @thrift_client_call def changeReviewStatusByHash(self, bug_hash, status, message): pass - @ThriftClientCall + # pylint: disable=redefined-builtin + @thrift_client_call def getReviewStatusRules(self, filter, sortMode, limit, offset): pass - @ThriftClientCall + @thrift_client_call def getRunResults(self, runIds, limit, offset, sortType, reportFilter, cmpData, getDetails): pass - @ThriftClientCall + @thrift_client_call def getReportAnnotations(self, key): pass - @ThriftClientCall + @thrift_client_call def getRunResultCount(self, runIds, reportFilter, cmpData): pass - @ThriftClientCall + @thrift_client_call def getSeverityCounts(self, runIds, reportFilter, cmpData): pass - @ThriftClientCall + @thrift_client_call def getCheckerMsgCounts(self, runIds, reportFilter, cmpData, limit, offset): pass - @ThriftClientCall + @thrift_client_call def getReviewStatusCounts(self, runIds, reportFilter, cmpData): pass - @ThriftClientCall + @thrift_client_call def getDetectionStatusCounts(self, runIds, reportFilter, cmpData): pass - @ThriftClientCall + @thrift_client_call def getFileCounts(self, runIds, reportFilter, cmpData, limit, offset): pass - @ThriftClientCall + @thrift_client_call def getCheckerCounts(self, base_run_ids, reportFilter, cmpData, limit, offset): pass - @ThriftClientCall + @thrift_client_call def exportData(self, runId): pass - @ThriftClientCall + @thrift_client_call def importData(self, exportData): pass # SOURCE COMPONENT RELATED API CALLS - 
@ThriftClientCall + @thrift_client_call def addSourceComponent(self, name, value, description): pass - @ThriftClientCall + @thrift_client_call def getSourceComponents(self, component_filter): pass - @ThriftClientCall + @thrift_client_call def removeSourceComponent(self, name): pass # STORAGE RELATED API CALLS - @ThriftClientCall + @thrift_client_call def getMissingContentHashes(self, file_hashes): pass - @ThriftClientCall + @thrift_client_call def getMissingContentHashesForBlameInfo(self, file_hashes): pass - @ThriftClientCall + @thrift_client_call def massStoreRun(self, name, tag, version, zipdir, force, trim_path_prefixes, description): pass - @ThriftClientCall + @thrift_client_call def allowsStoringAnalysisStatistics(self): pass - @ThriftClientCall + @thrift_client_call def getAnalysisStatisticsLimits(self): pass - @ThriftClientCall + @thrift_client_call def getAnalysisStatistics(self, run_id, run_history_id): pass - @ThriftClientCall + @thrift_client_call def storeAnalysisStatistics(self, run_name, zip_file): pass diff --git a/web/client/codechecker_client/product.py b/web/client/codechecker_client/product.py index e1f5bb7a79..3d071577a5 100644 --- a/web/client/codechecker_client/product.py +++ b/web/client/codechecker_client/product.py @@ -43,8 +43,8 @@ def expand_whole_protocol_and_port(protocol=None, port=None): elif protocol == 'https': portnum = 443 else: - raise ValueError("'{0}' is not a protocol understood by " - "CodeChecker".format(protocol)) + raise ValueError(f"'{protocol}' is not a protocol understood by " + "CodeChecker") else: protocol = 'http' portnum = 8001 @@ -102,25 +102,22 @@ def split_server_url(url): # A valid product_url looks like this: 'http://localhost:8001/Product'. protocol, port = expand_whole_protocol_and_port(protocol, None) host = 'localhost' - try: - parts = url.split('/', 1) - # Something is either a hostname, or a host:port. - server_addr = parts[0] - host, maybe_port = understand_server_addr(server_addr) - if maybe_port: - port = maybe_port - except Exception: - raise ValueError("The specified server URL is invalid.") + parts = url.split('/', 1) - LOG.debug("Result: With '%s' on server '%s:%s'", - protocol, host, port) + # Something is either a hostname, or a host:port. + server_addr = parts[0] + host, maybe_port = understand_server_addr(server_addr) + if maybe_port: + port = maybe_port + + LOG.debug("Result: With '%s' on server '%s:%s'", protocol, host, port) return protocol, host, port def create_product_url(protocol, host, port, endpoint): - return "{0}://{1}:{2}{3}".format(protocol, host, str(port), endpoint) + return f"{protocol}://{host}:{str(port)}{endpoint}" def split_product_url(url): @@ -146,34 +143,32 @@ def split_product_url(url): # A valid product_url looks like this: 'http://localhost:8001/Product'. protocol, port = expand_whole_protocol_and_port(protocol, None) host, product_name = 'localhost', 'Default' - try: - parts = url.split("/") - - if len(parts) == 1: - # If only one word is given in the URL, consider it as product - # name, but then it must appear to be a valid product name. - - # "Only one word" URLs can be just simple host names too: - # http://codechecker.example.com:1234 should NOT be understood as - # the "codechecker.example.com:1234" product on "localhost:8001". - product_name = parts[0] - if product_name[0].isdigit() or '.' in product_name \ - or ':' in product_name: - raise ValueError("The given product URL is invalid. 
Please " - "specify a full product URL.") - elif len(parts) == 2: - # URL is at least something/product-name. - product_name = parts[1] - - # Something is either a hostname, or a host:port. - server_addr = parts[0] - host, maybe_port = understand_server_addr(server_addr) - if maybe_port: - port = maybe_port - else: - raise ValueError("Product URL can not contain extra '/' chars.") - except Exception: - raise ValueError("The specified product URL is invalid.") + + parts = url.split("/") + + if len(parts) == 1: + # If only one word is given in the URL, consider it as product + # name, but then it must appear to be a valid product name. + + # "Only one word" URLs can be just simple host names too: + # http://codechecker.example.com:1234 should NOT be understood as + # the "codechecker.example.com:1234" product on "localhost:8001". + product_name = parts[0] + if product_name[0].isdigit() or '.' in product_name \ + or ':' in product_name: + raise ValueError("The given product URL is invalid. Please " + "specify a full product URL.") + elif len(parts) == 2: + # URL is at least something/product-name. + product_name = parts[1] + + # Something is either a hostname, or a host:port. + server_addr = parts[0] + host, maybe_port = understand_server_addr(server_addr) + if maybe_port: + port = maybe_port + else: + raise ValueError("Product URL can not contain extra '/' chars.") LOG.debug("Result: With '%s' on server '%s:%s', product '%s'", protocol, host, port, product_name) diff --git a/web/client/codechecker_client/thrift_call.py b/web/client/codechecker_client/thrift_call.py index e3544ef945..32e5b3dc18 100644 --- a/web/client/codechecker_client/thrift_call.py +++ b/web/client/codechecker_client/thrift_call.py @@ -30,16 +30,16 @@ def truncate_arg(arg, max_len=100): return arg -def ThriftClientCall(function): +def thrift_client_call(function): """ Wrapper function for thrift client calls. - open and close transport, - log and handle errors """ - funcName = function.__name__ + func_name = function.__name__ def wrapper(self, *args, **kwargs): self.transport.open() - func = getattr(self.client, funcName) + func = getattr(self.client, func_name) try: try: return func(*args, **kwargs) @@ -54,38 +54,35 @@ def wrapper(self, *args, **kwargs): return func(*args, **kwargs) except codechecker_api_shared.ttypes.RequestFailed as reqfailure: - LOG.error('Calling API endpoint: %s', funcName) - if reqfailure.errorCode ==\ + LOG.error('Calling API endpoint: %s', func_name) + if reqfailure.errorCode == \ codechecker_api_shared.ttypes.ErrorCode.GENERAL and \ reqfailure.extraInfo and \ reqfailure.extraInfo[0] == "report_limit": # We handle this error in near the business logic. 
raise reqfailure + + if reqfailure.errorCode ==\ + codechecker_api_shared.ttypes.ErrorCode.DATABASE: + LOG.error( + 'Database error on server\n%s', str(reqfailure.message)) + elif reqfailure.errorCode ==\ + codechecker_api_shared.ttypes.ErrorCode.AUTH_DENIED: + LOG.error( + 'Authentication denied\n %s', str(reqfailure.message)) + elif reqfailure.errorCode ==\ + codechecker_api_shared.ttypes.ErrorCode.UNAUTHORIZED: + LOG.error( + 'Unauthorized to access\n %s', str(reqfailure.message)) + LOG.error( + 'Ask the product admin for additional access rights.') + elif reqfailure.errorCode ==\ + codechecker_api_shared.ttypes.ErrorCode.API_MISMATCH: + LOG.error( + 'Client/server API mismatch\n %s', str(reqfailure.message)) else: - if reqfailure.errorCode ==\ - codechecker_api_shared.ttypes.ErrorCode.DATABASE: - LOG.error('Database error on server\n%s', - str(reqfailure.message)) - elif reqfailure.errorCode ==\ - codechecker_api_shared.ttypes.ErrorCode.AUTH_DENIED: - LOG.error('Authentication denied\n %s', - str(reqfailure.message)) - elif reqfailure.errorCode ==\ - codechecker_api_shared.ttypes.ErrorCode.UNAUTHORIZED: - LOG.error('Unauthorized to access\n %s', - str(reqfailure.message)) - LOG.error('Ask the product admin for additional access ' - 'rights.') - elif reqfailure.errorCode ==\ - codechecker_api_shared.ttypes.ErrorCode.API_MISMATCH: - LOG.error('Client/server API mismatch\n %s', - str(reqfailure.message)) - else: - LOG.error('API call error: %s\n%s', - funcName, - str(reqfailure) - ) - sys.exit(1) + LOG.error('API call error: %s\n%s', func_name, str(reqfailure)) + sys.exit(1) except TApplicationException as ex: LOG.error("Internal server error: %s", str(ex.message)) sys.exit(1) @@ -100,7 +97,7 @@ def wrapper(self, *args, **kwargs): LOG.error('Thrift size limit error.') elif ex.type == TProtocolException.BAD_VERSION: LOG.error('Thrift bad version error.') - LOG.error(funcName) + LOG.error(func_name) # Do not print the argument list if it contains sensitive # information such as passwords. @@ -108,7 +105,7 @@ def wrapper(self, *args, **kwargs): # the full content of it (for example the 'b64zip' parameter of the # 'massStoreRun' API function). For this reason we have to truncate # the arguments. - if funcName != "performLogin": + if func_name != "performLogin": LOG.error([truncate_arg(arg) for arg in args]) LOG.error(kwargs) diff --git a/web/codechecker_web/shared/pgpass.py b/web/codechecker_web/shared/pgpass.py index 8d4e56222b..935509613a 100644 --- a/web/codechecker_web/shared/pgpass.py +++ b/web/codechecker_web/shared/pgpass.py @@ -39,11 +39,13 @@ def _match_field(line, field): escaped = False if not field: return None - elif field[0] == line[0]: + + if field[0] == line[0]: line = line[1:] field = field[1:] else: return None + return None diff --git a/web/codechecker_web/shared/version.py b/web/codechecker_web/shared/version.py index 34d8826c36..e5d544a750 100644 --- a/web/codechecker_web/shared/version.py +++ b/web/codechecker_web/shared/version.py @@ -23,11 +23,11 @@ # Used by the client to automatically identify the latest major and minor # version. -CLIENT_API = '{0}.{1}'.format( - max(SUPPORTED_VERSIONS.keys()), - SUPPORTED_VERSIONS[max(SUPPORTED_VERSIONS.keys())]) +CLIENT_API = \ + f'{max(SUPPORTED_VERSIONS.keys())}.' \ + f'{SUPPORTED_VERSIONS[max(SUPPORTED_VERSIONS.keys())]}' def get_version_str(): - return ', '.join(["v" + str(v) + "." 
+ str(SUPPORTED_VERSIONS[v]) - for v in SUPPORTED_VERSIONS]) + return ', '.join(f"v{str(major)}.{str(minor)}" + for major, minor in SUPPORTED_VERSIONS.items()) diff --git a/web/requirements_py/dev/requirements.txt b/web/requirements_py/dev/requirements.txt index 5674ba6558..a85164e057 100644 --- a/web/requirements_py/dev/requirements.txt +++ b/web/requirements_py/dev/requirements.txt @@ -1,7 +1,7 @@ pycodestyle==2.12.0 psycopg2-binary==2.8.6 pg8000==1.15.2 -pylint==2.8.2 +pylint==3.2.4 pytest==7.3.1 mkdocs==1.5.3 coverage==5.5.0 diff --git a/web/server/codechecker_server/api/authentication.py b/web/server/codechecker_server/api/authentication.py index 97ada81c66..1430ad9fd6 100644 --- a/web/server/codechecker_server/api/authentication.py +++ b/web/server/codechecker_server/api/authentication.py @@ -33,6 +33,8 @@ LOG = get_logger('server') +# These names are inherited from Thrift stubs. +# pylint: disable=invalid-name class ThriftAuthHandler: """ Handle Thrift authentication requests. @@ -88,10 +90,7 @@ def getAuthParameters(self): @timeit def getLoggedInUser(self): - if self.__auth_session: - return self.__auth_session.user - else: - return "" + return self.__auth_session.user if self.__auth_session else "" @timeit def getAcceptedAuthMethods(self): @@ -160,8 +159,8 @@ def performLogin(self, auth_method, auth_string): LOG.info("'%s' logged in.", user_name) return session.token else: - msg = "Invalid credentials supplied for user '{0}'. " \ - "Refusing authentication!".format(user_name) + msg = f"Invalid credentials supplied for user " \ + f"'{user_name}'. Refusing authentication!" LOG.warning(msg) raise codechecker_api_shared.ttypes.RequestFailed( @@ -284,8 +283,7 @@ def getAuthorisedNames(self, permission, extra_params): if not require_manager(perm, params, self.__auth_session): raise codechecker_api_shared.ttypes.RequestFailed( codechecker_api_shared.ttypes.ErrorCode.UNAUTHORIZED, - "You can not manage the permission '{0}'" - .format(perm.name)) + f"You can not manage the permission '{perm.name}'") handler = make_handler(perm, params) users, groups = handler.list_permitted() @@ -308,8 +306,7 @@ def addPermission(self, permission, auth_name, is_group, extra_params): if not require_manager(perm, params, self.__auth_session): raise codechecker_api_shared.ttypes.RequestFailed( codechecker_api_shared.ttypes.ErrorCode.UNAUTHORIZED, - "You can not manage the permission '{0}'" - .format(perm.name)) + f"You can not manage the permission '{perm.name}'") handler = make_handler(perm, params) handler.add_permission(auth_name.strip(), @@ -332,8 +329,7 @@ def removePermission(self, permission, auth_name, is_group, extra_params): if not require_manager(perm, params, self.__auth_session): raise codechecker_api_shared.ttypes.RequestFailed( codechecker_api_shared.ttypes.ErrorCode.UNAUTHORIZED, - "You can not manage the permission '{0}'" - .format(perm.name)) + f"You can not manage the permission '{perm.name}'") handler = make_handler(perm, params) handler.remove_permission(auth_name, is_group, @@ -402,8 +398,8 @@ def removeToken(self, token): if not num_of_removed: raise codechecker_api_shared.ttypes.RequestFailed( codechecker_api_shared.ttypes.ErrorCode.DATABASE, - "Personal access token {0} was not found in the " - "database.".format(token)) + f"Personal access token {token} was not found in the " + "database.") # Invalidate the local session by token. 
self.__manager.invalidate_local_session(token) diff --git a/web/server/codechecker_server/api/config_handler.py b/web/server/codechecker_server/api/config_handler.py index 0ebb173224..aeba492d02 100644 --- a/web/server/codechecker_server/api/config_handler.py +++ b/web/server/codechecker_server/api/config_handler.py @@ -24,6 +24,8 @@ LOG = get_logger('server') +# These names are inherited from Thrift stubs. +# pylint: disable=invalid-name class ThriftConfigHandler: """ Manages Thrift requests regarding configuration. diff --git a/web/server/codechecker_server/api/mass_store_run.py b/web/server/codechecker_server/api/mass_store_run.py index 4b861d1822..87ab4e2a52 100644 --- a/web/server/codechecker_server/api/mass_store_run.py +++ b/web/server/codechecker_server/api/mass_store_run.py @@ -74,6 +74,7 @@ class RunLocking: def __init__(self, session: DBSession, run_name: str): self.__session = session self.__run_name = run_name + self.__run_lock = None def __enter__(self, *args): # Load the lock record for "FOR UPDATE" so that the transaction that @@ -184,9 +185,9 @@ def add_file_record( file_id = session.execute(insert_stmt).inserted_primary_key[0] session.commit() return file_id - else: - file_record = File(file_path, content_hash, None, None) - session.add(file_record) + + file_record = File(file_path, content_hash, None, None) + session.add(file_record) session.commit() except sqlalchemy.exc.IntegrityError as ex: LOG.error(ex) @@ -242,7 +243,7 @@ def __init__( version: Optional[str], b64zip: str, force: bool, - trim_path_prefixes: Optional[List[str]], + trim_path_prefix_list: Optional[List[str]], description: Optional[str] ): """ Initialize object. """ @@ -253,24 +254,24 @@ def __init__( self.__version = version self.__b64zip = b64zip self.__force = force - self.__trim_path_prefixes = trim_path_prefixes + self.__trim_path_prefixes = trim_path_prefix_list self.__description = description - self.__mips: Dict[str, MetadataInfoParser] = dict() - self.__analysis_info: Dict[str, AnalysisInfo] = dict() - self.__checker_row_cache: Dict[Tuple[str, str], Checker] = dict() + self.__mips: Dict[str, MetadataInfoParser] = {} + self.__analysis_info: Dict[str, AnalysisInfo] = {} + self.__checker_row_cache: Dict[Tuple[str, str], Checker] = {} self.__duration: int = 0 self.__report_count: int = 0 self.__report_limit: int = 0 - self.__wrong_src_code_comments: List[str] = list() + self.__wrong_src_code_comments: List[str] = [] self.__already_added_report_hashes: Set[str] = set() - self.__new_report_hashes: Dict[str, Tuple] = dict() + self.__new_report_hashes: Dict[str, Tuple] = {} self.__all_report_checkers: Set[str] = set() - self.__added_reports: List[Tuple[DBReport, Report]] = list() + self.__added_reports: List[Tuple[DBReport, Report]] = [] self.__reports_with_fake_checkers: Dict[ # Either a DBReport *without* an ID, or the ID of a committed # DBReport. - str, Tuple[Report, Union[DBReport, int]]] = dict() + str, Tuple[Report, Union[DBReport, int]]] = {} self.__get_report_limit_for_product() @@ -278,10 +279,6 @@ def __init__( def __manager(self): return self.__report_server._manager - @property - def __Session(self): - return self.__report_server._Session - @property def __config_database(self): return self.__report_server._config_database @@ -310,7 +307,7 @@ def __check_run_limit(self): max_run_count = product.run_limit # Session that handles constraints on the run. 
- with DBSession(self.__Session) as session: + with DBSession(self.__report_server._Session) as session: if not max_run_count: return @@ -385,16 +382,16 @@ def __store_run_lock(self, session: DBSession): when) raise codechecker_api_shared.ttypes.RequestFailed( codechecker_api_shared.ttypes.ErrorCode.DATABASE, - "The run named '{0}' is being stored into by {1}. If the " - "other store operation has failed, this lock will expire " - "at '{2}'.".format(self.__name, username, when)) + f"The run named '{self.__name}' is being stored into by " + f"{username}. If the other store operation has failed, this " + f"lock will expire at '{when}'.") # At any rate, if the lock has been created or updated, commit it # into the database. try: session.commit() except (sqlalchemy.exc.IntegrityError, - sqlalchemy.orm.exc.StaleDataError): + sqlalchemy.orm.exc.StaleDataError) as ex: # The commit of this lock can fail. # # In case two store ops attempt to lock the same run name at the @@ -414,8 +411,8 @@ def __store_run_lock(self, session: DBSession): self.__name) raise codechecker_api_shared.ttypes.RequestFailed( codechecker_api_shared.ttypes.ErrorCode.DATABASE, - "The run named '{0}' is being stored into by another " - "user.".format(self.__name)) + f"The run named '{self.__name}' is being stored into by " + "another user.") from ex def __free_run_lock(self, session: DBSession): """ Remove the lock from the database for the given run name. """ @@ -448,7 +445,7 @@ def __store_source_files( # record in the database or we need to add one. LOG.debug('%s not found or already stored.', trimmed_file_path) - with DBSession(self.__Session) as session: + with DBSession(self.__report_server._Session) as session: fid = add_file_record( session, trimmed_file_path, file_hash) @@ -461,7 +458,7 @@ def __store_source_files( source_file_path, file_hash) continue - with DBSession(self.__Session) as session: + with DBSession(self.__report_server._Session) as session: self.__add_file_content(session, source_file_path, file_hash) file_path_to_id[trimmed_file_path] = add_file_record( @@ -483,7 +480,7 @@ def __add_blame_info( .zip file. This function stores blame info even if the corresponding source file is not in the .zip file. """ - with DBSession(self.__Session) as session: + with DBSession(self.__report_server._Session) as session: for subdir, _, files in os.walk(blame_root): for f in files: blame_file = os.path.join(subdir, f) @@ -603,7 +600,7 @@ def __store_checker_identifiers(self, checkers: Set[Tuple[str, str]]): tries += 1 try: LOG.debug("[%s] Begin attempt %d...", self.__name, tries) - with DBSession(self.__Session) as session: + with DBSession(self.__report_server._Session) as session: known_checkers = {(r.analyzer_name, r.checker_name) for r in session .query(Checker.analyzer_name, @@ -743,7 +740,7 @@ def __store_analysis_info( connection_rows = [AnalysisInfoChecker( analysis_info, db_checkers[chk], is_enabled) for chk, is_enabled - in mip.checkers.get(analyzer, dict()).items()] + in mip.checkers.get(analyzer, {}).items()] for r in connection_rows: session.add(r) @@ -948,14 +945,11 @@ def __load_report_ids_for_reports_with_fake_checkers(self, session): the __realise_fake_checkers() operation. The reports **MUST** have been committed prior. 
""" - for rph in self.__reports_with_fake_checkers: - report, db_report = cast(Tuple[Report, DBReport], - self.__reports_with_fake_checkers[rph]) + for rph, (report, db_report) in \ + self.__reports_with_fake_checkers.items(): # Only load the "id" field from the database, not the entire row. session.refresh(db_report, ["id"]) - id_: int = db_report.id - - self.__reports_with_fake_checkers[rph] = (report, id_) + self.__reports_with_fake_checkers[rph] = (report, db_report.id) def __realise_fake_checkers(self, session): """ @@ -1166,7 +1160,7 @@ def __validate_and_add_report_annotations( doesn't match then an exception is thrown. In case of proper format the annotation is added to the database. """ - ALLOWED_TYPES = { + allowed_types = { "datetime": { "func": datetime.fromisoformat, "display": "date-time in ISO format" @@ -1177,27 +1171,28 @@ def __validate_and_add_report_annotations( } } - ALLOWED_ANNOTATIONS = { - "timestamp": ALLOWED_TYPES["datetime"], - "testsuite": ALLOWED_TYPES["string"], - "testcase": ALLOWED_TYPES["string"] + allowed_annotations = { + "timestamp": allowed_types["datetime"], + "testsuite": allowed_types["string"], + "testcase": allowed_types["string"] } for key, value in report_annotation.items(): try: - ALLOWED_ANNOTATIONS[key]["func"](value) + allowed_annotations[key]["func"](value) session.add(ReportAnnotations(report_id, key, value)) except KeyError: + # pylint: disable=raise-missing-from raise codechecker_api_shared.ttypes.RequestFailed( codechecker_api_shared.ttypes.ErrorCode.REPORT_FORMAT, f"'{key}' is not an allowed report annotation.", - ALLOWED_ANNOTATIONS.keys()) + allowed_annotations.keys()) except ValueError: + # pylint: disable=raise-missing-from raise codechecker_api_shared.ttypes.RequestFailed( codechecker_api_shared.ttypes.ErrorCode.REPORT_FORMAT, f"'{value}' has wrong format. '{key}' annotations must be " - f"'{ALLOWED_ANNOTATIONS[key]['display']}'." - ) + f"'{allowed_annotations[key]['display']}'.") def __get_report_limit_for_product(self): with DBSession(self.__config_database) as session: @@ -1267,7 +1262,7 @@ def get_skip_handler( # Reset internal data. self.__already_added_report_hashes = set() - self.__new_report_hashes = dict() + self.__new_report_hashes = {} self.__all_report_checkers = set() all_reports = session.query(DBReport) \ @@ -1413,7 +1408,7 @@ def store(self) -> int: # Check constraints of the run. self.__check_run_limit() - with DBSession(self.__Session) as session: + with DBSession(self.__report_server._Session) as session: self.__store_run_lock(session) try: @@ -1468,13 +1463,13 @@ def store(self) -> int: for metadata in self.__mips.values() for analyzer in metadata.analyzers for checker - in metadata.checkers.get(analyzer, dict()).keys()} + in metadata.checkers.get(analyzer, {}).keys()} self.__store_checker_identifiers(checkers_in_metadata) try: # This session's transaction buffer stores the actual # run data into the database. - with DBSession(self.__Session) as session, \ + with DBSession(self.__report_server._Session) as session, \ RunLocking(session, self.__name): # Actual store operation begins here. run_id, update_run = self.__add_or_update_run( @@ -1500,7 +1495,7 @@ def store(self) -> int: self.__store_checker_identifiers( additional_checkers) - with DBSession(self.__Session) as session, \ + with DBSession(self.__report_server._Session) as session, \ RunLocking(session, self.__name): # The data of the run has been successfully committed # into the database. 
Deal with post-processing issues @@ -1552,8 +1547,7 @@ def store(self) -> int: sqlalchemy.exc.ProgrammingError) as ex: raise codechecker_api_shared.ttypes.RequestFailed( codechecker_api_shared.ttypes.ErrorCode.DATABASE, - "Storing reports to the database failed: {0}" - .format(ex)) + f"Storing reports to the database failed: {ex}") except Exception as ex: LOG.error("Failed to store results: %s", ex) import traceback @@ -1566,7 +1560,7 @@ def store(self) -> int: # (If the failure is undetectable, the coded grace period expiry # of the lock will allow further store operations to the given # run name.) - with DBSession(self.__Session) as session: + with DBSession(self.__report_server._Session) as session: self.__free_run_lock(session) if self.__wrong_src_code_comments: diff --git a/web/server/codechecker_server/api/product_server.py b/web/server/codechecker_server/api/product_server.py index e1a7b18ffb..026f57aeff 100644 --- a/web/server/codechecker_server/api/product_server.py +++ b/web/server/codechecker_server/api/product_server.py @@ -34,6 +34,8 @@ LOG = get_logger('server') +# These names are inherited from Thrift stubs. +# pylint: disable=invalid-name class ThriftProductHandler: """ Connect to database and handle thrift client requests. @@ -67,9 +69,9 @@ def __require_permission(self, required, args=None): args = dict(self.__permission_args) args['config_db_session'] = session - if not any([permissions.require_permission( - perm, args, self.__auth_session) - for perm in required]): + if not any(permissions.require_permission( + perm, args, self.__auth_session) + for perm in required): raise codechecker_api_shared.ttypes.RequestFailed( codechecker_api_shared.ttypes.ErrorCode.UNAUTHORIZED, "You are not authorized to execute this action.") @@ -249,7 +251,7 @@ def getProductConfiguration(self, product_id): with DBSession(self.__session) as session: product = session.query(Product).get(product_id) if product is None: - msg = "Product with ID {0} does not exist!".format(product_id) + msg = f"Product with ID {product_id} does not exist!" LOG.error(msg) raise codechecker_api_shared.ttypes.RequestFailed( @@ -330,8 +332,9 @@ def addProduct(self, product): msg) if self.__server.get_product(product.endpoint): - msg = "A product endpoint '/{0}' is already configured!" \ - .format(product.endpoint) + msg = \ + f"A product endpoint '/{product.endpoint}' is already " \ + "configured!" LOG.error(msg) raise codechecker_api_shared.ttypes.RequestFailed( codechecker_api_shared.ttypes.ErrorCode.GENERAL, @@ -369,7 +372,7 @@ def addProduct(self, product): conn_str_args = {'postgresql': False, 'sqlite': dbc.database} else: - msg = "Database engine '{0}' unknown!".format(dbc.engine) + msg = f"Database engine '{dbc.engine}' unknown!" 
LOG.error(msg) raise codechecker_api_shared.ttypes.RequestFailed( codechecker_api_shared.ttypes.ErrorCode.GENERAL, @@ -402,9 +405,9 @@ def addProduct(self, product): self.__server.add_product(orm_prod, init_db=True) connection_wrapper = self.__server.get_product(product.endpoint) if connection_wrapper.last_connection_failure: - msg = "The configured connection for '/{0}' failed: {1}" \ - .format(product.endpoint, - connection_wrapper.last_connection_failure) + msg = \ + f"The configured connection for '/{product.endpoint}' " \ + f"failed: {connection_wrapper.last_connection_failure}" LOG.error(msg) self.__server.remove_product(product.endpoint) @@ -445,7 +448,7 @@ def editProduct(self, product_id, new_config): with DBSession(self.__session) as session: product = session.query(Product).get(product_id) if product is None: - msg = "Product with ID {0} does not exist!".format(product_id) + msg = f"Product with ID {product_id} does not exist!" LOG.error(msg) raise codechecker_api_shared.ttypes.RequestFailed( codechecker_api_shared.ttypes.ErrorCode.DATABASE, msg) @@ -528,7 +531,7 @@ def editProduct(self, product_id, new_config): conn_str_args = {'postgresql': False, 'sqlite': dbc.database} else: - msg = "Database engine '{0}' unknown!".format(dbc.engine) + msg = f"Database engine '{dbc.engine}' unknown!" LOG.error(msg) raise codechecker_api_shared.ttypes.RequestFailed( codechecker_api_shared.ttypes.ErrorCode.GENERAL, @@ -571,9 +574,10 @@ def editProduct(self, product_id, new_config): connection_wrapper = self.__server.get_product(dummy_endpoint) if connection_wrapper.last_connection_failure: - msg = "The configured connection for '/{0}' failed: {1}" \ - .format(new_config.endpoint, - connection_wrapper.last_connection_failure) + msg = \ + f"The configured connection for " \ + f"'/{new_config.endpoint}' failed: " \ + f"{connection_wrapper.last_connection_failure}" LOG.error(msg) self.__server.remove_product(dummy_endpoint) @@ -631,7 +635,7 @@ def removeProduct(self, product_id): with DBSession(self.__session) as session: product = session.query(Product).get(product_id) if product is None: - msg = "Product with ID {0} does not exist!".format(product_id) + msg = f"Product with ID {product_id} does not exist!" LOG.error(msg) raise codechecker_api_shared.ttypes.RequestFailed( diff --git a/web/server/codechecker_server/api/report_server.py b/web/server/codechecker_server/api/report_server.py index 5b34a6b75b..6d7b167f69 100644 --- a/web/server/codechecker_server/api/report_server.py +++ b/web/server/codechecker_server/api/report_server.py @@ -73,6 +73,8 @@ detection_status_str, report_status_enum, \ review_status_enum, review_status_str, report_extended_data_type_enum +# These names are inherited from Thrift stubs. +# pylint: disable=invalid-name LOG = get_logger('server') @@ -94,6 +96,8 @@ def comment_kind_from_thrift_type(kind): elif kind == ttypes.CommentKind.SYSTEM: return CommentKindValue.SYSTEM + assert False, f"Unknown ttypes.CommentKind: {kind}" + def comment_kind_to_thrift_type(kind): """ Convert the given comment kind from Python enum to Thrift type. """ @@ -102,6 +106,8 @@ def comment_kind_to_thrift_type(kind): elif kind == CommentKindValue.SYSTEM: return ttypes.CommentKind.SYSTEM + assert False, f"Unknown CommentKindValue: {kind}" + def verify_limit_range(limit): """Verify limit value for the queries. 
@@ -135,16 +141,16 @@ def slugify(text): return norm_text -def exc_to_thrift_reqfail(func): +def exc_to_thrift_reqfail(function): """ Convert internal exceptions to RequestFailed exception which can be sent back on the thrift connections. """ - func_name = func.__name__ + func_name = function.__name__ def wrapper(*args, **kwargs): try: - res = func(*args, **kwargs) + res = function(*args, **kwargs) return res except sqlalchemy.exc.SQLAlchemyError as alchemy_ex: @@ -516,11 +522,14 @@ def get_source_component_file_query( if skip and include: include_q, skip_q = get_include_skip_queries(include, skip) return File.id.in_(include_q.except_(skip_q)) - elif include: + + if include: return or_(*[File.filepath.like(conv(fp)) for fp in include]) elif skip: return and_(*[not_(File.filepath.like(conv(fp))) for fp in skip]) + return None + def get_reports_by_bugpath_filter(session, file_filter_q) -> Set[int]: """ @@ -607,6 +616,8 @@ def get_query(component_name: str): elif skip: return or_(*[File.filepath.like(conv(fp)) for fp in skip]) + return None + queries = [get_query(n) for (n, ) in component_names] return and_(*queries) @@ -734,26 +745,20 @@ def process_cmp_data_filter(session, run_ids, report_filter, cmp_data): query_new_runs = get_diff_run_id_query(session, cmp_data.runIds, cmp_data.runTag) - AND = [] if cmp_data.diffType == DiffType.NEW: return and_(Report.bug_id.in_(query_new.except_(query_base)), Report.run_id.in_(query_new_runs)), [Run] - elif cmp_data.diffType == DiffType.RESOLVED: return and_(Report.bug_id.in_(query_base.except_(query_new)), Report.run_id.in_(query_base_runs)), [Run] - elif cmp_data.diffType == DiffType.UNRESOLVED: return and_(Report.bug_id.in_(query_base.intersect(query_new)), Report.run_id.in_(query_new_runs)), [Run] - else: raise codechecker_api_shared.ttypes.RequestFailed( codechecker_api_shared.ttypes.ErrorCode.DATABASE, 'Unsupported diff type: ' + str(cmp_data.diffType)) - return and_(*AND), [] - def process_run_history_filter(query, run_ids, run_history_filter): """ @@ -886,7 +891,6 @@ def get_report_details(session, report_ids): .order_by(Comment.created_at.desc()) for data, report_id in comment_query: - report_id = report_id comment_data = comment_data_db_to_api(data) comment_data_list[report_id].append(comment_data) @@ -1095,8 +1099,7 @@ def check_remove_runs_lock(session, run_ids): raise codechecker_api_shared.ttypes.RequestFailed( codechecker_api_shared.ttypes.ErrorCode.DATABASE, "Can not remove results because the following runs " - "are locked: {0}".format( - ', '.join([r[0] for r in run_locks]))) + f"are locked: {', '.join([r[0] for r in run_locks])}") def sort_run_data_query(query, sort_mode): @@ -1184,7 +1187,7 @@ def get_commit_url( ) -> Optional[str]: """ Get commit url for the given remote url. 
""" if not remote_url: - return + return None for git_commit_url in git_commit_urls: m = git_commit_url["regex"].match(remote_url) @@ -1196,6 +1199,8 @@ def get_commit_url( return url + return None + def get_cleanup_plan(session, cleanup_plan_id: int) -> CleanupPlan: """ @@ -1320,12 +1325,8 @@ def get_run_id_expression(session, report_filter): cast(Run.id, sqlalchemy.String).distinct(), ',' ).label("run_id") - else: - return func.group_concat( - Run.id.distinct() - ).label("run_id") - else: - return Run.id.label("run_id") + return func.group_concat(Run.id.distinct()).label("run_id") + return Run.id.label("run_id") def get_is_enabled_case(subquery): @@ -1445,9 +1446,9 @@ def __require_permission(self, required): args = dict(self.__permission_args) args['config_db_session'] = session - if not any([permissions.require_permission( + if not any(permissions.require_permission( perm, args, self._auth_session) - for perm in required]): + for perm in required): raise codechecker_api_shared.ttypes.RequestFailed( codechecker_api_shared.ttypes.ErrorCode.UNAUTHORIZED, "You are not authorized to execute this action.") @@ -1547,13 +1548,14 @@ def getRunData(self, run_filter, limit, offset, sort_mode): status_sum[run_id][detection_status_enum(status)] = count # Get analyzer statistics. - analyzer_statistics = defaultdict(lambda: defaultdict()) + analyzer_statistics = defaultdict(defaultdict) stat_q = get_analysis_statistics_query(session, run_filter.ids) - for stat, run_id in stat_q: - analyzer_statistics[run_id][stat.analyzer_type] = \ - ttypes.AnalyzerStatistics(failed=stat.failed, - successful=stat.successful) + for analyzer_stat, run_id in stat_q: + analyzer_statistics[run_id][analyzer_stat.analyzer_type] = \ + ttypes.AnalyzerStatistics( + failed=analyzer_stat.failed, + successful=analyzer_stat.successful) results = [] @@ -1698,11 +1700,11 @@ def getRunHistory(self, run_ids, limit, offset, run_history_filter): results = [] for history in res: analyzer_statistics = {} - for stat in history.analyzer_statistics: - analyzer_statistics[stat.analyzer_type] = \ + for analyzer_stat in history.analyzer_statistics: + analyzer_statistics[analyzer_stat.analyzer_type] = \ ttypes.AnalyzerStatistics( - failed=stat.failed, - successful=stat.successful) + failed=analyzer_stat.failed, + successful=analyzer_stat.successful) results.append(RunHistoryData( id=history.id, @@ -1836,8 +1838,8 @@ def getDiffResultsHash(self, run_ids, report_hashes, diff_type, # than 8435 sqlite threw a `Segmentation fault` error. # For this reason we create queries with chunks. 
new_hashes = [] - for chunk in util.chunks(iter(report_hashes), - SQLITE_MAX_COMPOUND_SELECT): + for chunk in util.chunks( + iter(report_hashes), SQLITE_MAX_COMPOUND_SELECT): new_hashes_query = union_all(*[ select([bindparam('bug_id' + str(i), h) .label('bug_id')]) @@ -1862,7 +1864,6 @@ def getDiffResultsHash(self, run_ids, report_hashes, diff_type, results, run_ids, tag_ids) return [res[0] for res in results] - elif diff_type == DiffType.UNRESOLVED: results = session.query(Report.bug_id) \ .filter(Report.bug_id.in_(report_hashes)) @@ -1881,7 +1882,6 @@ def getDiffResultsHash(self, run_ids, report_hashes, diff_type, results, run_ids, tag_ids) return [res[0] for res in results] - else: return [] @@ -2357,12 +2357,11 @@ def _setReviewStatus(self, session, report_hash, status, new_review_status = review_status.status.capitalize() if message: system_comment_msg = \ - 'rev_st_changed_msg {0} {1} {2}'.format( - old_review_status, new_review_status, - shlex.quote(message)) + f'rev_st_changed_msg {old_review_status} ' \ + f'{new_review_status} {shlex.quote(message)}' else: - system_comment_msg = 'rev_st_changed {0} {1}'.format( - old_review_status, new_review_status) + system_comment_msg = \ + f'rev_st_changed {old_review_status} {new_review_status}' system_comment = self.__add_comment(review_status.bug_hash, system_comment_msg, @@ -2611,10 +2610,9 @@ def getComments(self, report_id): return result else: - msg = 'Report id ' + str(report_id) + \ - ' was not found in the database.' raise codechecker_api_shared.ttypes.RequestFailed( - codechecker_api_shared.ttypes.ErrorCode.DATABASE, msg) + codechecker_api_shared.ttypes.ErrorCode.DATABASE, + f'Report id {report_id} was not found in the database.') @exc_to_thrift_reqfail @timeit @@ -2654,8 +2652,7 @@ def addComment(self, report_id, comment_data): return True else: - msg = 'Report id ' + str(report_id) + \ - ' was not found in the database.' + msg = f'Report id {report_id} was not found in the database.' LOG.error(msg) raise codechecker_api_shared.ttypes.RequestFailed( codechecker_api_shared.ttypes.ErrorCode.DATABASE, msg) @@ -2681,7 +2678,7 @@ def updateComment(self, comment_id, content): comment = session.query(Comment).get(comment_id) if comment: - if comment.author != 'Anonymous' and comment.author != user: + if comment.author not in ('Anonymous', user): raise codechecker_api_shared.ttypes.RequestFailed( codechecker_api_shared.ttypes.ErrorCode.UNAUTHORIZED, 'Unathorized comment modification!') @@ -2689,9 +2686,9 @@ def updateComment(self, comment_id, content): # Create system comment if the message is changed. message = comment.message.decode('utf-8') if message != content: - system_comment_msg = 'comment_changed {0} {1}'.format( - shlex.quote(message), - shlex.quote(content)) + system_comment_msg = \ + f'comment_changed {shlex.quote(message)} ' \ + f'{shlex.quote(content)}' system_comment = \ self.__add_comment(comment.bug_hash, @@ -2705,8 +2702,7 @@ def updateComment(self, comment_id, content): session.commit() return True else: - msg = 'Comment id ' + str(comment_id) + \ - ' was not found in the database.' + msg = f'Comment id {comment_id} was not found in the database.' 
LOG.error(msg) raise codechecker_api_shared.ttypes.RequestFailed( codechecker_api_shared.ttypes.ErrorCode.DATABASE, msg) @@ -2727,7 +2723,7 @@ def removeComment(self, comment_id): comment = session.query(Comment).get(comment_id) if comment: - if comment.author != 'Anonymous' and comment.author != user: + if comment.author not in ('Anonymous', user): raise codechecker_api_shared.ttypes.RequestFailed( codechecker_api_shared.ttypes.ErrorCode.UNAUTHORIZED, 'Unathorized comment modification!') @@ -2740,14 +2736,13 @@ def removeComment(self, comment_id): return True else: - msg = 'Comment id ' + str(comment_id) + \ - ' was not found in the database.' raise codechecker_api_shared.ttypes.RequestFailed( - codechecker_api_shared.ttypes.ErrorCode.DATABASE, msg) + codechecker_api_shared.ttypes.ErrorCode.DATABASE, + f'Comment id {comment_id} was not found in the database.') @exc_to_thrift_reqfail @timeit - def getCheckerDoc(self, checkerId): + def getCheckerDoc(self, _): """ Parameters: - checkerId @@ -2864,6 +2859,7 @@ def getBlameInfo(self, fileId): commits=commits, blame=blame_data) except Exception: + # pylint: disable=raise-missing-from raise codechecker_api_shared.ttypes.RequestFailed( codechecker_api_shared.ttypes.ErrorCode.DATABASE, "Failed to get blame information for file id: " + fileId) @@ -2889,8 +2885,8 @@ def getLinesInSourceFileContents(self, lines_in_files_requested, encoding): .filter(File.id.in_( [line.fileId if line.fileId is not None else LOG.warning( - f"File content " - "requested without fileId {l}") + "File content requested " + f"without fileId {line.fileId}") for line in chunk])) \ .all() for content in contents: @@ -3064,7 +3060,7 @@ def getCheckerStatusVerificationDetails(self, run_ids, report_filter): checker_name, \ analyzer_name, \ severity, \ - run_ids, \ + run_id_list, \ is_enabled, \ is_opened, \ cnt \ @@ -3083,9 +3079,9 @@ def getCheckerStatusVerificationDetails(self, run_ids, report_filter): )) if is_enabled: - for r in (run_ids.split(",") - if isinstance(run_ids, str) - else [run_ids]): + for r in (run_id_list.split(",") + if isinstance(run_id_list, str) + else [run_id_list]): run_id = int(r) if run_id not in checker_stat.enabled: checker_stat.enabled.append(run_id) @@ -3796,8 +3792,7 @@ def updateRunData(self, run_id, new_run_name): return True else: - msg = 'Run id ' + str(run_id) + \ - ' was not found in the database.' + msg = f'Run id {run_id} was not found in the database.' LOG.error(msg) raise codechecker_api_shared.ttypes.RequestFailed( codechecker_api_shared.ttypes.ErrorCode.DATABASE, msg) @@ -3892,10 +3887,9 @@ def removeSourceComponent(self, name): name, self._get_username()) return True else: - msg = 'Source component ' + str(name) + \ - ' was not found in the database.' 
raise codechecker_api_shared.ttypes.RequestFailed( - codechecker_api_shared.ttypes.ErrorCode.DATABASE, msg) + codechecker_api_shared.ttypes.ErrorCode.DATABASE, + f'Source component {name} was not found in the database.') @exc_to_thrift_reqfail @timeit @@ -3912,7 +3906,7 @@ def getMissingContentHashes(self, file_hashes): .filter(FileContent.content_hash.in_(file_hashes)) return list(set(file_hashes) - - set([fc.content_hash for fc in q])) + set(fc.content_hash for fc in q)) @exc_to_thrift_reqfail @timeit @@ -3930,7 +3924,7 @@ def getMissingContentHashesForBlameInfo(self, file_hashes): .filter(FileContent.blame_info.isnot(None)) return list(set(file_hashes) - - set([fc.content_hash for fc in q])) + set(fc.content_hash for fc in q)) @exc_to_thrift_reqfail @timeit @@ -3948,14 +3942,14 @@ def massStoreRun(self, name, tag, version, b64zip, force, def allowsStoringAnalysisStatistics(self): self.__require_store() - return True if self._manager.get_analysis_statistics_dir() else False + return bool(self._manager.get_analysis_statistics_dir()) @exc_to_thrift_reqfail @timeit def getAnalysisStatisticsLimits(self): self.__require_store() - cfg = dict() + cfg = {} # Get the limit of failure zip size. failure_zip_size = self._manager.get_failure_zip_size() @@ -4019,17 +4013,18 @@ def getAnalysisStatistics(self, run_id, run_history_id): query = get_analysis_statistics_query( session, run_ids, run_history_ids) - for stat, run_id in query: - failed_files = zlib.decompress(stat.failed_files).decode( - 'utf-8').split('\n') if stat.failed_files else [] + for anal_stat, _ in query: + failed_files = zlib.decompress(anal_stat.failed_files).decode( + 'utf-8').split('\n') if anal_stat.failed_files else [] analyzer_version = zlib.decompress( - stat.version).decode('utf-8') if stat.version else None + anal_stat.version).decode('utf-8') \ + if anal_stat.version else None - analyzer_statistics[stat.analyzer_type] = \ + analyzer_statistics[anal_stat.analyzer_type] = \ ttypes.AnalyzerStatistics(version=analyzer_version, - failed=stat.failed, + failed=anal_stat.failed, failedFilePaths=failed_files, - successful=stat.successful) + successful=anal_stat.successful) return analyzer_statistics @exc_to_thrift_reqfail diff --git a/web/server/codechecker_server/api/server_info_handler.py b/web/server/codechecker_server/api/server_info_handler.py index a3daa59bcf..d5115d0ca7 100644 --- a/web/server/codechecker_server/api/server_info_handler.py +++ b/web/server/codechecker_server/api/server_info_handler.py @@ -15,6 +15,8 @@ LOG = get_logger('server') +# These names are inherited from Thrift stubs. +# pylint: disable=invalid-name class ThriftServerInfoHandler: """ Manages Thrift requests regarding server info. 
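The @thrift_client_call-decorated methods above are deliberately empty stubs: the decorator never executes the decorated body, it dispatches by name to the matching method on self.client. A minimal sketch of the pattern, with the real decorator's logging, retry and error handling omitted (functools.wraps is an illustrative addition, not part of the original):

    import functools

    def thrift_client_call(function):
        """Forward the decorated stub to the same-named Thrift client method."""
        func_name = function.__name__

        @functools.wraps(function)
        def wrapper(self, *args, **kwargs):
            self.transport.open()
            try:
                # The stub body is never run; look the call up by name.
                func = getattr(self.client, func_name)
                return func(*args, **kwargs)
            finally:
                self.transport.close()

        return wrapper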
diff --git a/web/server/codechecker_server/api/thrift_enum_helper.py b/web/server/codechecker_server/api/thrift_enum_helper.py
index 868e1e2f40..089ec61787 100644
--- a/web/server/codechecker_server/api/thrift_enum_helper.py
+++ b/web/server/codechecker_server/api/thrift_enum_helper.py
@@ -10,13 +10,16 @@
 """
+from typing import Optional
 from codechecker_api.codeCheckerDBAccess_v6.ttypes import DetectionStatus, \
     ExtendedReportDataType, ReportStatus, ReviewStatus
 from codechecker_api.ProductManagement_v6.ttypes import Confidentiality

-def detection_status_enum(status):
-    if status == 'new':
+def detection_status_enum(status: Optional[str]) -> Optional[DetectionStatus]:
+    if status is None:
+        return None
+    elif status == 'new':
         return DetectionStatus.NEW
     elif status == 'resolved':
         return DetectionStatus.RESOLVED
@@ -29,9 +32,13 @@
     elif status == 'unavailable':
         return DetectionStatus.UNAVAILABLE

+    assert False, f"Unknown detection status: {status}"

-def detection_status_str(status):
-    if status == DetectionStatus.NEW:
+
+def detection_status_str(status: Optional[DetectionStatus]) -> Optional[str]:
+    if status is None:
+        return None
+    elif status == DetectionStatus.NEW:
         return 'new'
     elif status == DetectionStatus.RESOLVED:
         return 'resolved'
@@ -44,36 +51,52 @@
     elif status == DetectionStatus.UNAVAILABLE:
         return 'unavailable'

+    assert False, f"Unknown detection status: {status}"
+

-def confidentiality_enum(confidentiality: str) -> Confidentiality:
+def confidentiality_enum(
+    confidentiality: Optional[str]
+) -> Optional[Confidentiality]:
     """ Converts the given string to confidentiality Thrift enum value. """
-    if confidentiality == 'CONFIDENTIAL':
+    if confidentiality is None:
+        return None
+    elif confidentiality == 'CONFIDENTIAL':
         return Confidentiality.CONFIDENTIAL
     elif confidentiality == 'INTERNAL':
         return Confidentiality.INTERNAL
     elif confidentiality == 'OPEN':
         return Confidentiality.OPEN

+    assert False, f"Unknown confidentiality: {confidentiality}"
+

-def confidentiality_str(confidentiality: Confidentiality) -> str:
+def confidentiality_str(
+    confidentiality: Optional[Confidentiality]
+) -> Optional[str]:
     """ Converts the given confidentiality to string. """
-    if confidentiality == Confidentiality.CONFIDENTIAL:
+    if confidentiality is None:
+        return None
+    elif confidentiality == Confidentiality.CONFIDENTIAL:
         return 'CONFIDENTIAL'
     elif confidentiality == Confidentiality.INTERNAL:
         return 'INTERNAL'
     elif confidentiality == Confidentiality.OPEN:
         return 'OPEN'

+    assert False, f"Unknown confidentiality: {confidentiality}"

-def review_status_str(status):
+
+def review_status_str(status: Optional[ReviewStatus]) -> Optional[str]:
     """ Returns the given review status Thrift enum value. """
-    if status == ReviewStatus.UNREVIEWED:
+    if status is None:
+        return None
+    elif status == ReviewStatus.UNREVIEWED:
         return 'unreviewed'
     elif status == ReviewStatus.CONFIRMED:
         return 'confirmed'
@@ -82,12 +105,16 @@
     elif status == ReviewStatus.INTENTIONAL:
         return 'intentional'

+    assert False, f"Unknown review status: {status}"
+

-def review_status_enum(status):
+def review_status_enum(status: Optional[str]) -> Optional[ReviewStatus]:
     """
     Converts the given review status to string.
""" - if status == 'unreviewed': + if status is None: + return None + elif status == 'unreviewed': return ReviewStatus.UNREVIEWED elif status == 'confirmed': return ReviewStatus.CONFIRMED @@ -96,46 +123,68 @@ def review_status_enum(status): elif status == 'intentional': return ReviewStatus.INTENTIONAL + assert False, f"Unknown review status: {status}" -def report_extended_data_type_str(status): + +def report_extended_data_type_str( + status: Optional[ExtendedReportDataType] +) -> Optional[str]: """ Converts the given extended data type to string. """ - if status == ExtendedReportDataType.NOTE: + if status is None: + return None + elif status == ExtendedReportDataType.NOTE: return 'note' elif status == ExtendedReportDataType.MACRO: return 'macro' elif status == ExtendedReportDataType.FIXIT: return 'fixit' + assert False, f"Unknown ExtendedReportDataType: {status}" + -def report_extended_data_type_enum(status): +def report_extended_data_type_enum( + status: Optional[str] +) -> Optional[ExtendedReportDataType]: """ Returns the given extended report data Thrift enum value. """ - if status == 'note': + if status is None: + return None + elif status == 'note': return ExtendedReportDataType.NOTE elif status == 'macro': return ExtendedReportDataType.MACRO elif status == 'fixit': return ExtendedReportDataType.FIXIT + assert False, f"Unknown ExtendedReportDataType: {status}" + -def report_status_str(status): +def report_status_str(status: Optional[ReportStatus]) -> Optional[str]: """ Returns the given report status Thrift enum value. """ - if status == ReportStatus.OUTSTANDING: + if status is None: + return None + elif status == ReportStatus.OUTSTANDING: return 'outstanding' elif status == ReportStatus.CLOSED: return 'closed' + assert False, f"Unknown report status: {status}" -def report_status_enum(status): + +def report_status_enum(status: Optional[str]) -> Optional[ReportStatus]: """ Converts the given report status to string. """ - if status == 'outstanding': + if status is None: + return None + elif status == 'outstanding': return ReportStatus.OUTSTANDING elif status == 'closed': return ReportStatus.CLOSED + + assert False, f"Unknown report status: {status}" diff --git a/web/server/codechecker_server/auth/cc_pam.py b/web/server/codechecker_server/auth/cc_pam.py index 62442d8a8d..bbb7e05608 100644 --- a/web/server/codechecker_server/auth/cc_pam.py +++ b/web/server/codechecker_server/auth/cc_pam.py @@ -51,11 +51,10 @@ def auth_user(pam_config, username, password): if not allowed_users and not allowed_group: # If no filters are set, only authentication is needed. return True + elif username in allowed_users: + # The user is allowed by username. + return True else: - if username in allowed_users: - # The user is allowed by username. - return True - # Otherwise, check group memeberships. If any of the user's # groups are an allowed groupl, the user is allowed. 
groups = [g.gr_name for g in grp.getgrall() @@ -63,7 +62,6 @@ def auth_user(pam_config, username, password): gid = pwd.getpwnam(username).pw_gid groups.append(grp.getgrgid(gid).gr_name) - return not set(groups).isdisjoint( - set(pam_config.get("groups"))) + return not set(groups).isdisjoint(set(pam_config.get("groups"))) return False diff --git a/web/server/codechecker_server/cmd/server.py b/web/server/codechecker_server/cmd/server.py index a65c8370bf..33bbbd20f1 100644 --- a/web/server/codechecker_server/cmd/server.py +++ b/web/server/codechecker_server/cmd/server.py @@ -345,12 +345,9 @@ def arg_match(options): options = ['--dbaddress', '--dbport', '--dbusername', '--dbname', '--db-host', '--db-port', '--db-username', '--db-name'] psql_args_matching = arg_match(options) - if any(psql_args_matching) and\ 'postgresql' not in args: - first_matching_arg = next(iter([match for match - in psql_args_matching])) - parser.error("argument {0}: not allowed without " "argument --postgresql".format(first_matching_arg)) + if any(psql_args_matching) and 'postgresql' not in args: + parser.error(f"argument {psql_args_matching[0]}: not allowed " "without argument --postgresql") # parser.error() terminates with return code 2. # --not-host-only is a "shortcut", actually a to-be-deprecated @@ -480,7 +477,7 @@ def check_product_db_status(cfg_sql_server, migration_root, environ): config_session = sessionmaker(bind=engine) sess = config_session() - products: List[ORMProduct] = list() + products: List[ORMProduct] = [] try: products = sess.query(ORMProduct) \ .order_by(ORMProduct.endpoint.asc()) \ @@ -621,15 +618,15 @@ def __db_migration_multiple( prod_statuses = check_product_db_status(cfg_sql_server, migration_root, environ) - products_to_upgrade: List[str] = list() + products_to_upgrade: List[str] = [] for endpoint in (products_requested_for_upgrade or []): avail = prod_statuses.get(endpoint) if not avail: LOG.error("No product was found with endpoint '%s'", endpoint) return 1 products_to_upgrade.append(endpoint) - else: - products_to_upgrade = list(prod_statuses.keys()) + + # Upgrade every product if none were requested explicitly. + if not products_to_upgrade: + products_to_upgrade = list(prod_statuses.keys()) products_to_upgrade.sort() def _get_migration_decisions() -> List[Tuple[str, str, bool]]: @@ -640,7 +637,7 @@ def _get_migration_decisions() -> List[Tuple[str, str, bool]]: cfg_session_factory = sessionmaker(bind=cfg_engine) cfg_session = cfg_session_factory() - scheduled_upgrades_or_inits: List[Tuple[str, str, bool]] = list() + scheduled_upgrades_or_inits: List[Tuple[str, str, bool]] = [] for endpoint in products_to_upgrade: LOG.info("Checking: %s", endpoint) connection_str: Optional[str] = None @@ -726,7 +723,7 @@ def _get_migration_decisions() -> List[Tuple[str, str, bool]]: LOG.info("========================") if scheduled_upgrades_or_inits: - failed_products: List[Tuple[str, DBStatus]] = list() + failed_products: List[Tuple[str, DBStatus]] = [] thr_count = util.clamp(1, len(scheduled_upgrades_or_inits), cpu_count()) with Pool(max_workers=thr_count) as executor: @@ -746,13 +743,14 @@ def _get_migration_decisions() -> List[Tuple[str, str, bool]]: failed_products.append((product_cfg[0], return_status)) if failed_products: + prod_status = [] + for p in failed_products: + status = database_status.db_status_msg.get( + p[1], "Unknown database status") + prod_status.append(f"'{p[0]}' ({status})") + LOG.error("The following products failed to upgrade: %s", - ", ".join(list(map(lambda p: "'%s' (%s)" % - (p[0], - database_status.db_status_msg.get( - p[1], "Unknown database status") - ), - 
failed_products)))) + ', '.join(prod_status)) else: LOG.info("Schema initialisation(s)/upgrade(s) executed " "successfully.") @@ -920,7 +918,7 @@ def server_init_start(args): # Actual server starting from this point. if not host_check.check_zlib(): - raise Exception("zlib is not available on the system!") + raise ModuleNotFoundError("zlib is not available on the system!") # WARNING # In case of SQLite args.dbaddress default value is used @@ -976,7 +974,7 @@ def server_init_start(args): LOG.debug("No schema upgrade is possible.") sys.exit(1) - force_upgrade = True if 'force_upgrade' in args else False + force_upgrade = 'force_upgrade' in args if db_status == DBStatus.SCHEMA_MISMATCH_OK: LOG.debug("Configuration database schema mismatch!") @@ -1072,8 +1070,7 @@ def server_init_start(args): upgrade_available = {} for k, v in prod_statuses.items(): db_status, _, _, _ = v - if db_status == DBStatus.SCHEMA_MISMATCH_OK or \ - db_status == DBStatus.SCHEMA_MISSING: + if db_status in (DBStatus.SCHEMA_MISMATCH_OK, DBStatus.SCHEMA_MISSING): upgrade_available[k] = v if upgrade_available: @@ -1147,7 +1144,7 @@ def main(args): args.config_directory) os.makedirs(args.config_directory) - with logger.LOG_CFG_SERVER( + with logger.LogCfgServer( args.verbose if "verbose" in args else None, workspace=workspace ): try: diff --git a/web/server/codechecker_server/database/config_db_model.py b/web/server/codechecker_server/database/config_db_model.py index bfccb91eb6..00f0c4948e 100644 --- a/web/server/codechecker_server/database/config_db_model.py +++ b/web/server/codechecker_server/database/config_db_model.py @@ -60,7 +60,7 @@ def __init__(self, endpoint, conn_str, name=None, description=None, self.run_limit = run_limit self.report_limit = report_limit self.is_review_status_change_disabled = \ - True if is_review_status_change_disabled else False + bool(is_review_status_change_disabled) self.confidentiality = confidentiality diff --git a/web/server/codechecker_server/database/database.py b/web/server/codechecker_server/database/database.py index d2a7690a90..d5abc0bda2 100644 --- a/web/server/codechecker_server/database/database.py +++ b/web/server/codechecker_server/database/database.py @@ -356,7 +356,6 @@ def connect(self, init=False): if needed. """ - pass @abstractmethod def get_connection_string(self) -> str: @@ -375,7 +374,6 @@ def get_db_location(self): DATABASE USERNAME AND PASSWORD SHOULD NOT BE RETURNED HERE! """ - pass def get_model_identifier(self): return self.__model_meta['identifier'] @@ -385,7 +383,6 @@ def _register_engine_hooks(self, engine): This method registers hooks, if needed, related to the engine created by create_engine. """ - pass def create_engine(self): """ @@ -505,11 +502,12 @@ def from_cmdline_args(args, name_in_log: str, model_meta, migration_root, LOG.debug("Using SQLite:") data_file = os.path.abspath(args['sqlite']) LOG.debug("Database at %s", data_file) - return SQLiteDatabase(name_in_log, - data_file, - model_meta, - migration_root, - run_env=env) + return SQLiteDatabase( + name_in_log, + data_file, + model_meta, + migration_root, + run_env=env) class PostgreSQLServer(SQLServer): @@ -637,7 +635,7 @@ def _register_engine_hooks(self, engine): SQLite databases need FOREIGN KEYs to be enabled, which is handled through this connection hook. 
""" - def _set_sqlite_pragma(dbapi_connection, connection_record): + def _set_sqlite_pragma(dbapi_connection, _): cursor = dbapi_connection.cursor() cursor.execute("PRAGMA foreign_keys=ON") cursor.close() diff --git a/web/server/codechecker_server/database/db_cleanup.py b/web/server/codechecker_server/database/db_cleanup.py index 26911a61b2..74c219f782 100644 --- a/web/server/codechecker_server/database/db_cleanup.py +++ b/web/server/codechecker_server/database/db_cleanup.py @@ -83,7 +83,7 @@ def remove_unused_files(product): # some measurements. Maybe this could be a command-line parameter. But in # the long terms we are planning to reduce cascade deletes by redesigning # bug_path_events and bug_report_points tables. - CHUNK_SIZE = 500_000 + chunk_size = 500_000 with DBSession(product.session_factory) as session: LOG.debug("[%s] Garbage collection of dangling files started...", product.endpoint) @@ -100,7 +100,7 @@ def remove_unused_files(product): files_to_delete = map(lambda x: x[0], files_to_delete) total_count = 0 - for chunk in util.chunks(iter(files_to_delete), CHUNK_SIZE): + for chunk in util.chunks(iter(files_to_delete), chunk_size): q = session.query(File) \ .filter(File.id.in_(chunk)) count = q.delete(synchronize_session=False) @@ -277,9 +277,9 @@ def upgrade_severity_levels(product, checker_labels): ((name_attempt, checker_labels.severity(name_attempt, analyzer)) for name_attempt in [ - "%s.%s" % (analyzer, checker), - "%s-%s" % (analyzer, checker), - "%s/%s" % (analyzer, checker) + f"{analyzer}.{checker}", + f"{analyzer}-{checker}", + f"{analyzer}/{checker}" ]) if severity != "UNSPECIFIED" } diff --git a/web/server/codechecker_server/metadata.py b/web/server/codechecker_server/metadata.py index 13aef8b2fc..02128f2805 100644 --- a/web/server/codechecker_server/metadata.py +++ b/web/server/codechecker_server/metadata.py @@ -64,7 +64,7 @@ def __init__(self, metadata_file_path): self.checkers: MetadataCheckers = {} self.enabled_checkers: EnabledCheckers = set() self.disabled_checkers: DisabledCheckers = set() - self.checker_to_analyzer: CheckerToAnalyzer = dict() + self.checker_to_analyzer: CheckerToAnalyzer = {} self.__metadata_dict: Dict[str, Any] = {} if os.path.isfile(metadata_file_path): diff --git a/web/server/codechecker_server/migrations/logging.py b/web/server/codechecker_server/migrations/logging.py index 12b8ffcba4..264603de29 100644 --- a/web/server/codechecker_server/migrations/logging.py +++ b/web/server/codechecker_server/migrations/logging.py @@ -52,10 +52,9 @@ def setup_logger(schema: str): """ sys_logger = logging.getLogger("system") codechecker_loglvl = sys_logger.getEffectiveLevel() - if codechecker_loglvl >= logging.INFO: - # This might be 30 (WARNING) if the migration is run outside of - # CodeChecker's context, e.g., in a downgrade. - codechecker_loglvl = logging.INFO + # This might be 30 (WARNING) if the migration is run outside of + # CodeChecker's context, e.g., in a downgrade. + codechecker_loglvl = min(codechecker_loglvl, logging.INFO) # Use the default logging class that came with Python for the migration, # temporarily turning away from potentially existing global changes. 
diff --git a/web/server/codechecker_server/permissions.py b/web/server/codechecker_server/permissions.py index e5cda3fa7d..07df5cc0c3 100644 --- a/web/server/codechecker_server/permissions.py +++ b/web/server/codechecker_server/permissions.py @@ -22,7 +22,7 @@ from codechecker_common.logger import get_logger LOG = get_logger('server') -config_db_model = None # Module will be loaded later... +CONFIG_DB_MODEL = None # Module will be loaded later... class Permission(metaclass=ABCMeta): @@ -101,7 +101,6 @@ def __call__(self, *args, **kwargs): The number of arguments for this function may vary, depending on the scope of the permission. """ - pass class PermissionHandler(metaclass=ABCMeta): @@ -130,11 +129,11 @@ def __init__(self, permission): # handler is initialized. Most likely this import does nothing, # as the server already executed loading the config_db_model, # so we just set the name to properly point to the module object. - global config_db_model - if config_db_model is None: + global CONFIG_DB_MODEL + if CONFIG_DB_MODEL is None: LOG.debug("Handler initiated for the first time, loading ORM...") from .database import config_db_model as ConfigDB - config_db_model = ConfigDB + CONFIG_DB_MODEL = ConfigDB # These high-level methods are used by client code. These contain # control flow that is shared for every permission handler @@ -210,6 +209,7 @@ def has_permission(self, auth_session): # and the server is running in authentication disabled mode. # All permissions are automatically granted in this case. return True + elif auth_session.is_root and self._perm_name == 'SUPERUSER': # The special master superuser (root) automatically has the # SUPERUSER permission. @@ -287,7 +287,7 @@ def __init__(self, permission, config_db_session): self.__session = config_db_session def __get_perm_record(self, auth_name, is_group): - SysPerm = config_db_model.SystemPermission + SysPerm = CONFIG_DB_MODEL.SystemPermission record = self.__session. \ query(SysPerm). \ @@ -310,7 +310,7 @@ def __get_stored_auth_name_and_permissions(self, auth_name, is_group): :returns: A tuple in (name, permission_set) structure. """ - SysPerm = config_db_model.SystemPermission + SysPerm = CONFIG_DB_MODEL.SystemPermission stored_auth_name = auth_name permissions = set() @@ -332,13 +332,13 @@ def _add_perm_impl(self, auth_name, is_group=False): auth_name, is_group) if not permissions: # This account does not have any permissions yet. - new_permission_record = config_db_model.SystemPermission( + new_permission_record = CONFIG_DB_MODEL.SystemPermission( self._permission.name, auth_name, is_group) else: # The user already has at least one permission. if self._permission.name in permissions: return False # Required permission already granted - new_permission_record = config_db_model.SystemPermission( + new_permission_record = CONFIG_DB_MODEL.SystemPermission( self._permission.name, stored_auth_name, is_group) self.__session.add(new_permission_record) @@ -353,12 +353,14 @@ def _rem_perm_impl(self, auth_name, is_group=False): self.__session.delete(perm_record) return True + return False + def _has_perm_impl(self, auth_names, are_groups=False): if not auth_names: return False auth_names_lower = [name.lower() for name in auth_names] - SysPerm = config_db_model.SystemPermission + SysPerm = CONFIG_DB_MODEL.SystemPermission query = self.__session. \ query(SysPerm). 
\ filter(and_( @@ -371,7 +373,7 @@ def _has_perm_impl(self, auth_names, are_groups=False): return exists def _list_authorised_impl(self): - SysPerm = config_db_model.SystemPermission + SysPerm = CONFIG_DB_MODEL.SystemPermission result = self.__session. \ query(SysPerm). \ @@ -380,7 +382,7 @@ def _list_authorised_impl(self): return [(p.name, p.is_group) for p in result] def __init__(self, name, **kwargs): - super(SystemPermission, self).__init__(name, **kwargs) + super().__init__(name, **kwargs) CALL_ARGS = ['config_db_session'] @@ -419,7 +421,7 @@ def __init__(self, permission, config_db_session, product_id): self.__product_id = product_id def __get_perm_record(self, auth_name, is_group): - ProdPerm = config_db_model.ProductPermission + ProdPerm = CONFIG_DB_MODEL.ProductPermission record = self.__session. \ query(ProdPerm). \ @@ -442,7 +444,7 @@ def __get_stored_auth_name_and_permissions(self, auth_name, is_group): user's name or a group name. :returns: A tuple in (name, permission_set) structure. """ - ProdPerm = config_db_model.ProductPermission + ProdPerm = CONFIG_DB_MODEL.ProductPermission stored_auth_name = auth_name permissions = set() @@ -465,14 +467,14 @@ def _add_perm_impl(self, auth_name, is_group=False): auth_name, is_group) if not permission_set: # This account does not have any permissions yet. - new_permission_record = config_db_model.ProductPermission( + new_permission_record = CONFIG_DB_MODEL.ProductPermission( self._permission.name, self.__product_id, auth_name, is_group) else: # The user already has at least one permission. if self._permission.name in permission_set: return False # Required permission already granted - new_permission_record = config_db_model.ProductPermission( + new_permission_record = CONFIG_DB_MODEL.ProductPermission( self._permission.name, self.__product_id, stored_auth_name, is_group) @@ -488,13 +490,15 @@ def _rem_perm_impl(self, auth_name, is_group=False): self.__session.delete(perm_record) return True + return False + def _has_perm_impl(self, auth_names, are_groups=False): if not auth_names: return False # Compare authorization names in a case-insensitive way. auth_names_lower = [name.lower() for name in auth_names] - ProdPerm = config_db_model.ProductPermission + ProdPerm = CONFIG_DB_MODEL.ProductPermission query = self.__session. \ query(ProdPerm). \ filter(and_( @@ -508,7 +512,7 @@ def _has_perm_impl(self, auth_names, are_groups=False): return exists def _list_authorised_impl(self): - ProdPerm = config_db_model.ProductPermission + ProdPerm = CONFIG_DB_MODEL.ProductPermission result = self.__session. \ query(ProdPerm). \ @@ -520,7 +524,7 @@ def _list_authorised_impl(self): return [(p.name, p.is_group) for p in result] def __init__(self, name, **kwargs): - super(ProductPermission, self).__init__(name, **kwargs) + super().__init__(name, **kwargs) CALL_ARGS = ['config_db_session', 'productID'] @@ -625,9 +629,7 @@ def initialise_defaults(scope, extra_params): creation of a new product (for PRODUCT permissions), etc. """ - perms = [perm for perm in get_permissions(scope)] - - for perm in perms: + for perm in get_permissions(scope): handler = handler_from_scope_params(perm, extra_params) users, groups = handler.list_permitted() diff --git a/web/server/codechecker_server/profiler.py b/web/server/codechecker_server/profiler.py index 3bcbe12d78..938303dca5 100644 --- a/web/server/codechecker_server/profiler.py +++ b/web/server/codechecker_server/profiler.py @@ -30,6 +30,7 @@ class Timer: to measure code block execution time. 
""" def __init__(self, block_name=''): + self.before = None self.block_name = block_name def __enter__(self): diff --git a/web/server/codechecker_server/routing.py b/web/server/codechecker_server/routing.py index c993a0915f..79ac8d0686 100644 --- a/web/server/codechecker_server/routing.py +++ b/web/server/codechecker_server/routing.py @@ -80,6 +80,7 @@ def is_supported_version(version): return False +# pylint: disable=invalid-name def split_client_GET_request(path): """ Split the given request URI to its parts relevant to the server. @@ -104,6 +105,7 @@ def split_client_GET_request(path): return None, parsed_path.lstrip('/') +# pylint: disable=invalid-name def split_client_POST_request(path): """ Split the given request URI to its parts relevant to the server. @@ -135,6 +137,7 @@ def split_client_POST_request(path): return None, None, None +# pylint: disable=invalid-name def is_protected_GET_entrypoint(path): """ Returns if the given GET request's PATH enters the server through an diff --git a/web/server/codechecker_server/server.py b/web/server/codechecker_server/server.py index e638d07d1f..b9f22059d0 100644 --- a/web/server/codechecker_server/server.py +++ b/web/server/codechecker_server/server.py @@ -15,8 +15,7 @@ import datetime from functools import partial from hashlib import sha256 -from http.server import HTTPServer, BaseHTTPRequestHandler, \ - SimpleHTTPRequestHandler +from http.server import HTTPServer, SimpleHTTPRequestHandler import os import posixpath from random import sample @@ -83,12 +82,10 @@ class RequestHandler(SimpleHTTPRequestHandler): auth_session = None def __init__(self, request, client_address, server): - BaseHTTPRequestHandler.__init__(self, - request, - client_address, - server) + self.path = None + super().__init__(request, client_address, server) - def log_message(self, msg_format, *args): + def log_message(self, *args): """ Silencing http server. """ return @@ -145,24 +142,11 @@ def __check_session_cookie(self): client_host, client_port, is_ipv6 = \ RequestHandler._get_client_host_port(self.client_address) LOG.debug("%s:%s Invalid access, credentials not found - " - "session refused", - client_host if not is_ipv6 else '[' + client_host + ']', - str(client_port)) + "session refused", + client_host if not is_ipv6 else '[' + client_host + ']', + str(client_port)) return None - def __has_access_permission(self, product): - """ - Returns True if the currently authenticated user has access permission - on the given product. - """ - with DBSession(self.server.config_session) as session: - perm_args = {'productID': product.id, - 'config_db_session': session} - return permissions.require_permission( - permissions.PRODUCT_ACCESS, - perm_args, - self.auth_session) - def __handle_readiness(self): """ Handle readiness probe. """ try: @@ -197,9 +181,7 @@ def end_headers(self): if token: self.send_header( "Set-Cookie", - "{0}={1}; Path=/".format( - session_manager.SESSION_COOKIE_NAME, - token)) + f"{session_manager.SESSION_COOKIE_NAME}={token}; Path=/") # Set the current user name in the header. user_name = self.auth_session.user @@ -258,8 +240,7 @@ def do_GET(self): # Check that path contains a product endpoint. 
if product_endpoint is not None and product_endpoint != '': - self.path = self.path.replace( - "{0}/".format(product_endpoint), "", 1) + self.path = self.path.replace(f"{product_endpoint}/", "", 1) if self.path == '/': self.path = "index.html" @@ -282,8 +263,8 @@ def __check_prod_db(self, product_endpoint): product = self.server.get_product(product_endpoint) if not product: raise ValueError( - "The product with the given endpoint '{0}' does " - "not exist!".format(product_endpoint)) + f"The product with the given endpoint '{product_endpoint}' " + "does not exist!") if product.db_status == DBStatus.OK: # No reconnect needed. @@ -301,21 +282,20 @@ def __check_prod_db(self, product_endpoint): if product.db_status != DBStatus.OK: # If the reconnection fails send an error to the user. LOG.debug("Product reconnection failed.") - error_msg = "'{0}' database connection " \ - "failed!".format(product.endpoint) + error_msg = f"'{product.endpoint}' database connection failed!" LOG.error(error_msg) raise ValueError(error_msg) else: # Send an error to the user. db_stat = DBStatus._VALUES_TO_NAMES.get(product.db_status) - error_msg = "'{0}' database connection " \ - "failed. DB status: {1}".format(product.endpoint, - str(db_stat)) + error_msg = f"'{product.endpoint}' database connection " \ + f"failed. DB status: {str(db_stat)}" LOG.error(error_msg) raise ValueError(error_msg) return product + # pylint: disable=invalid-name def do_POST(self): """ Handles POST queries, which are usually Thrift messages. @@ -373,9 +353,9 @@ def do_POST(self): try: product_endpoint, api_ver, request_endpoint = \ routing.split_client_POST_request(self.path) - if product_endpoint is None and api_ver is None and\ + if product_endpoint is None and api_ver is None and \ request_endpoint is None: - raise Exception("Invalid request endpoint path.") + raise ValueError("Invalid request endpoint path.") product = None if product_endpoint: @@ -414,9 +394,9 @@ def do_POST(self): elif request_endpoint == 'CodeCheckerService': # This endpoint is a product's report_server. if not product: - error_msg = "Requested CodeCheckerService on a " \ - "nonexistent product: '{0}'." \ - .format(product_endpoint) + error_msg = \ + "Requested CodeCheckerService on a " \ + f"nonexistent product: '{product_endpoint}'." LOG.error(error_msg) raise ValueError(error_msg) @@ -437,14 +417,17 @@ def do_POST(self): processor = ReportAPI_v6.Processor(acc_handler) else: LOG.debug("This API endpoint does not exist.") - error_msg = "No API endpoint named '{0}'." \ - .format(self.path) + error_msg = f"No API endpoint named '{self.path}'." raise ValueError(error_msg) + else: + raise ValueError( + f"API version {major_version} not supported") else: - error_msg = "The API version you are using is not supported " \ - "by this server (server API version: {0})!".format( - get_version_str()) + error_msg = \ + "The API version you are using is not supported " \ + "by this server (server API version: " \ + f"{get_version_str()})!" self.send_thrift_exception(error_msg, iprot, oprot, otrans) return @@ -474,12 +457,10 @@ def do_POST(self): iprot = input_protocol_factory.getProtocol(itrans) self.send_thrift_exception(str(ex), iprot, oprot, otrans) - return def list_directory(self, path): """ Disable directory listing. 
""" self.send_error(405, "No permission to list directory") - return None def translate_path(self, path): """ @@ -637,7 +618,7 @@ def get_details(self): .filter(RunLock.locked_at.isnot(None)) \ .all() - runs_in_progress = set([run_lock[0] for run_lock in run_locks]) + runs_in_progress = set(run_lock[0] for run_lock in run_locks) num_of_runs = run_db_session.query(Run).count() @@ -713,7 +694,7 @@ def _do_db_cleanups(config_database, context, check_env) \ list of products for which it failed, along with the failure reason. """ def _get_products() -> List[Product]: - products = list() + products = [] cfg_engine = config_database.create_engine() cfg_session_factory = sessionmaker(bind=cfg_engine) with DBSession(cfg_session_factory) as cfg_db: @@ -727,10 +708,10 @@ def _get_products() -> List[Product]: products = _get_products() if not products: - return True, list() + return True, [] thr_count = util.clamp(1, len(products), cpu_count()) - overall_result, failures = True, list() + overall_result, failures = True, [] with Pool(max_workers=thr_count) as executor: LOG.info("Performing database cleanup using %d concurrent jobs...", thr_count) @@ -982,8 +963,9 @@ def get_only_product(self): def remove_product(self, endpoint): product = self.get_product(endpoint) if not product: - raise ValueError("The product with the given endpoint '{0}' does " - "not exist!".format(endpoint)) + raise ValueError( + f"The product with the given endpoint '{endpoint}' does " + "not exist!") LOG.info("Disconnecting product '%s'", endpoint) product.teardown() @@ -995,9 +977,9 @@ def remove_products_except(self, endpoints_to_keep): Removes EVERY product connection from the server except those endpoints specified in :endpoints_to_keep. """ - [self.remove_product(ep) - for ep in list(self.__products) - if ep not in endpoints_to_keep] + for ep in list(self.__products): + if ep not in endpoints_to_keep: + self.remove_product(ep) class CCSimpleHttpServerIPv6(CCSimpleHttpServer): @@ -1007,9 +989,6 @@ class CCSimpleHttpServerIPv6(CCSimpleHttpServer): address_family = socket.AF_INET6 - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - def __make_root_file(root_file): """ @@ -1030,8 +1009,8 @@ def __make_root_file(root_file): # Highlight the message a bit more, as the server owner configuring the # server must know this root access initially. - credential_msg = "The superuser's username is '{0}' with the " \ - "password '{1}'".format(username, password) + credential_msg = f"The superuser's username is '{username}' with the " \ + f"password '{password}'" LOG.info("-" * len(credential_msg)) LOG.info(credential_msg) LOG.info("-" * len(credential_msg)) @@ -1118,7 +1097,7 @@ def start_server(config_directory, package_data, port, config_sql_server, "Earlier logs might contain additional detailed " "reasoning.\n\t* %s", len(fails), "\n\t* ".join( - ("'%s' (%s)" % (ep, reason) for (ep, reason) in fails) + (f"'{ep}' ({reason})" for (ep, reason) in fails) )) else: LOG.debug("Skipping db_cleanup, as requested.") @@ -1146,7 +1125,7 @@ def start_server(config_directory, package_data, port, config_sql_server, processes = [] - def signal_handler(signum, frame): + def signal_handler(signum, _): """ Handle SIGTERM to stop the server running. """ @@ -1162,7 +1141,7 @@ def signal_handler(signum, frame): sys.exit(128 + signum) - def reload_signal_handler(*args, **kwargs): + def reload_signal_handler(*_args, **_kwargs): """ Reloads server configuration file. 
""" @@ -1195,7 +1174,6 @@ def unregister_handler(pid): atexit.register(unregister_handler, os.getpid()) for _ in range(manager.worker_processes - 1): - # pylint: disable=no-member multiprocess module members. p = multiprocess.Process(target=http_server.serve_forever) processes.append(p) p.start() diff --git a/web/server/codechecker_server/session_manager.py b/web/server/codechecker_server/session_manager.py index 5a3412e837..276af909cd 100644 --- a/web/server/codechecker_server/session_manager.py +++ b/web/server/codechecker_server/session_manager.py @@ -211,7 +211,7 @@ def __init__(self, configuration_file, root_sha, force_auth=False): regex_groups = self.__auth_config['regex_groups'] \ .get('groups', []) - d = dict() + d = {} for group_name, regex_list in regex_groups.items(): d[group_name] = [re.compile(r) for r in regex_list] self.__group_regexes_compiled = d @@ -420,7 +420,7 @@ def __try_auth_token(self, auth_string): .limit(1).one_or_none() if not auth_session: - return False + return None return auth_session except Exception as e: @@ -430,6 +430,8 @@ def __try_auth_token(self, auth_string): if transaction: transaction.close() + return None + def __try_auth_dictionary(self, auth_string): """ Try to authenticate the user against the hardcoded credential list. @@ -571,7 +573,7 @@ def __is_root_user(self, user_name): .filter(SystemPermission.name == user_name) \ .filter(SystemPermission.permission == SUPERUSER.name) \ .limit(1).one_or_none() - return True if system_permission else False + return bool(system_permission) except Exception as e: LOG.error("Couldn't get system permission from database: ") LOG.error(str(e)) @@ -705,7 +707,7 @@ def __get_local_session_from_db(self, token): """ if not self.__database_connection: - return + return None transaction = None try: @@ -733,6 +735,8 @@ def __get_local_session_from_db(self, token): if transaction: transaction.close() + return None + def get_session(self, token): """ Retrieves the session for the given session cookie token from the diff --git a/web/server/tests/unit/test_ccldap.py b/web/server/tests/unit/test_ccldap.py index e026789358..ad4405f8e1 100644 --- a/web/server/tests/unit/test_ccldap.py +++ b/web/server/tests/unit/test_ccldap.py @@ -24,8 +24,8 @@ def simple_bind_s( self, who=None, cred=None, - serverctrls=None, - clientctrls=None + _serverctrls=None, + _clientctrls=None ): success = False @@ -45,15 +45,16 @@ def whoami_s(self): def search_s( self, base, - scope, + _scope, filterstr='(objectClass=*)', - attrlist=None, - attrsonly=0 + _attrlist=None, + _attrsonly=0 ): if base == 'ou=other,o=test' and filterstr == '(cn=user2)': return [( 'cn=user2,ou=other,o=test', {'cn': ['user2'], 'userPassword': ['user2pw']})] + return [] class CCLDAPTest(unittest.TestCase): diff --git a/web/server/tests/unit/test_metadata_parser.py b/web/server/tests/unit/test_metadata_parser.py index 70d400611c..1c1ced9d33 100644 --- a/web/server/tests/unit/test_metadata_parser.py +++ b/web/server/tests/unit/test_metadata_parser.py @@ -138,10 +138,10 @@ class MetadataInfoParserTest(unittest.TestCase): """ Testing metadata parser. """ @classmethod - def setup_class(self): + def setup_class(cls): """ Initialize test files. """ # Already generated plist files for the tests. 
- self.__metadata_test_files = os.path.join( + cls.__metadata_test_files = os.path.join( os.path.dirname(__file__), 'metadata_test_files') def test_metadata_info_v1(self): diff --git a/web/server/tests/unit/test_request_routing.py b/web/server/tests/unit/test_request_routing.py index ea39228751..d15a2ef2ee 100644 --- a/web/server/tests/unit/test_request_routing.py +++ b/web/server/tests/unit/test_request_routing.py @@ -15,53 +15,53 @@ from codechecker_server.routing import split_client_POST_request -def GET(path, host="http://localhost:8001/"): +def get(path, host="http://localhost:8001/"): return split_client_GET_request(host + path.lstrip('/')) -def POST(path): +def post(path): return split_client_POST_request("http://localhost:8001/" + path.lstrip('/')) -class request_routingTest(unittest.TestCase): +class RequestRoutingTest(unittest.TestCase): """ Testing the router that understands client request queries. """ - def testGET(self): + def test_get(self): """ Test if the server properly splits query addresses for GET. """ - self.assertEqual(GET(''), (None, '')) - self.assertEqual(GET('/', '//'), (None, '')) - self.assertEqual(GET('index.html'), (None, 'index.html')) - self.assertEqual(GET('/images/logo.png'), + self.assertEqual(get(''), (None, '')) + self.assertEqual(get('/', '//'), (None, '')) + self.assertEqual(get('index.html'), (None, 'index.html')) + self.assertEqual(get('/images/logo.png'), (None, 'images/logo.png')) - self.assertEqual(GET('Default'), ('Default', '')) - self.assertEqual(GET('Default/index.html'), ('Default', 'index.html')) + self.assertEqual(get('Default'), ('Default', '')) + self.assertEqual(get('Default/index.html'), ('Default', 'index.html')) - def testPOST(self): + def test_post(self): """ Test if the server properly splits query addresses for POST. """ # The splitter returns (None, None, None) as these are invalid paths. # It is the server code's responsibility to give a 404 Not Found. - self.assertEqual(POST(''), (None, None, None)) - self.assertEqual(POST('CodeCheckerService'), (None, None, None)) # Raise an exception if the URL is malformed, e.g. if it contains a # product-endpoint-like component with a badly encoded version # string. with self.assertRaises(Exception): - POST('v6.0') - POST('/v6/CodeCheckerService') + post('v6.0') + post('/v6/CodeCheckerService') - self.assertEqual(POST('/v6.0/Authentication'), + self.assertEqual(post('/v6.0/Authentication'), (None, '6.0', 'Authentication')) - self.assertEqual(POST('/DummyProduct/v0.0/FoobarService'), + self.assertEqual(post('/DummyProduct/v0.0/FoobarService'), ('DummyProduct', '0.0', 'FoobarService')) diff --git a/web/server/tests/unit/test_url_understanding.py b/web/server/tests/unit/test_url_understanding.py index a71542baa0..4e7db75c5c 100644 --- a/web/server/tests/unit/test_url_understanding.py +++ b/web/server/tests/unit/test_url_understanding.py @@ -25,12 +25,12 @@ def expected_port(protocol=None, port=None): return port -class product_urlTest(unittest.TestCase): +class ProductUrlTest(unittest.TestCase): """ Testing the product and server URL splitter. """ - def testFullURL(self): + def test_full_url(self): """ Whole product URL understanding. 
""" @@ -52,7 +52,7 @@ def test(host, port, name, protocol=None): test("very-secure.another.server", 443, "CodeChecker", "https") test("contains-a-port-overri.de", 1234, "PRD", "https") - def testProductName(self): + def test_product_name(self): """ Understanding only a product name specified. """ @@ -70,7 +70,7 @@ def test(name, protocol=None): test("MyProduct") test("Enterprise-Product", "https") - def testHostAndProductName(self): + def test_host_and_product_name(self): """ Understanding a host and a product name specified. """ @@ -93,7 +93,7 @@ def test(host, name, protocol=None): test("8080", "MyProduct") test("super", "verygood", "https") - def testBadProductNames(self): + def test_bad_product_names(self): """ Parser throws on bad product URLs? """ @@ -121,7 +121,7 @@ def testBadProductNames(self): with self.assertRaises(ValueError): split_product_url("http://::1:8080/Default") - def testFullServerURL(self): + def test_full_server_url(self): """ Whole server URL understanding. """ @@ -152,7 +152,7 @@ def test(host, port, protocol=None): self.assertEqual(shost, '[::1]') self.assertEqual(sport, 1234) - def testHostname(self): + def test_host_name(self): """ Understanding only a hostname specified for server URLs. """ @@ -170,7 +170,7 @@ def test(host, protocol=None): test("codechecker.local") # Port: 8001 test("www.example.org", "https") # Port: 443 - def testBadServerURLs(self): + def test_bad_server_urls(self): """ Parser throws on bad server URLs? """ diff --git a/web/tests/Makefile b/web/tests/Makefile index 33f0712a09..1b59353e3f 100644 --- a/web/tests/Makefile +++ b/web/tests/Makefile @@ -43,10 +43,9 @@ pycodestyle_in_env: venv_dev PYLINT_TEST_CMD = PYLINTRC=$(ROOT)/.pylintrc \ pylint -j0 \ --ignore=server/codechecker_server/migrations/report,server/codechecker_server/migrations/report/versions,server/codechecker_server/migrations/config,server/codechecker_server/migrations/config/versions \ - ./bin/** \ ./codechecker_web \ - ./client/bin/** ./client/codechecker_client \ - ./server/bin/** ./server/codechecker_server ./server/tests/unit \ + ./client/codechecker_client \ + ./server/codechecker_server ./server/tests/unit \ ./tests/functional ./tests/libtest ./tests/tools \ $(ROOT)/tools/report-converter/codechecker_report_converter diff --git a/web/tests/functional/authentication/__init__.py b/web/tests/functional/authentication/__init__.py index abc73ca6bb..13647e772c 100644 --- a/web/tests/functional/authentication/__init__.py +++ b/web/tests/functional/authentication/__init__.py @@ -17,7 +17,6 @@ from libtest import env import multiprocess -# pylint: disable=no-member multiprocess module members. # Stopping event for CodeChecker server. __STOP_SERVER = multiprocess.Event() diff --git a/web/tests/functional/authentication/test_authentication.py b/web/tests/functional/authentication/test_authentication.py index d0244a5f90..644cc1b657 100644 --- a/web/tests/functional/authentication/test_authentication.py +++ b/web/tests/functional/authentication/test_authentication.py @@ -36,7 +36,7 @@ def setup_class(self): def teardown_class(self): teardown_class_common() - def setup_method(self, method): + def setup_method(self, _): # Get the test workspace used to authentication tests. 
self._test_workspace = os.environ['TEST_WORKSPACE'] @@ -71,9 +71,9 @@ def test_privileged_access(self): user = auth_client.getLoggedInUser() self.assertEqual(user, "") - self.sessionToken = auth_client.performLogin("Username:Password", - "cc:test") - self.assertIsNotNone(self.sessionToken, + self.session_token = auth_client.performLogin( + "Username:Password", "cc:test") + self.assertIsNotNone(self.session_token, "Valid credentials didn't give us a token!") handshake = auth_client.getAuthParameters() @@ -84,14 +84,14 @@ def test_privileged_access(self): "Valid session was " + "reported not to be active.") client = env.setup_viewer_client(self._test_workspace, - session_token=self.sessionToken) + session_token=self.session_token) self.assertIsNotNone(client.getPackageVersion(), "Privileged server didn't respond properly.") authd_auth_client = \ env.setup_auth_client(self._test_workspace, - session_token=self.sessionToken) + session_token=self.session_token) user = authd_auth_client.getLoggedInUser() self.assertEqual(user, "cc") @@ -112,14 +112,14 @@ def test_privileged_access(self): self.assertEqual(personal_tokens[0].description, description) auth_client = env.setup_auth_client(self._test_workspace, - session_token=self.sessionToken) + session_token=self.session_token) result = auth_client.destroySession() self.assertTrue(result, "Server did not allow us to destroy session.") - self.sessionToken = auth_client.performLogin("Username:Password", - "colon:my:password") - self.assertIsNotNone(self.sessionToken, + self.session_token = auth_client.performLogin( + "Username:Password", "colon:my:password") + self.assertIsNotNone(self.session_token, "Valid credentials didn't give us a token!") result = auth_client.destroySession() @@ -135,10 +135,10 @@ def test_privileged_access(self): session_token=token) # Log-in by using an already generated personal token. - self.sessionToken = auth_token_client.performLogin("Username:Password", - "cc:" + token) + self.session_token = auth_token_client.performLogin( + "Username:Password", "cc:" + token) - self.assertIsNotNone(self.sessionToken, + self.assertIsNotNone(self.session_token, "Valid credentials didn't give us a token!") user = auth_token_client.getLoggedInUser() @@ -151,13 +151,13 @@ def test_privileged_access(self): codechecker.logout(self._test_cfg['codechecker_cfg'], self._test_workspace) - self.sessionToken = auth_client.performLogin("Username:Password", - "cc:test") - self.assertIsNotNone(self.sessionToken, + self.session_token = auth_client.performLogin( + "Username:Password", "cc:test") + self.assertIsNotNone(self.session_token, "Valid credentials didn't give us a token!") auth_client = env.setup_auth_client(self._test_workspace, - session_token=self.sessionToken) + session_token=self.session_token) # Remove the generated personal token. ret = auth_client.removeToken(token) self.assertTrue(ret) @@ -216,15 +216,15 @@ def test_group_auth(self): self.assertEqual(user, "") # Create a SUPERUSER login. 
- self.sessionToken = auth_client.performLogin("Username:Password", - "root:root") + self.session_token = auth_client.performLogin( + "Username:Password", "root:root") - self.assertIsNotNone(self.sessionToken, + self.assertIsNotNone(self.session_token, "Valid credentials didn't give us a token!") authd_auth_client = \ env.setup_auth_client(self._test_workspace, - session_token=self.sessionToken) + session_token=self.session_token) user = authd_auth_client.getLoggedInUser() self.assertEqual(user, "root") @@ -245,15 +245,15 @@ def test_group_auth(self): # Perform login with a user who is in ADMIN_GROUP and check that # he has permission to perform operations. - self.sessionToken = \ + self.session_token = \ auth_client.performLogin("Username:Password", "admin_group_user:admin123") - self.assertIsNotNone(self.sessionToken, + self.assertIsNotNone(self.session_token, "Valid credentials didn't give us a token!") client = env.setup_viewer_client(self._test_workspace, - session_token=self.sessionToken) + session_token=self.session_token) self.assertIsNotNone(client.allowsStoringAnalysisStatistics(), "Privileged server didn't respond properly.") @@ -265,15 +265,15 @@ def test_regex_groups(self): auth_client = env.setup_auth_client(self._test_workspace, session_token='_PROHIBIT') # First login as root. - self.sessionToken = auth_client.performLogin("Username:Password", - "root:root") - self.assertIsNotNone(self.sessionToken, + self.session_token = auth_client.performLogin( + "Username:Password", "root:root") + self.assertIsNotNone(self.session_token, "root was unable to login!") # Then give SUPERUSER privs to admins_custom_group. authd_auth_client = \ env.setup_auth_client(self._test_workspace, - session_token=self.sessionToken) + session_token=self.session_token) ret = authd_auth_client.addPermission(Permission.SUPERUSER, "admins_custom_group", True, None) @@ -283,14 +283,14 @@ def test_regex_groups(self): self.assertTrue(result, "Server did not allow us to destroy session.") # Login as a user who is in admins_custom_group. - sessionToken = auth_client.performLogin("Username:Password", - "regex_admin:blah") - self.assertIsNotNone(sessionToken, + session_token = auth_client.performLogin( + "Username:Password", "regex_admin:blah") + self.assertIsNotNone(session_token, "Valid credentials didn't give us a token!") # Do something privileged. client = env.setup_viewer_client(self._test_workspace, - session_token=sessionToken) + session_token=session_token) self.assertIsNotNone(client.allowsStoringAnalysisStatistics(), "Privileged call failed.") @@ -298,13 +298,13 @@ def test_regex_groups(self): self.assertTrue(result, "Server did not allow us to destroy session.") # Finally try to do the same with an unprivileged user. 
- sessionToken = auth_client.performLogin("Username:Password", - "john:doe") - self.assertIsNotNone(sessionToken, + session_token = auth_client.performLogin( + "Username:Password", "john:doe") + self.assertIsNotNone(session_token, "Valid credentials didn't give us a token!") client = env.setup_viewer_client(self._test_workspace, - session_token=sessionToken) + session_token=session_token) self.assertFalse(client.allowsStoringAnalysisStatistics(), "Privileged call from unprivileged user" " did not fail!") diff --git a/web/tests/functional/authentication/test_permission_management.py b/web/tests/functional/authentication/test_permission_management.py index 8f93d1a5c4..35fb23b701 100644 --- a/web/tests/functional/authentication/test_permission_management.py +++ b/web/tests/functional/authentication/test_permission_management.py @@ -29,7 +29,7 @@ def setup_class(self): def teardown_class(self): teardown_class_common() - def setup_method(self, method): + def setup_method(self, _): # TEST_WORKSPACE is automatically set by test package __init__.py . self._test_workspace = os.environ['TEST_WORKSPACE'] @@ -92,6 +92,7 @@ def __get_real_group_name(self, group_name_guess): for group in authorized_names.groups: if group.lower() == group_name_lower: return group + return None def test_product_permissions(self): """ diff --git a/web/tests/functional/authentication/test_permission_view.py b/web/tests/functional/authentication/test_permission_view.py index b2b64f72f3..43b16fa05b 100644 --- a/web/tests/functional/authentication/test_permission_view.py +++ b/web/tests/functional/authentication/test_permission_view.py @@ -30,7 +30,7 @@ def setup_class(self): def teardown_class(self): teardown_class_common() - def setup_method(self, method): + def setup_method(self, _): # TEST_WORKSPACE is automatically set by test package __init__.py . self._test_workspace = os.environ['TEST_WORKSPACE'] diff --git a/web/tests/functional/blame/test_blame_info.py b/web/tests/functional/blame/test_blame_info.py index df9b8c48dd..287f728dc8 100644 --- a/web/tests/functional/blame/test_blame_info.py +++ b/web/tests/functional/blame/test_blame_info.py @@ -104,7 +104,7 @@ def teardown_class(self): print("Removing: " + TEST_WORKSPACE) shutil.rmtree(TEST_WORKSPACE, ignore_errors=True) - def setup_method(self, method): + def setup_method(self, _): # TEST_WORKSPACE is automatically set by test package __init__.py . self.test_workspace = os.environ['TEST_WORKSPACE'] diff --git a/web/tests/functional/cleanup_plan/test_cleanup_plan.py b/web/tests/functional/cleanup_plan/test_cleanup_plan.py index e36023f002..88ec1f1712 100644 --- a/web/tests/functional/cleanup_plan/test_cleanup_plan.py +++ b/web/tests/functional/cleanup_plan/test_cleanup_plan.py @@ -93,7 +93,7 @@ def teardown_class(self): print("Removing: " + TEST_WORKSPACE) shutil.rmtree(TEST_WORKSPACE, ignore_errors=True) - def setup_method(self, method): + def setup_method(self, _): self._test_workspace = os.environ.get('TEST_WORKSPACE') test_class = self.__class__.__name__ diff --git a/web/tests/functional/cli_config/test_server_config.py b/web/tests/functional/cli_config/test_server_config.py index e69568cf3e..e4d495e75a 100644 --- a/web/tests/functional/cli_config/test_server_config.py +++ b/web/tests/functional/cli_config/test_server_config.py @@ -24,7 +24,6 @@ class TestServerConfig(unittest.TestCase): - # pylint: disable=no-member multiprocess module members. 
_ccClient = None def setup_class(self): @@ -33,7 +32,7 @@ def setup_class(self): def teardown_class(self): teardown_class_common() - def setup_method(self, method): + def setup_method(self, _): # TEST_WORKSPACE is automatically set by test package __init__.py . self.test_workspace = os.environ['TEST_WORKSPACE'] diff --git a/web/tests/functional/cli_config/test_store_config.py b/web/tests/functional/cli_config/test_store_config.py index 3aab095197..264dcff4da 100644 --- a/web/tests/functional/cli_config/test_store_config.py +++ b/web/tests/functional/cli_config/test_store_config.py @@ -31,7 +31,7 @@ def setup_class(self): def teardown_class(self): teardown_class_common() - def setup_method(self, method): + def setup_method(self, _): # TEST_WORKSPACE is automatically set by test package __init__.py . self.test_workspace = os.environ['TEST_WORKSPACE'] diff --git a/web/tests/functional/cmdline/test_cmdline.py b/web/tests/functional/cmdline/test_cmdline.py index eec3ff5efe..ae654109a4 100644 --- a/web/tests/functional/cmdline/test_cmdline.py +++ b/web/tests/functional/cmdline/test_cmdline.py @@ -25,11 +25,11 @@ from libtest import project -def run_cmd(cmd, env=None): +def run_cmd(cmd, environ=None): print(cmd) proc = subprocess.Popen( cmd, - env=env, + env=environ, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding="utf-8", @@ -137,7 +137,7 @@ def teardown_class(self): print("Removing: " + TEST_WORKSPACE) shutil.rmtree(TEST_WORKSPACE, ignore_errors=True) - def setup_method(self, method): + def setup_method(self, _): test_workspace = os.environ.get('TEST_WORKSPACE') @@ -185,19 +185,20 @@ def test_sum(self): sum_res = [self._codechecker_cmd, 'cmd', 'sum', '-a', '--url', str(self.server_url)] - ret = run_cmd(sum_res, - env=self._test_config['codechecker_cfg']['check_env'])[0] + ret = run_cmd( + sum_res, + environ=self._test_config['codechecker_cfg']['check_env'])[0] self.assertEqual(0, ret) def test_runs_filter(self): """ Test cmd results filter command. """ - env = self._test_config['codechecker_cfg']['check_env'] + environ = self._test_config['codechecker_cfg']['check_env'] # Get runs without filter. res_cmd = [self._codechecker_cmd, 'cmd', 'runs', '-o', 'json', '--url', str(self.server_url)] - ret, res, _ = run_cmd(res_cmd, env=env) + ret, res, _ = run_cmd(res_cmd, environ=environ) self.assertEqual(0, ret) self.assertEqual(2, len(json.loads(res))) @@ -206,7 +207,7 @@ def test_runs_filter(self): res_cmd = [self._codechecker_cmd, 'cmd', 'runs', '-o', 'json', '-n', 'test_files*', '--url', str(self.server_url)] - ret, res, _ = run_cmd(res_cmd, env=env) + ret, res, _ = run_cmd(res_cmd, environ=environ) self.assertEqual(0, ret) self.assertEqual(2, len(json.loads(res))) @@ -215,7 +216,7 @@ def test_runs_filter(self): res_cmd = [self._codechecker_cmd, 'cmd', 'runs', '-o', 'json', '-n', 'test_files1*', '--url', str(self.server_url)] - ret, res, _ = run_cmd(res_cmd, env=env) + ret, res, _ = run_cmd(res_cmd, environ=environ) self.assertEqual(0, ret) self.assertEqual(1, len(json.loads(res))) @@ -223,11 +224,11 @@ def test_runs_filter(self): def test_runs_analysis_statistics(self): """ Test analysis statistics in detailed mode. 
""" - env = self._test_config['codechecker_cfg']['check_env'] + environ = self._test_config['codechecker_cfg']['check_env'] res_cmd = [self._codechecker_cmd, 'cmd', 'runs', '-o', 'json', '--url', str(self.server_url)] - ret, res, _ = run_cmd(res_cmd, env=env) + ret, res, _ = run_cmd(res_cmd, environ=environ) self.assertEqual(0, ret) for run in json.loads(res): @@ -238,7 +239,7 @@ def test_runs_analysis_statistics(self): res_cmd = [self._codechecker_cmd, 'cmd', 'runs', '-o', 'json', '--url', str(self.server_url), '--details'] - ret, res, _ = run_cmd(res_cmd, env=env) + ret, res, _ = run_cmd(res_cmd, environ=environ) self.assertEqual(0, ret) for run in json.loads(res): @@ -252,17 +253,17 @@ def test_proxy_settings(self): server_url = f"{self.codechecker_cfg['viewer_host']}:" \ f"{str(self.codechecker_cfg['viewer_port'])}" - env = self.codechecker_cfg['check_env'].copy() - env['HTTP_PROXY'] = server_url + environ = self.codechecker_cfg['check_env'].copy() + environ['HTTP_PROXY'] = server_url res_cmd = [self._codechecker_cmd, 'cmd', 'runs', '--url', str(self.server_url)] - ret, _, err = run_cmd(res_cmd, env=env) + ret, _, err = run_cmd(res_cmd, environ=environ) self.assertEqual(1, ret) self.assertIn("Invalid proxy format", err) - env['HTTP_PROXY'] = f"http://{server_url}" - _, _, err = run_cmd(res_cmd, env=env) + environ['HTTP_PROXY'] = f"http://{server_url}" + _, _, err = run_cmd(res_cmd, environ=environ) # We can't check the return code here, because on host machine it will # be zero, but on the GitHub action's job it will be 1 with "Failed to @@ -271,24 +272,24 @@ def test_proxy_settings(self): def test_runs_row(self): """ Test cmd row output type. """ - env = self._test_config['codechecker_cfg']['check_env'] + environ = self._test_config['codechecker_cfg']['check_env'] res_cmd = [self._codechecker_cmd, 'cmd', 'runs', '-o', 'rows', '-n', 'test_files1*', '--url', str(self.server_url)] - ret, _, _ = run_cmd(res_cmd, env=env) + ret, _, _ = run_cmd(res_cmd, environ=environ) self.assertEqual(0, ret) def test_run_update(self): """ Test to update run name from the command line. """ - env = self._test_config['codechecker_cfg']['check_env'] + environ = self._test_config['codechecker_cfg']['check_env'] # Get runs. res_cmd = [self._codechecker_cmd, 'cmd', 'runs', '-o', 'json', '--url', str(self.server_url)] - ret, res, _ = run_cmd(res_cmd, env=env) + ret, res, _ = run_cmd(res_cmd, environ=environ) self.assertEqual(0, ret) runs = json.loads(res) @@ -302,20 +303,20 @@ def test_run_update(self): # Empty string as new name. res_cmd = [self._codechecker_cmd, 'cmd', 'update', run_name, '-n', '', '--url', str(self.server_url)] - ret, res, _ = run_cmd(res_cmd, env=env) + ret, res, _ = run_cmd(res_cmd, environ=environ) self.assertEqual(1, ret) # Update the run name. res_cmd = [self._codechecker_cmd, 'cmd', 'update', '-n', new_run_name, run_name, '--url', str(self.server_url)] - ret, res, _ = run_cmd(res_cmd, env=env) + ret, res, _ = run_cmd(res_cmd, environ=environ) self.assertEqual(0, ret) # See that the run was renamed. 
res_cmd = [self._codechecker_cmd, 'cmd', 'runs', '-o', 'json', '-n', run_name, '--url', str(self.server_url)] - ret, res, _ = run_cmd(res_cmd, env=env) + ret, res, _ = run_cmd(res_cmd, environ=environ) self.assertEqual(0, ret) self.assertEqual(0, len(json.loads(res))) @@ -323,13 +324,13 @@ def test_run_update(self): res_cmd = [self._codechecker_cmd, 'cmd', 'update', '-n', new_run_name, new_run_name, '--url', str(self.server_url)] - ret, res, _ = run_cmd(res_cmd, env=env) + ret, res, _ = run_cmd(res_cmd, environ=environ) self.assertEqual(1, ret) # Rename the run back to the original name. res_cmd = [self._codechecker_cmd, 'cmd', 'update', '-n', run_name, new_run_name, '--url', str(self.server_url)] - ret, res, _ = run_cmd(res_cmd, env=env) + ret, res, _ = run_cmd(res_cmd, environ=environ) self.assertEqual(0, ret) def test_results_multiple_runs(self): @@ -341,7 +342,7 @@ def test_results_multiple_runs(self): res_cmd = [self._codechecker_cmd, 'cmd', 'results', 'test_files1*', 'test_files1*', '-o', 'json', '--url', str(self.server_url)] - ret, _, _ = run_cmd(res_cmd, env=check_env) + ret, _, _ = run_cmd(res_cmd, environ=check_env) self.assertEqual(0, ret) def test_detailed_results_contain_run_names(self): @@ -354,7 +355,7 @@ def test_detailed_results_contain_run_names(self): 'test_files1*', '-o', 'json', '--url', str(self.server_url), '--details'] - ret, out, _ = run_cmd(res_cmd, env=check_env) + ret, out, _ = run_cmd(res_cmd, environ=check_env) self.assertEqual(0, ret) results = json.loads(out) @@ -371,7 +372,7 @@ def test_stderr_results(self): res_cmd = [self._codechecker_cmd, 'cmd', 'results', 'non_existing_run', '-o', 'json', '--url', str(self.server_url)] - ret, res, err = run_cmd(res_cmd, env=check_env) + ret, res, err = run_cmd(res_cmd, environ=check_env) self.assertEqual(1, ret) self.assertEqual(res, '') self.assertIn('No runs were found!', err) @@ -387,7 +388,7 @@ def test_stderr_sum(self): 'non_existing_run', '-o', 'json', '--url', str(self.server_url)] - ret, res, err = run_cmd(res_cmd, env=check_env) + ret, res, err = run_cmd(res_cmd, environ=check_env) self.assertEqual(1, ret) self.assertEqual(res, '') self.assertIn('No runs were found!', err) @@ -395,13 +396,13 @@ def test_stderr_sum(self): def test_run_sort(self): """ Test cmd runs sort command. """ - env = self._test_config['codechecker_cfg']['check_env'] + environ = self._test_config['codechecker_cfg']['check_env'] # Sort runs by the default sort type and sort order. res_cmd = [self._codechecker_cmd, 'cmd', 'runs', '-o', 'json', '--url', str(self.server_url)] - ret, res, _ = run_cmd(res_cmd, env=env) + ret, res, _ = run_cmd(res_cmd, environ=environ) self.assertEqual(0, ret) self.assertEqual(2, len(json.loads(res))) @@ -412,7 +413,7 @@ def test_run_sort(self): '--sort', 'name', '--order', 'asc', '--url', str(self.server_url)] - ret, res, _ = run_cmd(res_cmd, env=env) + ret, res, _ = run_cmd(res_cmd, environ=environ) self.assertEqual(0, ret) self.assertEqual(2, len(json.loads(res))) @@ -425,7 +426,7 @@ def test_cmd_component_manage(self): '-*/new_delete.cpp', '-árvíztűrő tükörfúrógép']) - env = self._test_config['codechecker_cfg']['check_env'] + environ = self._test_config['codechecker_cfg']['check_env'] # Add new source component. 
with tempfile.NamedTemporaryFile() as component_f: @@ -437,7 +438,7 @@ def test_cmd_component_manage(self): component_name, '--url', str(self.server_url)] - ret, out, _ = run_cmd(add_cmd, env=env) + ret, out, _ = run_cmd(add_cmd, environ=environ) self.assertEqual(0, ret) @@ -446,7 +447,7 @@ def test_cmd_component_manage(self): '-o', 'json', '--url', str(self.server_url)] - ret, out, _ = run_cmd(list_cmd, env=env) + ret, out, _ = run_cmd(list_cmd, environ=environ) self.assertEqual(0, ret) res = json.loads(out) @@ -459,5 +460,5 @@ def test_cmd_component_manage(self): component_name, '--url', str(self.server_url)] - ret, _, _ = run_cmd(rm_cmd, env=env) + ret, _, _ = run_cmd(rm_cmd, environ=environ) self.assertEqual(0, ret) diff --git a/web/tests/functional/comment/test_comment.py b/web/tests/functional/comment/test_comment.py index e20f01ca68..83534dc0ed 100644 --- a/web/tests/functional/comment/test_comment.py +++ b/web/tests/functional/comment/test_comment.py @@ -132,7 +132,7 @@ def teardown_class(self): print("Removing: " + TEST_WORKSPACE) shutil.rmtree(TEST_WORKSPACE, ignore_errors=True) - def setup_method(self, method): + def setup_method(self, _): self._test_workspace = os.environ.get('TEST_WORKSPACE') test_class = self.__class__.__name__ @@ -146,19 +146,19 @@ def setup_method(self, method): auth_client = env.setup_auth_client(self._test_workspace, session_token='_PROHIBIT') - sessionToken_cc = auth_client.performLogin("Username:Password", - "cc:test") - sessionToken_john = auth_client.performLogin("Username:Password", - "john:doe") + session_token_cc = auth_client.performLogin( + "Username:Password", "cc:test") + session_token_john = auth_client.performLogin( + "Username:Password", "john:doe") self._cc_client =\ env.setup_viewer_client( self._test_workspace, - session_token=sessionToken_cc) + session_token=session_token_cc) self._cc_client_john =\ env.setup_viewer_client( self._test_workspace, - session_token=sessionToken_john) + session_token=session_token_john) self.assertIsNotNone(self._cc_client) # Get the run names which belong to this test @@ -177,7 +177,7 @@ def test_comment(self): """ runid = self._test_runs[0].runId - logging.debug('Get all run results from the db for runid: ' + + logging.debug('Get all run results from the db for runid: %s', str(runid)) run_results = get_all_run_results(self._cc_client, runid) @@ -323,7 +323,7 @@ def test_same_bug_hash(self): # Get run results for the first run. runid_base = self._test_runs[0].runId - logging.debug('Get all run results from the db for runid: ' + + logging.debug('Get all run results from the db for runid: %s', str(runid_base)) run_results_base = get_all_run_results(self._cc_client, runid_base) @@ -334,7 +334,7 @@ def test_same_bug_hash(self): # Get run results for the second run. 
runid_new = self._test_runs[1].runId - logging.debug('Get all run results from the db for runid: ' + + logging.debug('Get all run results from the db for runid: %s', str(runid_new)) run_results_new = get_all_run_results(self._cc_client, runid_new) diff --git a/web/tests/functional/component/test_component.py b/web/tests/functional/component/test_component.py index b44d981b80..7329a0a774 100644 --- a/web/tests/functional/component/test_component.py +++ b/web/tests/functional/component/test_component.py @@ -112,7 +112,7 @@ def teardown_class(self): print("Removing: " + TEST_WORKSPACE) shutil.rmtree(TEST_WORKSPACE, ignore_errors=True) - def setup_method(self, method): + def setup_method(self, _): self._test_workspace = os.environ.get('TEST_WORKSPACE') test_class = self.__class__.__name__ @@ -199,7 +199,7 @@ def setup_method(self, method): } ] - def teardown_method(self, method): + def teardown_method(self, _): self.__remove_all_source_componens() def __add_new_component(self, component): diff --git a/web/tests/functional/cppcheck/test_cppcheck.py b/web/tests/functional/cppcheck/test_cppcheck.py index 8bfafc36d2..e48eb5ace3 100644 --- a/web/tests/functional/cppcheck/test_cppcheck.py +++ b/web/tests/functional/cppcheck/test_cppcheck.py @@ -74,7 +74,7 @@ def teardown_class(self): print("Removing: " + TEST_WORKSPACE) shutil.rmtree(TEST_WORKSPACE, ignore_errors=True) - def setup_method(self, method): + def setup_method(self, _): # Get the test workspace used to cppcheck tests. self._test_workspace = os.environ['TEST_WORKSPACE'] diff --git a/web/tests/functional/db_cleanup/test_db_cleanup.py b/web/tests/functional/db_cleanup/test_db_cleanup.py index c068e12e33..f708cff611 100644 --- a/web/tests/functional/db_cleanup/test_db_cleanup.py +++ b/web/tests/functional/db_cleanup/test_db_cleanup.py @@ -63,7 +63,7 @@ def teardown_class(self): print("Removing: " + TEST_WORKSPACE) shutil.rmtree(TEST_WORKSPACE, ignore_errors=True) - def setup_method(self, method): + def setup_method(self, _): self.test_workspace = os.environ['TEST_WORKSPACE'] test_class = self.__class__.__name__ @@ -93,7 +93,7 @@ def setup_method(self, method): self.event = multiprocess.Event() self.event.clear() - def teardown_method(self, method): + def teardown_method(self, _): global TEST_WORKSPACE diff --git a/web/tests/functional/delete_runs/test_delete_runs.py b/web/tests/functional/delete_runs/test_delete_runs.py index 4e21ab03b8..c9dceb54bb 100644 --- a/web/tests/functional/delete_runs/test_delete_runs.py +++ b/web/tests/functional/delete_runs/test_delete_runs.py @@ -27,11 +27,11 @@ from libtest import project -def run_cmd(cmd, env): +def run_cmd(cmd, environ): proc = subprocess.Popen( cmd, stdout=subprocess.PIPE, - env=env, + env=environ, encoding="utf-8", errors="ignore") out, _ = proc.communicate() @@ -111,8 +111,7 @@ def setup_class(self): if ret: sys.exit(1) - print("Analyzing the test project was successful {}." - .format(str(i))) + print(f"Analyzing the test project was successful {i}.") # If the check process is very fast, datetime of multiple runs can # be almost the same different in microseconds. Test cases of @@ -143,7 +142,7 @@ def teardown_class(self): print("Removing: " + TEST_WORKSPACE) shutil.rmtree(TEST_WORKSPACE, ignore_errors=True) - def setup_method(self, method): + def setup_method(self, _): # TEST_WORKSPACE is automatically set by test package __init__.py . 
test_workspace = os.environ['TEST_WORKSPACE'] @@ -196,7 +195,7 @@ def get_run_count_in_config_db(): 'list', '-o', 'json', '--url', self.server_url] - out_products, _ = run_cmd(get_products_cmd, env=check_env) + out_products, _ = run_cmd(get_products_cmd, environ=check_env) ret_products = json.loads(out_products) print(ret_products) @@ -218,7 +217,7 @@ def get_run_count_in_config_db(): '--all-after-run', run2_name, '-o', 'json', '--url', self.server_url] - out_runs, _ = run_cmd(get_runs_cmd, env=check_env) + out_runs, _ = run_cmd(get_runs_cmd, environ=check_env) ret_runs = json.loads(out_runs) self.assertEqual(len(ret_runs), 2) @@ -232,7 +231,7 @@ def get_run_count_in_config_db(): 'cmd', 'del', '--all-after-run', run2_name, '--url', self.server_url] - run_cmd(del_cmd, env=check_env) + run_cmd(del_cmd, environ=check_env) self.assertEqual(get_run_count_in_config_db(), 3) @@ -262,7 +261,7 @@ def get_run_count_in_config_db(): '--all-before-time', date_run2, '-o', 'json', '--url', self.server_url] - out_runs, _ = run_cmd(get_runs_cmd, env=check_env) + out_runs, _ = run_cmd(get_runs_cmd, environ=check_env) ret_runs = json.loads(out_runs) self.assertEqual(len(ret_runs), 2) @@ -274,7 +273,7 @@ def get_run_count_in_config_db(): 'cmd', 'del', '--all-before-time', date_run2, '--url', self.server_url] - run_cmd(del_cmd, env=check_env) + run_cmd(del_cmd, environ=check_env) self.assertTrue(all_exists( [project_name + '_' + str(2)])) @@ -287,7 +286,7 @@ def get_run_count_in_config_db(): 'cmd', 'del', '--name', run2_name, '--url', self.server_url] - run_cmd(del_cmd, env=check_env) + run_cmd(del_cmd, environ=check_env) self.assertTrue(none_exists( [project_name + '_' + str(i) for i in range(0, 5)])) diff --git a/web/tests/functional/detection_status/test_detection_status.py b/web/tests/functional/detection_status/test_detection_status.py index d04635b746..0b2796675f 100644 --- a/web/tests/functional/detection_status/test_detection_status.py +++ b/web/tests/functional/detection_status/test_detection_status.py @@ -72,7 +72,7 @@ def teardown_class(self): print("Removing: " + TEST_WORKSPACE) shutil.rmtree(TEST_WORKSPACE, ignore_errors=True) - def setup_method(self, method): + def setup_method(self, _): # TEST_WORKSPACE is automatically set by test package __init__.py . self.test_workspace = os.environ['TEST_WORKSPACE'] @@ -173,7 +173,7 @@ def setup_method(self, method): sizeof(42); }"""] - def teardown_method(self, method): + def teardown_method(self, _): """Restore environment after tests have ran.""" os.chdir(self.__old_pwd) @@ -187,8 +187,8 @@ def _check_source_file(self, cfg): def _create_clang_tidy_cfg_file(self, checkers): """ This function will create a .clang-tidy config file. 
""" - with open(self.clang_tidy_cfg, 'w') as f: - f.write("Checks: '{0}'".format(','.join(checkers))) + with open(self.clang_tidy_cfg, 'w', encoding='utf-8') as f: + f.write(f"Checks: '{','.join(checkers)}'") def test_same_file_change(self): """ @@ -200,7 +200,7 @@ def test_same_file_change(self): self._check_source_file(self._codechecker_cfg) runs = self._cc_client.getRunData(None, None, 0, None) - run_id = max([run.runId for run in runs]) + run_id = max(run.runId for run in runs) reports = self._cc_client.getRunResults([run_id], 100, @@ -212,7 +212,7 @@ def test_same_file_change(self): self.assertEqual(len(reports), 5) self.assertTrue( - all([r.detectionStatus == DetectionStatus.NEW for r in reports])) + all(r.detectionStatus == DetectionStatus.NEW for r in reports)) # Check the second file version self._create_source_file(1) @@ -234,7 +234,7 @@ def test_same_file_change(self): self.assertIn(report.bugHash, ['cbd629ba2ee25c41cdbf5e2e336b1b1c']) else: - self.assertTrue(False) + self.fail() # Check the third file version self._create_source_file(2) @@ -282,7 +282,7 @@ def test_same_file_change(self): "content.") else: - self.assertTrue(False) + self.fail() # Check the second file version again self._create_source_file(1) @@ -359,7 +359,7 @@ def test_check_without_metadata(self): codechecker.store(self._codechecker_cfg, 'hello') runs = self._cc_client.getRunData(None, None, 0, None) - run_id = max([run.runId for run in runs]) + run_id = max(run.runId for run in runs) reports = self._cc_client.getRunResults([run_id], 100, @@ -496,9 +496,8 @@ def test_store_multiple_dir_no_off(self): self._test_dir = orig_test_dir # Store two report directory. - cfg['reportdir'] = '{0} {1}'.format( - cfg['reportdir'], - self._codechecker_cfg['reportdir']) + cfg['reportdir'] = \ + f"{cfg['reportdir']} {self._codechecker_cfg['reportdir']}" codechecker.store(cfg, 'hello') # Check that no reports are marked as OFF. diff --git a/web/tests/functional/diff_cmdline/test_diff_cmdline.py b/web/tests/functional/diff_cmdline/test_diff_cmdline.py index 765ba82a05..5b0a7bf738 100644 --- a/web/tests/functional/diff_cmdline/test_diff_cmdline.py +++ b/web/tests/functional/diff_cmdline/test_diff_cmdline.py @@ -80,7 +80,7 @@ def teardown_class(self): print("Removing: " + TEST_WORKSPACE) shutil.rmtree(TEST_WORKSPACE, ignore_errors=True) - def setup_method(self, method): + def setup_method(self, _): self.test_workspace = os.environ['TEST_WORKSPACE'] test_class = self.__class__.__name__ @@ -89,7 +89,7 @@ def setup_method(self, method): self._cc_client = env.setup_viewer_client(self.test_workspace) self.assertIsNotNone(self._cc_client) - def teardown_method(self, method): + def teardown_method(self, _): """ Remove all review status rules after each test cases. 
""" self.__remove_all_runs() self.__remove_all_rules() @@ -129,10 +129,11 @@ def __analyze(self, file_dir, source_code): """ os.makedirs(file_dir, exist_ok=True) - with open(os.path.join(file_dir, "main.c"), "w") as f: + with open(os.path.join(file_dir, "main.c"), "w", + encoding='utf-8') as f: f.write(source_code) - with open(build_json_path, "w") as f: + with open(build_json_path, "w", encoding='utf-8') as f: f.write(build_json) codechecker_cfg = env.import_codechecker_cfg(self.test_workspace) @@ -154,13 +155,6 @@ def __analyze_and_store(self, file_dir, store_name, source_code, tag=None): self.__analyze(file_dir, source_code) self.__store(file_dir, store_name, tag) - def __get_run_id(self, run_name): - runs = self._cc_client.getRunData(None, None, 0, None) - self.assertEqual(len(runs), 1) - test_run = [run for run in runs if run.name == run_name] - self.assertEqual(len(test_run), 1) - return test_run[0].runid - # ===-----------------------------------------------------------------=== # # Local-local tests. # ===-----------------------------------------------------------------=== # @@ -248,13 +242,13 @@ def get_run_diff_count(diff_type: DiffType): # There is a single report that has remained. self.assertEqual(get_run_diff_count(DiffType.UNRESOLVED), 1) - def test_localFPAnnotated_local_identical(self): + def test_local_fp_annotated_local_identical(self): # Diff identical, local runs, where the baseline report is suppressed # via //codechecker_suppress. dir1 = os.path.join(self.test_workspace, "dir1") dir2 = os.path.join(self.test_workspace, "dir2") - src_div_by_zero_FP = """ + src_div_by_zero_fp = """ void a() { int i = 0; // codechecker_false_positive [all] SUPPRESS ALL @@ -267,7 +261,7 @@ def test_localFPAnnotated_local_identical(self): (void)(10 / i); } """ - self.__analyze(dir1, src_div_by_zero_FP) + self.__analyze(dir1, src_div_by_zero_fp) self.__analyze(dir2, src_div_by_zero) def get_run_diff_count(diff_type: DiffType): @@ -397,13 +391,13 @@ def get_run_diff_count_reverse(diff_type: DiffType): self.assertEqual(get_run_diff_count_reverse(DiffType.RESOLVED), 0) self.assertEqual(get_run_diff_count_reverse(DiffType.UNRESOLVED), 1) - def test_localFPAnnotated_remote_identical(self): + def test_local_fp_annotated_remote_identical(self): # Create two identical runs, store one on the server, leave one # locally. dir1 = os.path.join(self.test_workspace, "dir1") dir2 = os.path.join(self.test_workspace, "dir2") - src_div_by_zero_FP = """ + src_div_by_zero_fp = """ void a() { int i = 0; // codechecker_false_positive [all] SUPPRESS ALL @@ -416,7 +410,7 @@ def test_localFPAnnotated_remote_identical(self): (void)(10 / i); } """ - self.__analyze(dir1, src_div_by_zero_FP) + self.__analyze(dir1, src_div_by_zero_fp) self.__analyze_and_store(dir2, "run2", src_div_by_zero) report_filter = ReportFilter() @@ -455,13 +449,13 @@ def get_run_diff_count_reverse(diff_type: DiffType): # There are no common reports. self.assertEqual(get_run_diff_count_reverse(DiffType.UNRESOLVED), 0) - def test_local_remoteFPAnnotated_identical(self): + def test_local_remote_fp_annotated_identical(self): # Create two identical runs, store one on the server with a FP source # code suppression, leave one locally. 
dir1 = os.path.join(self.test_workspace, "dir1") dir2 = os.path.join(self.test_workspace, "dir2") - src_div_by_zero_FP = """ + src_div_by_zero_fp = """ void a() { int i = 0; // codechecker_false_positive [all] SUPPRESS ALL @@ -475,7 +469,7 @@ def test_local_remoteFPAnnotated_identical(self): } """ self.__analyze(dir1, src_div_by_zero) - self.__analyze_and_store(dir2, "run2", src_div_by_zero_FP) + self.__analyze_and_store(dir2, "run2", src_div_by_zero_fp) report_filter = ReportFilter() report_filter.reviewStatus = [] @@ -503,7 +497,7 @@ def get_run_diff_count_reverse(diff_type: DiffType): self.assertEqual(get_run_diff_count_reverse(DiffType.RESOLVED), 1) self.assertEqual(get_run_diff_count_reverse(DiffType.UNRESOLVED), 0) - def test_local_remoteReviewStatusRule_identical(self): + def test_local_remote_reviewstatusrule_identical(self): """ Even though the local report is not marked as a false positive, we expect the review status rule on the server to affect it. @@ -578,7 +572,7 @@ def get_run_diff_count_reverse(diff_type: DiffType): # Ideally, diffing tags should work the same as diffing two remote runs or # local directory. - def test_remoteTag_remoteTag_identical(self): + def test_remotetag_remotetag_identical(self): dir1 = os.path.join(self.test_workspace, "dir1") dir2 = os.path.join(self.test_workspace, "dir2") @@ -616,7 +610,7 @@ def get_run_diff_count_reverse(diff_type: DiffType): self.assertEqual(get_run_diff_count_reverse(DiffType.RESOLVED), 0) self.assertEqual(get_run_diff_count_reverse(DiffType.UNRESOLVED), 1) - def test_remoteTag_remoteTag_different(self): + def test_remotetag_remotetag_different(self): dir1 = os.path.join(self.test_workspace, "dir1") dir2 = os.path.join(self.test_workspace, "dir2") @@ -670,7 +664,7 @@ def get_run_diff_count_reverse(diff_type: DiffType): self.assertEqual(get_run_diff_count_reverse(DiffType.RESOLVED), 1) self.assertEqual(get_run_diff_count_reverse(DiffType.UNRESOLVED), 0) - def test_remoteTagFPAnnotated_remoteTag_identical(self): + def test_remotetag_fp_annotated_remotetag_identical(self): """ Test source code suppression changes -- in tag1, a FP suppression is present, and in tag2, it disappears. Internally, as of writing, this @@ -680,7 +674,7 @@ def test_remoteTagFPAnnotated_remoteTag_identical(self): dir1 = os.path.join(self.test_workspace, "dir1") dir2 = os.path.join(self.test_workspace, "dir2") - src_div_by_zero_FP = """ + src_div_by_zero_fp = """ void a() { int i = 0; // codechecker_false_positive [all] SUPPRESS ALL @@ -694,7 +688,7 @@ def test_remoteTagFPAnnotated_remoteTag_identical(self): } """ # Note that we're storing under the same run. - self.__analyze_and_store(dir1, "run1", src_div_by_zero_FP, "tag1") + self.__analyze_and_store(dir1, "run1", src_div_by_zero_fp, "tag1") self.__analyze_and_store(dir2, "run1", src_div_by_zero, "tag2") report_filter = ReportFilter() @@ -725,7 +719,7 @@ def get_run_diff_count_reverse(diff_type: DiffType): self.assertEqual(get_run_diff_count_reverse(DiffType.RESOLVED), 0) self.assertEqual(get_run_diff_count_reverse(DiffType.UNRESOLVED), 1) - def test_remoteTag_remoteTagFPAnnotated(self): + def test_remotetag_remotetag_fp_annotated(self): """ Test source code suppression changes -- in tag1, there is no suppression, and in tag2, there is an FP suppression. 
This should be
@@ -742,7 +736,7 @@ def test_remoteTag_remoteTagFPAnnotated(self):
             (void)(10 / i);
         }
         """
-        src_div_by_zero_FP = """
+        src_div_by_zero_fp = """
         void a() {
           int i = 0;
           // codechecker_false_positive [all] SUPPRESS ALL
@@ -751,7 +745,7 @@
         """
         # Note that we're storing under the same run.
         self.__analyze_and_store(dir1, "run1", src_div_by_zero, "tag1")
-        self.__analyze_and_store(dir2, "run1", src_div_by_zero_FP, "tag2")
+        self.__analyze_and_store(dir2, "run1", src_div_by_zero_fp, "tag2")

         report_filter = ReportFilter()
         report_filter.reviewStatus = []
@@ -777,7 +771,7 @@ def get_run_diff_count_reverse(diff_type: DiffType):
         self.assertEqual(get_run_diff_count_reverse(DiffType.RESOLVED), 0)
         self.assertEqual(get_run_diff_count_reverse(DiffType.UNRESOLVED), 0)

-    def test_remoteTagReviewStatusRule_remoteTag_identical(self):
+    def test_remotetag_reviewstatusrule_remotetag_identical(self):
         """
         You can find more context for why this is the expected result in the
-        docs of test_local_remoteReviewStatusRule_identical.
+        docs of test_local_remote_reviewstatusrule_identical.
@@ -826,7 +820,7 @@ def get_run_diff_count_reverse(diff_type: DiffType):
         self.assertEqual(get_run_diff_count_reverse(DiffType.RESOLVED), 0)
         self.assertEqual(get_run_diff_count_reverse(DiffType.UNRESOLVED), 0)

-    def test_remoteTag_remoteTag_FixedAtDate(self):
+    def test_remotetag_remotetag_fixedatdate(self):
         """
         When a run disappears from one tag to the next, we regard it as fixed,
         and set it fixed_at date. Test whether just because the fixed_at date
diff --git a/web/tests/functional/diff_local/test_diff_local.py b/web/tests/functional/diff_local/test_diff_local.py
index e15f2e29a2..b2e75c4a9b 100644
--- a/web/tests/functional/diff_local/test_diff_local.py
+++ b/web/tests/functional/diff_local/test_diff_local.py
@@ -122,7 +122,7 @@ def teardown_class(self):
         print("Removing: " + TEST_WORKSPACE)
         shutil.rmtree(TEST_WORKSPACE, ignore_errors=True)

-    def setup_method(self, method):
+    def setup_method(self, _):
         # TEST_WORKSPACE is automatically set by test package __init__.py .
         test_workspace = os.environ['TEST_WORKSPACE']

@@ -318,8 +318,8 @@ def test_suppress_reports(self):
         """
         cfg = dict(self._codechecker_cfg)

-        makefile = f"all:\n\t$(CXX) -c main.cpp -Wno-all -Wno-extra " \
-            f"-o /dev/null\n"
+        makefile = "all:\n\t$(CXX) -c main.cpp -Wno-all -Wno-extra " \
+            "-o /dev/null\n"
         with open(os.path.join(self._test_dir, 'Makefile'), 'w',
                   encoding="utf-8", errors="ignore") as f:
             f.write(makefile)
diff --git a/web/tests/functional/diff_local_remote/test_diff_local_remote.py b/web/tests/functional/diff_local_remote/test_diff_local_remote.py
index cbd9cfcbf7..c820379f3d 100644
--- a/web/tests/functional/diff_local_remote/test_diff_local_remote.py
+++ b/web/tests/functional/diff_local_remote/test_diff_local_remote.py
@@ -161,7 +161,7 @@ def teardown_class(self):
         print("Removing: " + TEST_WORKSPACE)
         shutil.rmtree(TEST_WORKSPACE, ignore_errors=True)

-    def setup_method(self, method):
+    def setup_method(self, _):
         # TEST_WORKSPACE is automatically set by test package __init__.py .
test_workspace = os.environ['TEST_WORKSPACE'] @@ -444,15 +444,15 @@ def test_diff_gerrit_output(self): """ export_dir = os.path.join(self._local_reports, "export_dir1") - env = self._env.copy() - env["CC_REPO_DIR"] = '' - env["CC_CHANGED_FILES"] = '' + environ = self._env.copy() + environ["CC_REPO_DIR"] = '' + environ["CC_CHANGED_FILES"] = '' get_diff_results( [self._run_names[0]], [self._local_reports], '--new', 'gerrit', ["--url", self._url, "-e", export_dir], - env) + environ) gerrit_review_file = os.path.join(export_dir, 'gerrit_review.json') self.assertTrue(os.path.exists(gerrit_review_file)) @@ -478,11 +478,11 @@ def test_diff_gerrit_output(self): self.assertIn("message", report) self.assertIn("range", report) - range = report["range"] - self.assertIn("start_line", range) - self.assertIn("start_character", range) - self.assertIn("end_line", range) - self.assertIn("end_character", range) + report_range = report["range"] + self.assertIn("start_line", report_range) + self.assertIn("start_character", report_range) + self.assertIn("end_line", report_range) + self.assertIn("end_character", report_range) shutil.rmtree(export_dir, ignore_errors=True) @@ -492,15 +492,15 @@ def test_diff_gerrit_stdout(self): Only one output format was selected the gerrit review json should be printed to stdout. """ - env = self._env.copy() - env["CC_REPO_DIR"] = '' - env["CC_CHANGED_FILES"] = '' + environ = self._env.copy() + environ["CC_REPO_DIR"] = '' + environ["CC_CHANGED_FILES"] = '' review_data, _, _ = get_diff_results( [self._run_names[0]], [self._local_reports], '--new', 'gerrit', ["--url", self._url], - env) + environ) print(review_data) review_data = json.loads(review_data) @@ -521,11 +521,11 @@ def test_diff_gerrit_stdout(self): self.assertIn("message", report) self.assertIn("range", report) - range = report["range"] - self.assertIn("start_line", range) - self.assertIn("start_character", range) - self.assertIn("end_line", range) - self.assertIn("end_character", range) + report_range = report["range"] + self.assertIn("start_line", report_range) + self.assertIn("start_character", report_range) + self.assertIn("end_line", report_range) + self.assertIn("end_character", report_range) def test_set_env_diff_gerrit_output(self): """Test gerrit output when using diff and set env vars. @@ -535,11 +535,11 @@ def test_set_env_diff_gerrit_output(self): """ export_dir = os.path.join(self._local_reports, "export_dir2") - env = self._env.copy() - env["CC_REPO_DIR"] = self._local_test_project + environ = self._env.copy() + environ["CC_REPO_DIR"] = self._local_test_project report_url = "localhost:8080/index.html" - env["CC_REPORT_URL"] = report_url + environ["CC_REPORT_URL"] = report_url changed_file_path = os.path.join(self._local_reports, 'files_changed') @@ -553,7 +553,7 @@ def test_set_env_diff_gerrit_output(self): "divide_zero.cpp": {}} changed_file.write(json.dumps(changed_files)) - env["CC_CHANGED_FILES"] = changed_file_path + environ["CC_CHANGED_FILES"] = changed_file_path _, err, _ = get_diff_results( [self._run_names[0]], [self._local_reports], @@ -567,7 +567,7 @@ def test_set_env_diff_gerrit_output(self): get_diff_results([self._run_names[0]], [self._local_reports], '--unresolved', 'gerrit', ["--url", self._url, "-e", export_dir], - env) + environ) gerrit_review_file = os.path.join(export_dir, 'gerrit_review.json') self.assertTrue(os.path.exists(gerrit_review_file)) @@ -599,13 +599,13 @@ def test_diff_codeclimate_output(self): """ Test codeclimate output when using diff and set env vars. 
""" export_dir = os.path.join(self._local_reports, "export_dir") - env = self._env.copy() - env["CC_REPO_DIR"] = self._local_test_project + environ = self._env.copy() + environ["CC_REPO_DIR"] = self._local_test_project get_diff_results([self._run_names[0]], [self._local_reports], '--unresolved', 'codeclimate', ["--url", self._url, "-e", export_dir], - env) + environ) codeclimate_issues_file = os.path.join(export_dir, 'codeclimate_issues.json') @@ -665,7 +665,7 @@ def test_diff_no_trim_codeclimate_output(self): file_path = malloc_issues[0]["location"]["path"] self.assertTrue(os.path.isabs(file_path)) - self.assertTrue(file_path.endswith(f"/new_delete.cpp")) + self.assertTrue(file_path.endswith("/new_delete.cpp")) shutil.rmtree(export_dir_path, ignore_errors=True) @@ -673,9 +673,9 @@ def test_diff_multiple_output(self): """ Test multiple output type for diff command. """ export_dir = os.path.join(self._local_reports, "export_dir3") - env = self._env.copy() - env["CC_REPO_DIR"] = '' - env["CC_CHANGED_FILES"] = '' + environ = self._env.copy() + environ["CC_REPO_DIR"] = '' + environ["CC_CHANGED_FILES"] = '' out, _, _ = get_diff_results( [self._run_names[0]], [self._local_reports], @@ -683,7 +683,7 @@ def test_diff_multiple_output(self): ["-o", "html", "gerrit", "plaintext", "-e", export_dir, "--url", self._url], - env) + environ) print(out) # Check the plaintext output. @@ -761,7 +761,7 @@ def test_remote_to_local_with_baseline_file(self): '--new', 'json', ["--url", self._url, "--review-status", "unreviewed", "confirmed", "false_positive"]) - new_hashes = sorted(set([n['report_hash'] for n in res])) + new_hashes = sorted(set(n['report_hash'] for n in res)) new_results, err, returncode = get_diff_results( [self._run_names[0]], [baseline_file_path], '--new', 'json', @@ -781,7 +781,7 @@ def test_remote_to_local_with_baseline_file(self): '--unresolved', 'json', ["--url", self._url, "--review-status", "unreviewed", "confirmed", "false_positive"]) - unresolved_hashes = sorted(set([n['report_hash'] for n in res])) + unresolved_hashes = sorted(set(n['report_hash'] for n in res)) unresolved_results, err, returncode = get_diff_results( [self._run_names[0]], [baseline_file_path], @@ -802,7 +802,7 @@ def test_remote_to_local_with_baseline_file(self): '--resolved', 'json', ["--url", self._url, "--review-status", "unreviewed", "confirmed", "false_positive"]) - resolved_hashes = set([n['report_hash'] for n in res]) + resolved_hashes = set(n['report_hash'] for n in res) resolved_results, _, returncode = get_diff_results( [self._run_names[0]], [baseline_file_path], '--resolved', 'json', diff --git a/web/tests/functional/diff_local_remote_suppress/__init__.py b/web/tests/functional/diff_local_remote_suppress/__init__.py index 1e7204f557..25f583a0c0 100644 --- a/web/tests/functional/diff_local_remote_suppress/__init__.py +++ b/web/tests/functional/diff_local_remote_suppress/__init__.py @@ -71,7 +71,7 @@ def setup_class_common(workspace_name): codechecker.add_test_package_product( server_access, os.environ['TEST_WORKSPACE']) - TEST_WORKSPACE = os.environ['TEST_WORKSPACE'] + test_workspace = os.environ['TEST_WORKSPACE'] test_project = 'cpp' @@ -82,8 +82,8 @@ def setup_class_common(workspace_name): codechecker_cfg = { 'suppress_file': None, 'skip_list_file': None, - 'check_env': env.test_env(TEST_WORKSPACE), - 'workspace': TEST_WORKSPACE, + 'check_env': env.test_env(test_workspace), + 'workspace': test_workspace, 'checkers': [], 'analyzers': ['clangsa'], 'run_names': {} @@ -98,16 +98,16 @@ def 
setup_class_common(workspace_name): codechecker_cfg.update(server_access) - env.export_test_cfg(TEST_WORKSPACE, test_config) - cc_client = env.setup_viewer_client(TEST_WORKSPACE) + env.export_test_cfg(test_workspace, test_config) + cc_client = env.setup_viewer_client(test_workspace) for run_data in cc_client.getRunData(None, None, 0, None): cc_client.removeRun(run_data.runId, None) # Copy "cpp" test project 3 times to different directories. - test_proj_path_orig = os.path.join(TEST_WORKSPACE, "test_proj_orig") - test_proj_path_1 = os.path.join(TEST_WORKSPACE, "test_proj_1") - test_proj_path_2 = os.path.join(TEST_WORKSPACE, "test_proj_2") + test_proj_path_orig = os.path.join(test_workspace, "test_proj_orig") + test_proj_path_1 = os.path.join(test_workspace, "test_proj_1") + test_proj_path_2 = os.path.join(test_workspace, "test_proj_2") shutil.rmtree(test_proj_path_orig, ignore_errors=True) shutil.rmtree(test_proj_path_1, ignore_errors=True) @@ -183,15 +183,15 @@ def setup_class_common(workspace_name): sys.exit(1) # Export the test configuration to the workspace. - env.export_test_cfg(TEST_WORKSPACE, test_config) + env.export_test_cfg(test_workspace, test_config) def teardown_class_common(): - TEST_WORKSPACE = os.environ['TEST_WORKSPACE'] + test_workspace = os.environ['TEST_WORKSPACE'] - check_env = env.import_test_cfg(TEST_WORKSPACE)[ + check_env = env.import_test_cfg(test_workspace)[ 'codechecker_cfg']['check_env'] - codechecker.remove_test_package_product(TEST_WORKSPACE, check_env) + codechecker.remove_test_package_product(test_workspace, check_env) - print("Removing: " + TEST_WORKSPACE) - shutil.rmtree(TEST_WORKSPACE, ignore_errors=True) + print("Removing: " + test_workspace) + shutil.rmtree(test_workspace, ignore_errors=True) diff --git a/web/tests/functional/diff_local_remote_suppress/test_diff_local_remote_suppress.py b/web/tests/functional/diff_local_remote_suppress/test_diff_local_remote_suppress.py index e091006653..cc7996b459 100644 --- a/web/tests/functional/diff_local_remote_suppress/test_diff_local_remote_suppress.py +++ b/web/tests/functional/diff_local_remote_suppress/test_diff_local_remote_suppress.py @@ -32,7 +32,7 @@ def setup_class(self): def teardown_class(self): teardown_class_common() - def setup_method(self, method): + def setup_method(self, _): # TEST_WORKSPACE is automatically set by test package __init__.py . test_workspace = os.environ['TEST_WORKSPACE'] diff --git a/web/tests/functional/diff_local_remote_suppress/test_diff_local_remote_suppress_rule.py b/web/tests/functional/diff_local_remote_suppress/test_diff_local_remote_suppress_rule.py index 278674e81f..a39e772909 100644 --- a/web/tests/functional/diff_local_remote_suppress/test_diff_local_remote_suppress_rule.py +++ b/web/tests/functional/diff_local_remote_suppress/test_diff_local_remote_suppress_rule.py @@ -35,7 +35,7 @@ def setup_class(self): def teardown_class(self): teardown_class_common() - def setup_method(self, method): + def setup_method(self, _): # TEST_WORKSPACE is automatically set by test package __init__.py . 
test_workspace = os.environ['TEST_WORKSPACE'] @@ -75,10 +75,10 @@ def setup_method(self, method): self._env = self._test_cfg['codechecker_cfg']['check_env'] - self.guiSuppressAllHashes('core.CallAndMessage') - self.guiSuppressAllHashes('core.StackAddressEscape') + self.gui_suppress_all_hashes('core.CallAndMessage') + self.gui_suppress_all_hashes('core.StackAddressEscape') - def guiSuppressAllHashes(self, checker_name): + def gui_suppress_all_hashes(self, checker_name): project_orig_run_name = \ self._test_cfg['codechecker_cfg']['run_names']['test_project_orig'] run_filter = RunFilter(names=[project_orig_run_name]) diff --git a/web/tests/functional/diff_remote/test_diff_remote.py b/web/tests/functional/diff_remote/test_diff_remote.py index 4c94f5718f..77814c51a4 100644 --- a/web/tests/functional/diff_remote/test_diff_remote.py +++ b/web/tests/functional/diff_remote/test_diff_remote.py @@ -288,7 +288,7 @@ def teardown_class(self): print("Removing: " + TEST_WORKSPACE) shutil.rmtree(TEST_WORKSPACE, ignore_errors=True) - def setup_method(self, method): + def setup_method(self, _): # TEST_WORKSPACE is automatically set by test package __init__.py . self.test_workspace = os.environ['TEST_WORKSPACE'] @@ -988,7 +988,8 @@ def test_source_line_content(self): # Check HTML output for file_path in os.listdir(html_reports): - with open(os.path.join(html_reports, file_path)) as f: + with open(os.path.join(html_reports, file_path), + encoding='utf-8') as f: self.assertNotIn(InvalidFileContentMsg, f.read()) shutil.rmtree(html_reports, ignore_errors=True) diff --git a/web/tests/functional/dynamic_results/test_dynamic_results.py b/web/tests/functional/dynamic_results/test_dynamic_results.py index e88540f737..14519dd3a3 100644 --- a/web/tests/functional/dynamic_results/test_dynamic_results.py +++ b/web/tests/functional/dynamic_results/test_dynamic_results.py @@ -24,8 +24,6 @@ from codechecker_api.codeCheckerDBAccess_v6.ttypes import \ Order, Pair, ReportFilter, SortMode, SortType -from libtest import env - class DynamicResults(unittest.TestCase): @@ -86,7 +84,7 @@ def teardown_class(self): print("Removing: " + TEST_WORKSPACE) shutil.rmtree(TEST_WORKSPACE, ignore_errors=True) - def setup_method(self, method): + def setup_method(self, _): self.test_workspace = os.environ['TEST_WORKSPACE'] test_class = self.__class__.__name__ diff --git a/web/tests/functional/export_import/test_export_import.py b/web/tests/functional/export_import/test_export_import.py index cef256220a..abc63bd700 100644 --- a/web/tests/functional/export_import/test_export_import.py +++ b/web/tests/functional/export_import/test_export_import.py @@ -136,7 +136,7 @@ def teardown_class(self): print("Removing: " + TEST_WORKSPACE) shutil.rmtree(TEST_WORKSPACE, ignore_errors=True) - def setup_method(self, method): + def setup_method(self, _): # TEST_WORKSPACE is automatically set by test package __init__.py . 
test_workspace = os.environ['TEST_WORKSPACE'] @@ -171,7 +171,7 @@ def test_export_import(self): """ run_filter = RunFilter() run_filter.names = [self.test_runs[0].name] - logging.debug('Get all run results from the db for runid: ' + + logging.debug('Get all run results from the db for runid: %s', str(self.test_runs[0].runId)) run_results = get_all_run_results( diff --git a/web/tests/functional/extended_report_data/test_extended_report_data.py b/web/tests/functional/extended_report_data/test_extended_report_data.py index 0b7dc5f348..26e475db7f 100644 --- a/web/tests/functional/extended_report_data/test_extended_report_data.py +++ b/web/tests/functional/extended_report_data/test_extended_report_data.py @@ -101,7 +101,7 @@ def teardown_class(self): print("Removing: " + TEST_WORKSPACE) shutil.rmtree(TEST_WORKSPACE, ignore_errors=True) - def setup_method(self, method): + def setup_method(self, _): self._test_workspace = os.environ.get('TEST_WORKSPACE') test_class = self.__class__.__name__ diff --git a/web/tests/functional/instance_manager/test_instances.py b/web/tests/functional/instance_manager/test_instances.py index f57df2ec5f..0e7fc3a1d6 100644 --- a/web/tests/functional/instance_manager/test_instances.py +++ b/web/tests/functional/instance_manager/test_instances.py @@ -23,7 +23,6 @@ from libtest.codechecker import start_server import multiprocess -# pylint: disable=no-member multiprocess module members. # Stopping events for CodeChecker servers. EVENT_1 = multiprocess.Event() EVENT_2 = multiprocess.Event() @@ -100,7 +99,7 @@ def teardown_class(self): print("Removing: " + TEST_WORKSPACE) shutil.rmtree(TEST_WORKSPACE, ignore_errors=True) - def setup_method(self, method): + def setup_method(self, _): # Get the test workspace used to tests. self._test_workspace = os.environ['TEST_WORKSPACE'] @@ -124,7 +123,7 @@ def run_cmd(self, cmd): print(out) return proc.returncode - def testServerStart(self): + def test_server_start(self): """Started server writes itself to instance list.""" test_cfg = env.import_test_cfg(self._test_workspace) @@ -140,7 +139,7 @@ def testServerStart(self): "The started server did not register itself to the" " instance list.") - def testServerStartSecondary(self): + def test_server_start_secondary(self): """Another started server appends itself to instance list.""" test_cfg = env.import_test_cfg(self._test_workspace) @@ -168,12 +167,12 @@ def testServerStartSecondary(self): "The ports for the two started servers were not found" " in the instance list.") - def testShutdownRecordKeeping(self): + def test_shutdown_record_keeping(self): """Test that one server's shutdown keeps the other records.""" # NOTE: Do NOT rename this method. It MUST come lexicographically - # AFTER testServerStartSecondary, because we shut down a server started - # by the aforementioned method. + # AFTER test_server_start_secondary, because we shut down a server + # started by the aforementioned method. # Kill the second started server. 
EVENT_2.set() @@ -200,7 +199,7 @@ def testShutdownRecordKeeping(self): "The stopped server did not disappear from the" " instance list.") - def testShutdownTerminateByCmdline(self): + def test_shutdown_terminate_by_cmdline(self): """Tests that the command-line command actually kills the server, and that it does not kill anything else.""" @@ -262,7 +261,7 @@ def testShutdownTerminateByCmdline(self): "The stopped server made another server's record " "appear in the instance list.") - def testShutdownTerminateStopAll(self): + def test_shutdown_terminate_stop_all(self): """Tests that --stop-all kills all servers on the host.""" # NOTE: Yet again keep the lexicographical flow, no renames! diff --git a/web/tests/functional/products/test_config_db_share.py b/web/tests/functional/products/test_config_db_share.py index 4468b59385..429a1df683 100644 --- a/web/tests/functional/products/test_config_db_share.py +++ b/web/tests/functional/products/test_config_db_share.py @@ -31,7 +31,6 @@ from libtest import env import multiprocess -# pylint: disable=no-member multiprocess module members. # Stopping events for CodeChecker server. EVENT = multiprocess.Event() @@ -45,7 +44,7 @@ def setup_class(self): def teardown_class(self): teardown_class_common() - def setup_method(self, method): + def setup_method(self, _): """ Set up the environment and the test module's configuration from the package. @@ -233,7 +232,7 @@ def create_test_product(product_name, product_endpoint): "the product missing should've resulted in " "an error.") - def teardown_method(self, method): + def teardown_method(self, _): """ Clean the environment after running this test module """ diff --git a/web/tests/functional/products/test_products.py b/web/tests/functional/products/test_products.py index ce5b2ab114..2019dd7231 100644 --- a/web/tests/functional/products/test_products.py +++ b/web/tests/functional/products/test_products.py @@ -37,9 +37,7 @@ def setup_class(self): def teardown_class(self): teardown_class_common() - def setup_method(self, method): - """ - """ + def setup_method(self, _): # TEST_WORKSPACE is automatically set by test package __init__.py . self.test_workspace = os.environ['TEST_WORKSPACE'] diff --git a/web/tests/functional/report_viewer_api/test_get_lines_in_file.py b/web/tests/functional/report_viewer_api/test_get_lines_in_file.py index 754746b0f4..1d1bf219e4 100644 --- a/web/tests/functional/report_viewer_api/test_get_lines_in_file.py +++ b/web/tests/functional/report_viewer_api/test_get_lines_in_file.py @@ -35,7 +35,7 @@ def setup_class(self): def teardown_class(self): teardown_class_common() - def setup_method(self, method): + def setup_method(self, _): test_workspace = os.environ['TEST_WORKSPACE'] test_class = self.__class__.__name__ @@ -105,7 +105,7 @@ def test_get_lines_in_source_file_contents(self): Get line content information for multiple files in different positions. """ runid = self._runid - logging.debug('Get line content information from the db for runid: ' + + logging.debug('Get line content information from the db for runid: %s', str(runid)) # Get reports by file to get a file id. 
diff --git a/web/tests/functional/report_viewer_api/test_get_run_results.py b/web/tests/functional/report_viewer_api/test_get_run_results.py index 080032b98a..ebeda742ec 100644 --- a/web/tests/functional/report_viewer_api/test_get_run_results.py +++ b/web/tests/functional/report_viewer_api/test_get_run_results.py @@ -40,7 +40,7 @@ def setup_class(self): def teardown_class(self): teardown_class_common() - def setup_method(self, method): + def setup_method(self, _): test_workspace = os.environ['TEST_WORKSPACE'] test_class = self.__class__.__name__ @@ -87,7 +87,7 @@ def __check_bug_path_order(self, run_results, order): def test_get_run_results_no_filter(self): """ Get all the run results without any filtering. """ runid = self._runid - logging.debug('Get all run results from the db for runid: ' + + logging.debug('Get all run results from the db for runid: %s', str(runid)) run_result_count = self._cc_client.getRunResultCount([runid], @@ -106,7 +106,7 @@ def test_get_run_results_checker_id_and_file_path(self): """ Test if all the bugs are found based on the test project configuration. """ runid = self._runid - logging.debug('Get all run results from the db for runid: ' + + logging.debug('Get all run results from the db for runid: %s', str(runid)) run_result_count = self._cc_client.getRunResultCount([runid], @@ -240,7 +240,7 @@ def test_get_source_file_content_latin1_encoding(self): def test_get_run_results_severity_sort(self): """ Get the run results and sort them by severity and filename ASC. """ runid = self._runid - logging.debug('Get all run results from the db for runid: ' + + logging.debug('Get all run results from the db for runid: %s', str(runid)) sort_mode1 = SortMode(SortType.SEVERITY, Order.ASC) sort_mode2 = SortMode(SortType.FILENAME, Order.ASC) @@ -294,11 +294,11 @@ def test_get_run_results_sorted2(self): """ Get the run results and sort them by file name and checker name ASC. 
""" runid = self._runid - logging.debug('Get all run results from the db for runid: ' + + logging.debug('Get all run results from the db for runid: %s', str(runid)) - sortMode1 = SortMode(SortType.FILENAME, Order.ASC) - sortMode2 = SortMode(SortType.CHECKER_NAME, Order.ASC) - sort_types = [sortMode1, sortMode2] + sort_mode_1 = SortMode(SortType.FILENAME, Order.ASC) + sort_mode_2 = SortMode(SortType.CHECKER_NAME, Order.ASC) + sort_types = [sort_mode_1, sort_mode_2] run_result_count = self._cc_client.getRunResultCount([runid], ReportFilter(), @@ -344,15 +344,15 @@ def test_get_run_results_sorted2(self): def test_bug_path_length(self): runid = self._runid - sortMode1 = SortMode(SortType.BUG_PATH_LENGTH, Order.ASC) - sortMode2 = SortMode(SortType.BUG_PATH_LENGTH, Order.DESC) + sort_mode_1 = SortMode(SortType.BUG_PATH_LENGTH, Order.ASC) + sort_mode_2 = SortMode(SortType.BUG_PATH_LENGTH, Order.DESC) simple_filter = ReportFilter() unique_filter = ReportFilter(isUnique=True) run_results = self._cc_client.getRunResults([runid], 100, 0, - [sortMode1], + [sort_mode_1], simple_filter, None, False) @@ -361,7 +361,7 @@ def test_bug_path_length(self): run_results = self._cc_client.getRunResults([runid], 100, 0, - [sortMode2], + [sort_mode_2], unique_filter, None, False) diff --git a/web/tests/functional/report_viewer_api/test_hash_clash.py b/web/tests/functional/report_viewer_api/test_hash_clash.py index a2309b2411..0ab695699d 100644 --- a/web/tests/functional/report_viewer_api/test_hash_clash.py +++ b/web/tests/functional/report_viewer_api/test_hash_clash.py @@ -56,7 +56,7 @@ def setup_class(self): def teardown_class(self): teardown_class_common() - def setup_method(self, method): + def setup_method(self, _): """ Not much setup is needed. Runs and results are automatically generated. @@ -91,7 +91,7 @@ def setup_method(self, method): self._run_name = 'test_hash_clash_' + uuid4().hex codechecker.store(self._codechecker_cfg, self._run_name) - def teardown_method(self, method): + def teardown_method(self, _): """ Remove the run which was stored by this test case. 
""" @@ -104,7 +104,7 @@ def teardown_method(self, method): def _reports_for_latest_run(self): runs = self._report.getRunData(None, None, 0, None) - max_run_id = max([run.runId for run in runs]) + max_run_id = max(run.runId for run in runs) return self._report.getRunResults([max_run_id], 100, 0, diff --git a/web/tests/functional/report_viewer_api/test_remove_run_results.py b/web/tests/functional/report_viewer_api/test_remove_run_results.py index 15b13d78fa..8899143443 100644 --- a/web/tests/functional/report_viewer_api/test_remove_run_results.py +++ b/web/tests/functional/report_viewer_api/test_remove_run_results.py @@ -37,7 +37,7 @@ def setup_class(self): def teardown_class(self): teardown_class_common() - def setup_method(self, method): + def setup_method(self, _): test_workspace = os.environ['TEST_WORKSPACE'] test_class = self.__class__.__name__ diff --git a/web/tests/functional/report_viewer_api/test_report_counting.py b/web/tests/functional/report_viewer_api/test_report_counting.py index 8625774da0..c0eb079622 100644 --- a/web/tests/functional/report_viewer_api/test_report_counting.py +++ b/web/tests/functional/report_viewer_api/test_report_counting.py @@ -44,9 +44,8 @@ def setup_class(self): def teardown_class(self): teardown_class_common() - def setup_method(self, method): + def setup_method(self, _): test_workspace = os.environ['TEST_WORKSPACE'] - self.maxDiff = None test_class = self.__class__.__name__ print('Running ' + test_class + ' tests in ' + test_workspace) diff --git a/web/tests/functional/report_viewer_api/test_report_filter.py b/web/tests/functional/report_viewer_api/test_report_filter.py index d46fc3f473..4b01777f64 100644 --- a/web/tests/functional/report_viewer_api/test_report_filter.py +++ b/web/tests/functional/report_viewer_api/test_report_filter.py @@ -45,7 +45,7 @@ def setup_class(self): def teardown_class(self): teardown_class_common() - def setup_method(self, method): + def setup_method(self, _): test_workspace = os.environ['TEST_WORKSPACE'] test_class = self.__class__.__name__ @@ -116,8 +116,9 @@ def test_filter_severity(self): for level in severity_test_data: for severity_level, test_result_count in level.items(): - logging.debug('Severity level filter ' + severity_level + - ' test result count: ' + str(test_result_count)) + logging.debug('Severity level filter %s test result count: %d', + severity_level, + test_result_count) sort_types = None sev = get_severity_level(severity_level) sev_f = ReportFilter(severity=[sev]) @@ -139,8 +140,9 @@ def test_filter_checker_id(self): for level in severity_test_data: for checker_id_filter, test_result_count in level.items(): - logging.debug('Checker id filter ' + checker_id_filter + - ' test result count: ' + str(test_result_count)) + logging.debug('Checker id filter %s test result count: %d', + checker_id_filter, + test_result_count) sort_types = None cid_f = ReportFilter(checkerName=[checker_id_filter]) @@ -165,8 +167,9 @@ def test_filter_file_path(self): for level in severity_test_data: for filepath_filter, test_result_count in level.items(): - logging.debug('File path filter ' + filepath_filter + - ' test result count: ' + str(test_result_count)) + logging.debug('File path filter %s test result count: %d', + filepath_filter, + test_result_count) sort_types = None fp_f = ReportFilter(filepath=[filepath_filter]) @@ -192,8 +195,9 @@ def test_filter_case_insensitive_file_path(self): for level in filter_test_data: for filepath_filter, test_result_count in level.items(): - logging.debug('File path filter ' + filepath_filter 
+ - ' test result count: ' + str(test_result_count)) + logging.debug('File path filter %s test result count: %d', + filepath_filter, + test_result_count) sort_types = None fp_f = ReportFilter(filepath=[filepath_filter]) @@ -259,8 +263,9 @@ def test_filter_review_status(self): for level in severity_test_data: for review_status, test_result_count in level.items(): - logging.debug('Review status ' + review_status + - ' test result count: ' + str(test_result_count)) + logging.debug('Review status %s test result count: %d', + review_status, + test_result_count) sort_types = None status = get_status(review_status) s_f = ReportFilter(reviewStatus=[status]) @@ -293,14 +298,14 @@ def test_filter_unique(self): None, 500, 0, sort_types, unique_filter, None, False) unique_result_count = self._cc_client.getRunResultCount( None, unique_filter, None) - unique_bughash = set([res.bugHash for res in run_results]) + unique_bughash = set(res.bugHash for res in run_results) # Get simple results. run_results = self._cc_client.getRunResults( None, 500, 0, sort_types, simple_filter, None, False) simple_result_count = self._cc_client.getRunResultCount( None, simple_filter, None) - simple_bughash = set([res.bugHash for res in run_results]) + simple_bughash = set(res.bugHash for res in run_results) diff_hashes = list(simple_bughash.difference(unique_bughash)) self.assertEqual(0, len(diff_hashes)) @@ -411,8 +416,9 @@ def test_filter_analyzer_name(self): for level in analyzer_name_test_data: for analyzer_name_filter, test_result_count in level.items(): - logging.debug('Analyzer name filter ' + analyzer_name_filter + - ' test result count: ' + str(test_result_count)) + logging.debug('Analyzer name filter %s test result count: %d', + analyzer_name_filter, + test_result_count) sort_types = None an_f = ReportFilter(analyzerNames=[analyzer_name_filter]) diff --git a/web/tests/functional/report_viewer_api/test_run_data.py b/web/tests/functional/report_viewer_api/test_run_data.py index 18671124eb..cad0cdd923 100644 --- a/web/tests/functional/report_viewer_api/test_run_data.py +++ b/web/tests/functional/report_viewer_api/test_run_data.py @@ -36,7 +36,7 @@ def setup_class(self): def teardown_class(self): teardown_class_common() - def setup_method(self, method): + def setup_method(self, _): test_workspace = os.environ['TEST_WORKSPACE'] test_class = self.__class__.__name__ @@ -211,26 +211,29 @@ def test_analysis_info(self): checkers = info.checkers - def assertChecker(analyzer, checker): + def assert_checker(analyzer, checker): self.assertTrue(checkers[analyzer][checker].enabled) - def assertNotChecker(analyzer, checker): + def assert_not_checker(analyzer, checker): self.assertFalse(checkers[analyzer][checker].enabled) - assertNotChecker("clangsa", "core.StackAddressEscape") - assertChecker("clangsa", "core.CallAndMessage") - assertChecker("clangsa", "cplusplus.NewDelete") - assertChecker("clangsa", "deadcode.DeadStores") - assertNotChecker("clangsa", "osx.cocoa.Loops") - assertNotChecker("clangsa", "unix.Malloc") - - assertNotChecker("clang-tidy", "bugprone-easily-swappable-parameters") - assertChecker("clang-tidy", "clang-diagnostic-division-by-zero") - assertNotChecker("clang-tidy", "clang-diagnostic-return-type") - assertNotChecker("clang-tidy", "clang-diagnostic-vla") - assertNotChecker("clang-tidy", "llvmlibc-restrict-system-libc-headers") - assertChecker("clang-tidy", "misc-definitions-in-headers") - assertNotChecker("clang-tidy", "objc-super-self") + assert_not_checker("clangsa", "core.StackAddressEscape") + 
assert_checker("clangsa", "core.CallAndMessage") + assert_checker("clangsa", "cplusplus.NewDelete") + assert_checker("clangsa", "deadcode.DeadStores") + assert_not_checker("clangsa", "osx.cocoa.Loops") + assert_not_checker("clangsa", "unix.Malloc") + + assert_not_checker( + "clang-tidy", "bugprone-easily-swappable-parameters") + assert_checker("clang-tidy", "clang-diagnostic-division-by-zero") + assert_not_checker("clang-tidy", "clang-diagnostic-return-type") + assert_not_checker("clang-tidy", "clang-diagnostic-vla") + assert_not_checker( + "clang-tidy", "llvmlibc-restrict-system-libc-headers") + assert_checker( + "clang-tidy", "misc-definitions-in-headers") + assert_not_checker("clang-tidy", "objc-super-self") self.assertTrue("cppcheck" not in checkers.keys(), "This analysis was run without CppCheck!") diff --git a/web/tests/functional/review_status/test_review_status.py b/web/tests/functional/review_status/test_review_status.py index d4b218cbb2..a1f5c34603 100644 --- a/web/tests/functional/review_status/test_review_status.py +++ b/web/tests/functional/review_status/test_review_status.py @@ -111,7 +111,7 @@ def teardown_class(self): print("Removing: " + TEST_WORKSPACE) shutil.rmtree(TEST_WORKSPACE, ignore_errors=True) - def setup_method(self, method): + def setup_method(self, _): self.test_workspace = os.environ['TEST_WORKSPACE'] test_class = self.__class__.__name__ @@ -137,7 +137,7 @@ def setup_method(self, method): 'with the given name configured at the test init.') self._runid = test_runs[0].runId - def teardown_method(self, method): + def teardown_method(self, _): """ Remove all review status rules after each test cases. """ self.__remove_all_rules() @@ -344,7 +344,7 @@ def test_review_status_update_from_source_trim(self): runs = self._cc_client.getRunData(run_filter, None, 0, None) run = runs[0] runid = run.runId - logging.debug('Get all run results from the db for runid: ' + + logging.debug('Get all run results from the db for runid: %s', str(runid)) reports = get_all_run_results(self._cc_client, runid) @@ -554,46 +554,46 @@ def test_review_status_changes(self): # comment shouldn't change and the one without source code comment # should. 
- NULL_DEREF_BUG_HASH = '0c07579523063acece2d7aebd4357cac' - UNCOMMENTED_BUG_HASH = 'f0bf9810fe405de502137f1eb71fb706' - MULTI_REPORT_HASH = '2d019b15c17a7cf6aa3b238b916872ba' + null_deref_bug_hash = '0c07579523063acece2d7aebd4357cac' + uncommented_bug_hash = 'f0bf9810fe405de502137f1eb71fb706' + multi_report_hash = '2d019b15c17a7cf6aa3b238b916872ba' self._cc_client.addReviewStatusRule( - NULL_DEREF_BUG_HASH, + null_deref_bug_hash, ReviewStatus.INTENTIONAL, "This is intentional") self._cc_client.addReviewStatusRule( - UNCOMMENTED_BUG_HASH, + uncommented_bug_hash, ReviewStatus.INTENTIONAL, "This is intentional") self._cc_client.addReviewStatusRule( - MULTI_REPORT_HASH, + multi_report_hash, ReviewStatus.FALSE_POSITIVE, "This is false positive.") reports1 = get_all_run_results(self._cc_client, runid1) null_deref_report1 = next(filter( - lambda r: r.bugHash == NULL_DEREF_BUG_HASH, + lambda r: r.bugHash == null_deref_bug_hash, reports1)) uncommented_report1 = next(filter( - lambda r: r.bugHash == UNCOMMENTED_BUG_HASH, + lambda r: r.bugHash == uncommented_bug_hash, reports1)) multi_confirmed_report1 = next(filter( - lambda r: r.bugHash == MULTI_REPORT_HASH and + lambda r: r.bugHash == multi_report_hash and r.reviewData.status == ReviewStatus.CONFIRMED, reports1)) multi_intentional_report1 = next(filter( - lambda r: r.bugHash == MULTI_REPORT_HASH and + lambda r: r.bugHash == multi_report_hash and r.reviewData.status == ReviewStatus.INTENTIONAL, reports1)) multi_unreviewed_report1 = next(filter( - lambda r: r.bugHash == MULTI_REPORT_HASH and + lambda r: r.bugHash == multi_report_hash and r.reviewData.status == ReviewStatus.UNREVIEWED, reports1), None) multi_false_positive_report1 = next(filter( - lambda r: r.bugHash == MULTI_REPORT_HASH and + lambda r: r.bugHash == multi_report_hash and r.reviewData.status == ReviewStatus.FALSE_POSITIVE, reports1)) @@ -624,7 +624,7 @@ def test_review_status_changes(self): self.assertIsNone(multi_unreviewed_report1) rule_filter = ReviewStatusRuleFilter( - reportHashes=[UNCOMMENTED_BUG_HASH]) + reportHashes=[uncommented_bug_hash]) review_status_rule_before = self._cc_client.getReviewStatusRules( rule_filter, None, None, 0)[0] @@ -638,7 +638,7 @@ def test_review_status_changes(self): codechecker.store(codechecker_cfg, test_project_name2) rule_filter = ReviewStatusRuleFilter( - reportHashes=[UNCOMMENTED_BUG_HASH]) + reportHashes=[uncommented_bug_hash]) review_status_rule_after = self._cc_client.getReviewStatusRules( rule_filter, None, None, 0)[0] @@ -653,26 +653,26 @@ def test_review_status_changes(self): reports2 = get_all_run_results(self._cc_client, runid2) null_deref_report2 = next(filter( - lambda r: r.bugHash == NULL_DEREF_BUG_HASH, + lambda r: r.bugHash == null_deref_bug_hash, reports2)) uncommented_report2 = next(filter( - lambda r: r.bugHash == UNCOMMENTED_BUG_HASH, + lambda r: r.bugHash == uncommented_bug_hash, reports2)) multi_confirmed_report2 = next(filter( - lambda r: r.bugHash == MULTI_REPORT_HASH and + lambda r: r.bugHash == multi_report_hash and r.reviewData.status == ReviewStatus.CONFIRMED, reports2)) multi_intentional_report2 = next(filter( - lambda r: r.bugHash == MULTI_REPORT_HASH and + lambda r: r.bugHash == multi_report_hash and r.reviewData.status == ReviewStatus.INTENTIONAL, reports2)) multi_unreviewed_report2 = next(filter( - lambda r: r.bugHash == MULTI_REPORT_HASH and + lambda r: r.bugHash == multi_report_hash and r.reviewData.status == ReviewStatus.UNREVIEWED, reports2), None) multi_false_positive_report2 = next(filter( - lambda r: 
r.bugHash == MULTI_REPORT_HASH and + lambda r: r.bugHash == multi_report_hash and r.reviewData.status == ReviewStatus.FALSE_POSITIVE, reports2)) @@ -714,13 +714,13 @@ def test_review_status_changes(self): # for reports without source code comment. rule_filter = ReviewStatusRuleFilter( - reportHashes=[UNCOMMENTED_BUG_HASH]) + reportHashes=[uncommented_bug_hash]) self._cc_client.removeReviewStatusRules(rule_filter) reports1 = get_all_run_results(self._cc_client, runid1) uncommented_report1 = next(filter( - lambda r: r.bugHash == UNCOMMENTED_BUG_HASH, + lambda r: r.bugHash == uncommented_bug_hash, reports1)) self.assertIsNone(uncommented_report1.fixedAt) @@ -728,7 +728,7 @@ def test_review_status_changes(self): reports2 = get_all_run_results(self._cc_client, runid2) uncommented_report2 = next(filter( - lambda r: r.bugHash == UNCOMMENTED_BUG_HASH, + lambda r: r.bugHash == uncommented_bug_hash, reports2)) self.assertIsNone(uncommented_report2.fixedAt) @@ -780,10 +780,11 @@ def setup_test_project(version): """ os.makedirs(project_path, exist_ok=True) - with open(os.path.join(project_path, "main.c"), "w") as f: + with open(os.path.join(project_path, "main.c"), "w", + encoding='utf-8') as f: f.write(sources[version]) - with open(build_json_path, "w") as f: + with open(build_json_path, "w", encoding='utf-8') as f: f.write(build_json) codechecker_cfg = env.import_codechecker_cfg(self.test_workspace) diff --git a/web/tests/functional/run_tag/test_run_tag.py b/web/tests/functional/run_tag/test_run_tag.py index 1bad0e8549..460685f537 100644 --- a/web/tests/functional/run_tag/test_run_tag.py +++ b/web/tests/functional/run_tag/test_run_tag.py @@ -72,7 +72,7 @@ def teardown_class(self): print("Removing: " + TEST_WORKSPACE) shutil.rmtree(TEST_WORKSPACE, ignore_errors=True) - def setup_method(self, method): + def setup_method(self, _): # TEST_WORKSPACE is automatically set by test package __init__.py . self.test_workspace = os.environ['TEST_WORKSPACE'] @@ -131,7 +131,7 @@ def setup_method(self, method): }"""] self.tags = ['v1.0', 'v1.1', 'v1.2'] - def teardown_method(self, method): + def teardown_method(self, _): """Restore environment after tests have ran.""" os.chdir(self.__old_pwd) diff --git a/web/tests/functional/server_configuration/test_server_configuration.py b/web/tests/functional/server_configuration/test_server_configuration.py index d3e801b50d..158098e68e 100644 --- a/web/tests/functional/server_configuration/test_server_configuration.py +++ b/web/tests/functional/server_configuration/test_server_configuration.py @@ -81,7 +81,7 @@ def teardown_class(self): print("Removing: " + TEST_WORKSPACE) shutil.rmtree(TEST_WORKSPACE, ignore_errors=True) - def setup_method(self, method): + def setup_method(self, _): """ Setup Configuration for tests. """ @@ -122,8 +122,8 @@ def test_auth_su_notification_edit(self): Test that SUPERADMINS can edit the notification text. """ # Create a SUPERUSER login. 
-        self.sessionToken = self.auth_client.performLogin("Username:Password",
-                                                          "root:root")
+        self.session_token = self.auth_client.performLogin(
+            "Username:Password", "root:root")
 
         ret = self.auth_client.addPermission(Permission.SUPERUSER,
                                              "root",
@@ -134,11 +134,11 @@ def test_auth_su_notification_edit(self):
         su_auth_client = \
             env.setup_auth_client(self._test_workspace,
-                                  session_token=self.sessionToken)
+                                  session_token=self.session_token)
 
         su_config_client = \
             env.setup_config_client(self._test_workspace,
-                                    session_token=self.sessionToken)
+                                    session_token=self.session_token)
 
         user = su_auth_client.getLoggedInUser()
         self.assertEqual(user, "root")
@@ -154,16 +154,16 @@ def test_auth_non_su_notification_edit(self):
         """
         Test that non SUPERADMINS can't edit the notification text.
         """
-        self.sessionToken = self.auth_client.performLogin("Username:Password",
-                                                          "cc:test")
+        self.session_token = self.auth_client.performLogin(
+            "Username:Password", "cc:test")
 
         authd_auth_client = \
             env.setup_auth_client(self._test_workspace,
-                                  session_token=self.sessionToken)
+                                  session_token=self.session_token)
 
         authd_config_client = \
             env.setup_config_client(self._test_workspace,
-                                    session_token=self.sessionToken)
+                                    session_token=self.session_token)
 
         user = authd_auth_client.getLoggedInUser()
         self.assertEqual(user, "cc")
diff --git a/web/tests/functional/skip/test_skip.py b/web/tests/functional/skip/test_skip.py
index f4e2436d9f..e3190cd7d7 100644
--- a/web/tests/functional/skip/test_skip.py
+++ b/web/tests/functional/skip/test_skip.py
@@ -153,7 +153,7 @@ def teardown_class(self):
         print("Removing: " + TEST_WORKSPACE)
         shutil.rmtree(TEST_WORKSPACE, ignore_errors=True)
 
-    def setup_method(self, method):
+    def setup_method(self, _):
         # TEST_WORKSPACE is automatically set by test package __init__.py
         self.test_workspace = os.environ['TEST_WORKSPACE']
@@ -189,7 +189,7 @@ def test_skip(self):
         """ There should be no results from the skipped file. """
         runid = self._runid
-        logging.debug('Get all run results from the db for runid: ' +
+        logging.debug('Get all run results from the db for runid: %s',
                       str(runid))
 
         run_results = get_all_run_results(self._cc_client, runid)
@@ -233,9 +233,7 @@ def test_skip(self):
         if not bug['checker'].startswith('clang-diagnostic-'):
             self.assertIn(bug['file'], skipped_files)
         else:
-            self.assertTrue(True,
-                            "There should be missing results because"
-                            "using skip")
+            self.fail("There should be missing results when using skip")
 
         self.assertEqual(len(run_results), len(test_proj_res) - len(skipped))
diff --git a/web/tests/functional/source_change/test_source_change.py b/web/tests/functional/source_change/test_source_change.py
index afd21f0640..1c9ad118f7 100644
--- a/web/tests/functional/source_change/test_source_change.py
+++ b/web/tests/functional/source_change/test_source_change.py
@@ -114,7 +114,7 @@ def teardown_class(self):
         print("Removing: " + TEST_WORKSPACE)
         shutil.rmtree(TEST_WORKSPACE, ignore_errors=True)
 
-    def setup_method(self, method):
+    def setup_method(self, _):
         # TEST_WORKSPACE is automatically set by test package __init__.py .
         self.test_workspace = os.environ['TEST_WORKSPACE']
diff --git a/web/tests/functional/ssl/test_ssl.py b/web/tests/functional/ssl/test_ssl.py
index 22c6c96d6e..056620bbaf 100644
--- a/web/tests/functional/ssl/test_ssl.py
+++ b/web/tests/functional/ssl/test_ssl.py
@@ -32,7 +32,6 @@ def setup_class(self):
 
         # Stopping event for CodeChecker server.
         global __STOP_SERVER
-        # pylint: disable=no-member multiprocess module members.
         __STOP_SERVER = multiprocess.Event()
 
         global TEST_WORKSPACE
@@ -87,7 +86,7 @@ def teardown_class(self):
         print("Removing: " + TEST_WORKSPACE)
         shutil.rmtree(TEST_WORKSPACE, ignore_errors=True)
 
-    def setup_method(self, method):
+    def setup_method(self, _):
         # Get the test workspace used to authentication tests.
         self._test_workspace = os.environ['TEST_WORKSPACE']
@@ -136,9 +135,9 @@ def test_privileged_access(self):
             self._test_cfg['codechecker_cfg']['check_env'],
             access_protocol)
 
-        self.sessionToken = auth_client.performLogin("Username:Password",
-                                                     "cc:test")
-        self.assertIsNotNone(self.sessionToken,
+        self.session_token = auth_client.performLogin(
+            "Username:Password", "cc:test")
+        self.assertIsNotNone(self.session_token,
                              "Valid credentials didn't give us a token!")
 
         handshake = auth_client.getAuthParameters()
@@ -149,7 +148,7 @@ def test_privileged_access(self):
                         "Valid session was " +
                         "reported not to be active.")
 
         client = env.setup_viewer_client(self._test_workspace,
-                                         session_token=self.sessionToken,
+                                         session_token=self.session_token,
                                          proto=access_protocol)
 
         self.assertIsNotNone(client.getPackageVersion(),
@@ -157,13 +156,13 @@ def test_privileged_access(self):
 
         authd_auth_client = \
             env.setup_auth_client(self._test_workspace,
-                                  session_token=self.sessionToken,
+                                  session_token=self.session_token,
                                   proto=access_protocol)
         user = authd_auth_client.getLoggedInUser()
         self.assertEqual(user, "cc")
 
         auth_client = env.setup_auth_client(self._test_workspace,
-                                            session_token=self.sessionToken,
+                                            session_token=self.session_token,
                                             proto=access_protocol)
         result = auth_client.destroySession()
diff --git a/web/tests/functional/statistics/test_statistics.py b/web/tests/functional/statistics/test_statistics.py
index e7554b8874..03c49733f5 100644
--- a/web/tests/functional/statistics/test_statistics.py
+++ b/web/tests/functional/statistics/test_statistics.py
@@ -10,6 +10,7 @@
 """ statistics collector feature test. """
 
+# pylint: disable=deprecated-module
 from distutils import util
 import os
 import shutil
@@ -93,7 +94,7 @@ def teardown_class(self):
         print("Removing: " + TEST_WORKSPACE)
         shutil.rmtree(TEST_WORKSPACE, ignore_errors=True)
 
-    def setup_method(self, method):
+    def setup_method(self, _):
         # TEST_WORKSPACE is automatically set by test package __init__.py .
         test_workspace = os.environ['TEST_WORKSPACE']
@@ -112,7 +113,7 @@ def setup_method(self, method):
 
         # Get if the package is able to collect statistics or not.
         cmd = [self._codechecker_cmd, 'analyze', '-h']
-        output, _ = call_command(cmd, cwd=test_workspace, env=self.env)
+        output, _ = call_command(cmd, cwd=test_workspace, environ=self.env)
         self.stats_capable = '--stats' in output
         print("'analyze' reported statistics collector-compatibility? " +
               str(self.stats_capable))
@@ -131,7 +132,7 @@ def setup_method(self, method):
 
         # Clean the test project before logging the compiler commands.
         output, err = call_command(test_project_clean,
                                    cwd=test_project_path,
-                                   env=self.env)
+                                   environ=self.env)
         print(output)
         print(err)
@@ -141,7 +142,7 @@ def setup_method(self, method):
         log_cmd.extend(test_project_build)
         output, err = call_command(log_cmd,
                                    cwd=test_project_path,
-                                   env=self.env)
+                                   environ=self.env)
         print(output)
         print(err)
@@ -156,7 +157,10 @@ def test_stats(self):
         cmd = [self._codechecker_cmd, 'analyze', '-o', 'reports',
                '--stats', 'compile_command.json']
-        output, err = call_command(cmd, cwd=test_project_path, env=self.env)
+        output, err = call_command(
+            cmd,
+            cwd=test_project_path,
+            environ=self.env)
         print(output)
         print(err)
         collect_msg = "Collecting data for statistical analysis."
@@ -174,7 +178,10 @@ def test_stats_collect(self):
         stats_dir = os.path.join(test_project_path, 'stats')
         cmd = [self._codechecker_cmd, 'analyze', '--stats-collect', stats_dir,
                'compile_command.json', '-o', 'reports']
-        output, err = call_command(cmd, cwd=test_project_path, env=self.env)
+        output, err = call_command(
+            cmd,
+            cwd=test_project_path,
+            environ=self.env)
         print(output)
         print(err)
         analyze_msg = "Starting static analysis"
@@ -198,7 +205,10 @@ def test_stats_collect_params(self):
                '--stats-min-sample-count', '10',
                '--stats-relevance-threshold', '0.8',
                '-o', 'reports']
-        output, err = call_command(cmd, cwd=test_project_path, env=self.env)
+        output, err = call_command(
+            cmd,
+            cwd=test_project_path,
+            environ=self.env)
         print(output)
         print(err)
         analyze_msg = "Starting static analysis"
@@ -223,7 +233,7 @@ def test_stats_use(self):
         stats_dir = os.path.join(test_project_path, 'stats')
         cmd = [self._codechecker_cmd, 'analyze', '--stats-collect', stats_dir,
                'compile_command.json', '-o', 'reports']
-        out, err = call_command(cmd, cwd=test_project_path, env=self.env)
+        out, err = call_command(cmd, cwd=test_project_path, environ=self.env)
         print(out)
         print(err)
@@ -232,7 +242,10 @@ def test_stats_use(self):
 
         cmd = [self._codechecker_cmd, 'analyze', '--stats-use', stats_dir,
                'compile_command.json', '-o', 'reports']
-        output, err = call_command(cmd, cwd=test_project_path, env=self.env)
+        output, err = call_command(
+            cmd,
+            cwd=test_project_path,
+            environ=self.env)
         print(output)
         print(err)
         self.assertIn(analyze_msg, output)
diff --git a/web/tests/functional/storage_of_analysis_statistics/test_storage_of_analysis_statistics.py b/web/tests/functional/storage_of_analysis_statistics/test_storage_of_analysis_statistics.py
index e87d6ce9bc..39b81eb810 100644
--- a/web/tests/functional/storage_of_analysis_statistics/test_storage_of_analysis_statistics.py
+++ b/web/tests/functional/storage_of_analysis_statistics/test_storage_of_analysis_statistics.py
@@ -48,7 +48,6 @@ def setup_class(self):
 
         # Stopping event for CodeChecker server.
         global EVENT_1
-        # pylint: disable=no-member multiprocess module members.
         EVENT_1 = multiprocess.Event()
 
         global TEST_WORKSPACE
@@ -109,7 +108,7 @@ def teardown_class(self):
         print("Removing: " + TEST_WORKSPACE)
         shutil.rmtree(TEST_WORKSPACE, ignore_errors=True)
 
-    def setup_method(self, method):
+    def setup_method(self, _):
         # Get the test workspace.
         self.test_workspace = os.environ['TEST_WORKSPACE']
@@ -178,7 +177,7 @@ def setup_method(self, method):
             xxx // Will cause a compilation error
         }"""]
 
-    def teardown_method(self, method):
+    def teardown_method(self, _):
         """Restore environment after tests have ran."""
         os.chdir(self.__old_pwd)
diff --git a/web/tests/functional/store/test_store.py b/web/tests/functional/store/test_store.py
index d4cd7473a7..3ea8bbdad1 100644
--- a/web/tests/functional/store/test_store.py
+++ b/web/tests/functional/store/test_store.py
@@ -17,7 +17,6 @@
 import shutil
 import inspect
 import plistlib
-import shutil
 import subprocess
 import unittest
@@ -29,7 +28,7 @@
 from libtest import plist_test
 
 
-def _call_cmd(command, cwd=None, env=None):
+def _call_cmd(command, cwd=None, environ=None):
     try:
         print(' '.join(command))
         proc = subprocess.Popen(
@@ -37,7 +36,7 @@ def _call_cmd(command, cwd=None, env=None):
             cwd=cwd,
             stdout=subprocess.PIPE,
             stderr=subprocess.PIPE,
-            env=env, encoding="utf-8", errors="ignore")
+            env=environ, encoding="utf-8", errors="ignore")
         out, err = proc.communicate()
         return proc.returncode, out, err
     except subprocess.CalledProcessError as cerr:
@@ -118,7 +117,7 @@ def teardown_class(self):
         print("Removing: " + TEST_WORKSPACE)
         shutil.rmtree(TEST_WORKSPACE, ignore_errors=True)
 
-    def setup_method(self, method):
+    def setup_method(self, _):
         # Get the test workspace used to cppcheck tests.
         self._test_workspace = os.environ["TEST_WORKSPACE"]
@@ -413,7 +412,7 @@ def test_same_headers(self):
         def create_build_json(source_file):
             build_json = \
                 os.path.join(self._same_headers_workspace, 'build.json')
-            with open(build_json, 'w') as f:
+            with open(build_json, 'w', encoding='utf-8') as f:
                 json.dump([{
                     'directory': '.',
                     'command': f'g++ -c {source_file}',
diff --git a/web/tests/functional/suppress/test_suppress_generation.py b/web/tests/functional/suppress/test_suppress_generation.py
index 733f55cb9b..311c2f4923 100644
--- a/web/tests/functional/suppress/test_suppress_generation.py
+++ b/web/tests/functional/suppress/test_suppress_generation.py
@@ -64,7 +64,7 @@ def _generate_suppress_file(suppress_file):
     s_file.close()
 
 
-def call_cmd(command, cwd, env):
+def call_cmd(command, cwd, environ):
     try:
         print(' '.join(command))
         proc = subprocess.Popen(
@@ -72,7 +72,7 @@ def call_cmd(command, cwd, env):
             cwd=cwd,
             stdout=subprocess.PIPE,
             stderr=subprocess.PIPE,
-            env=env, encoding="utf-8", errors="ignore")
+            env=environ, encoding="utf-8", errors="ignore")
         out, err = proc.communicate()
         print(out)
         print(err)
@@ -171,7 +171,7 @@ def teardown_class(self):
         print("Removing: " + TEST_WORKSPACE)
         shutil.rmtree(TEST_WORKSPACE, ignore_errors=True)
 
-    def setup_method(self, method):
+    def setup_method(self, _):
         self._test_workspace = os.environ['TEST_WORKSPACE']
 
         self._testproject_data = env.setup_test_proj_cfg(self._test_workspace)
@@ -237,7 +237,7 @@ def test_suppress_comment_in_db(self):
         Exported source suppress comment stored as a review status in the db.
""" runid = self._runid - logging.debug("Get all run results from the db for runid: " + + logging.debug("Get all run results from the db for runid: %s", str(runid)) expected_file_path = os.path.join(self._test_directory, @@ -272,13 +272,13 @@ def test_suppress_comment_in_db(self): run_results = get_all_run_results(self._cc_client, runid) logging.debug("Run results:") - [logging.debug(x) for x in run_results] + for run_result in run_results: + logging.debug(run_result) self.assertIsNotNone(run_results) self.assertNotEqual(len(run_results), 0) - for bug_hash in hash_to_suppress_msgs: - logging.debug("tesing for bug hash " + bug_hash) - expected_data = hash_to_suppress_msgs[bug_hash] + for bug_hash, expected_data in hash_to_suppress_msgs.items(): + logging.debug("tesing for bug hash %s", bug_hash) report_data_of_bug = [ report_data for report_data in run_results if report_data.bugHash == bug_hash] @@ -364,8 +364,7 @@ def test_suppress_comment_in_db(self): self.assertIsNotNone(updated_results) self.assertNotEqual(len(updated_results), 0) - for bug_hash in hash_to_suppress_msgs: - expected_data = hash_to_suppress_msgs[bug_hash] + for bug_hash, expected_data in hash_to_suppress_msgs.items(): report_data = [report_data for report_data in updated_results if report_data.bugHash == bug_hash][0] diff --git a/web/tests/functional/update/test_update_mode.py b/web/tests/functional/update/test_update_mode.py index c4f204fdea..8a3f0cae98 100644 --- a/web/tests/functional/update/test_update_mode.py +++ b/web/tests/functional/update/test_update_mode.py @@ -102,7 +102,7 @@ def teardown_class(self): print("Removing: " + TEST_WORKSPACE) shutil.rmtree(TEST_WORKSPACE, ignore_errors=True) - def setup_method(self, method): + def setup_method(self, _): self._test_workspace = os.environ.get('TEST_WORKSPACE') test_class = self.__class__.__name__ diff --git a/web/tests/libtest/codechecker.py b/web/tests/libtest/codechecker.py index d952e32ea0..846011e05e 100644 --- a/web/tests/libtest/codechecker.py +++ b/web/tests/libtest/codechecker.py @@ -32,7 +32,7 @@ ("admin", Permission.PRODUCT_ADMIN)] -def call_command(cmd, cwd, env): +def call_command(cmd, cwd, environ): """ Execute a process in a test case. If the run is successful do not bloat the test output, but in case of any failure dump stdout and stderr. @@ -49,14 +49,14 @@ def show(out, err): stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd, - env=env, + env=environ, encoding="utf-8", errors="ignore") out, err = proc.communicate() if proc.returncode == 1: show(out, err) print('Unsuccessful run: "' + ' '.join(cmd) + '"') - raise Exception("Unsuccessful run of command.") + raise OSError("Unsuccessful run of command.") return out, err except OSError: show(out, err) @@ -663,12 +663,12 @@ def wait_for_server_start(stdoutfile): return if n > server_start_timeout.total_seconds(): - print("[FATAL!] Server failed to start after '%s' (%d seconds). " - "There is likely a major issue preventing startup!" - % (str(server_start_timeout), - server_start_timeout.total_seconds())) + print("[FATAL!] Server failed to start after " + f"'{str(server_start_timeout)}' " + f"({server_start_timeout.total_seconds()} seconds). " + "There is likely a major issue preventing startup!") if os.path.isfile(stdoutfile): - with open(stdoutfile) as f: + with open(stdoutfile, encoding='utf-8') as f: print("*** HERE FOLLOWS THE OUTPUT OF THE 'server' " "COMMAND! 
***") print(f.read()) @@ -714,7 +714,6 @@ def start_server_proc(event, server_cmd, checking_env): pg_config, server_args or []) - # pylint: disable=no-member multiprocess module members. server_proc = multiprocess.Process( name='server', target=start_server_proc, @@ -734,7 +733,7 @@ def start_server_proc(event, server_cmd, checking_env): def add_test_package_product(server_data, test_folder, check_env=None, protocol='http', report_limit=None, - user_permissions=DEFAULT_USER_PERMISSIONS): + user_permissions=None): """ Add a product for a test suite to the server provided by server_data. Server must be running before called. @@ -742,6 +741,9 @@ def add_test_package_product(server_data, test_folder, check_env=None, server_data must contain three keys: viewer_{host, port, product}. """ + if user_permissions is None: + user_permissions = DEFAULT_USER_PERMISSIONS + if not check_env: check_env = env.test_env(test_folder) @@ -812,7 +814,7 @@ def add_test_package_product(server_data, test_folder, check_env=None, False, extra_params) if not ret: - raise Exception("Failed to add permission to " + user) + raise RuntimeError("Failed to add permission to " + user) logout(codechecker_cfg, test_folder, protocol) @@ -821,7 +823,7 @@ def add_test_package_product(server_data, test_folder, check_env=None, login(codechecker_cfg, test_folder, "cc", "test", protocol) if returncode: - raise Exception("Failed to add the product to the test server!") + raise RuntimeError("Failed to add the product to the test server!") def remove_test_package_product(test_folder, check_env=None, protocol='http', @@ -867,7 +869,8 @@ def remove_test_package_product(test_folder, check_env=None, protocol='http', env.del_database(product_to_remove, check_env) if returncode: - raise Exception("Failed to remove the product from the test server!") + raise RuntimeError( + "Failed to remove the product from the test server!") def _pg_db_config_to_cmdline_params(pg_db_config): diff --git a/web/tests/libtest/env.py b/web/tests/libtest/env.py index b6642771fc..1610db8bef 100644 --- a/web/tests/libtest/env.py +++ b/web/tests/libtest/env.py @@ -101,17 +101,17 @@ def del_database(dbname, env=None): if pg_config: pg_config['dbname'] = dbname - remove_cmd = """ + remove_cmd = f""" UPDATE pg_database SET datallowconn='false' - WHERE datname='{0}'; + WHERE datname='{dbname}'; SELECT pg_terminate_backend(pid) FROM pg_stat_activity - WHERE datname='{0}'; + WHERE datname='{dbname}'; - DROP DATABASE "{0}"; - """.format(dbname) + DROP DATABASE "{dbname}"; + """ with tempfile.NamedTemporaryFile(suffix='.sql') as sql_file: sql_file.write(remove_cmd.encode('utf-8')) diff --git a/web/tests/libtest/thrift_client_to_db.py b/web/tests/libtest/thrift_client_to_db.py index b1b4606573..de7788c929 100644 --- a/web/tests/libtest/thrift_client_to_db.py +++ b/web/tests/libtest/thrift_client_to_db.py @@ -135,18 +135,18 @@ def __init__(self, protocol, host, port, product, endpoint, if session_token: headers = {'Cookie': SESSION_COOKIE_NAME + '=' + session_token} transport.setCustomHeaders(headers) - super(CCViewerHelper, self).__init__(transport, - client, auto_handle_connection) + super().__init__(transport, client, auto_handle_connection) def __getattr__(self, attr): - is_getAll = re.match(r'(get)All(.*)$', attr) - if is_getAll: - func_name = is_getAll.group(1) + is_getAll.group(2) - return partial(self._getAll_emu, func_name) + is_get_all = re.match(r'(get)All(.*)$', attr) + + if is_get_all: + func_name = is_get_all.group(1) + is_get_all.group(2) + return 
partial(self._get_all_emu, func_name) else: return partial(self._thrift_client_call, attr) - def _getAll_emu(self, func_name, *args): + def _get_all_emu(self, func_name, *args): """ Do not call the getAll* functions with keyword arguments, limit and offset must be the -4. / -3. positional arguments @@ -185,8 +185,7 @@ def __init__(self, proto, host, port, uri, auto_handle_connection=True, if session_token: headers = {'Cookie': SESSION_COOKIE_NAME + '=' + session_token} transport.setCustomHeaders(headers) - super(CCAuthHelper, self).__init__(transport, - client, auto_handle_connection) + super().__init__(transport, client, auto_handle_connection) def __getattr__(self, attr): return partial(self._thrift_client_call, attr) @@ -211,8 +210,7 @@ def __init__(self, proto, host, port, product, if session_token: headers = {'Cookie': SESSION_COOKIE_NAME + '=' + session_token} transport.setCustomHeaders(headers) - super(CCProductHelper, self).__init__(transport, - client, auto_handle_connection) + super().__init__(transport, client, auto_handle_connection) def __getattr__(self, attr): return partial(self._thrift_client_call, attr) @@ -234,8 +232,7 @@ def __init__(self, proto, host, port, uri, auto_handle_connection=True, if session_token: headers = {'Cookie': SESSION_COOKIE_NAME + '=' + session_token} transport.setCustomHeaders(headers) - super(CCConfigHelper, self).__init__(transport, - client, auto_handle_connection) + super().__init__(transport, client, auto_handle_connection) def __getattr__(self, attr): return partial(self._thrift_client_call, attr) diff --git a/web/tests/tools/__init__.py b/web/tests/tools/__init__.py new file mode 100644 index 0000000000..4259749345 --- /dev/null +++ b/web/tests/tools/__init__.py @@ -0,0 +1,7 @@ +# ------------------------------------------------------------------------- +# +# Part of the CodeChecker project, under the Apache License v2.0 with +# LLVM Exceptions. See LICENSE for license information. +# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +# +# ------------------------------------------------------------------------- diff --git a/web/tests/tools/add_unicode_comment.py b/web/tests/tools/add_unicode_comment.py index 27faae6cda..e1ee4fbe7e 100644 --- a/web/tests/tools/add_unicode_comment.py +++ b/web/tests/tools/add_unicode_comment.py @@ -32,10 +32,10 @@ def add_comment_to_file(args, file_path): if args.remove: - print('Removing comments from %s' % file_path) + print(f'Removing comments from {file_path}') full_comment = '' else: - print('Adding the comment to %s' % file_path) + print(f'Adding the comment to {file_path}') full_comment = '\n' + COMMENT_BEGIN + COMMENT + COMMENT_END + '\n' with open(file_path, 'r+', encoding="utf-8", errors="ignore") as handle: @@ -85,7 +85,7 @@ def main(): elif os.path.isdir(full_path): add_comment_to_directory(args, full_path) else: - print('%s is not a valid file or directory.' % path) + print(f'{path} is not a valid file or directory.') else: add_comment_to_directory(args, os.getcwd())