From d6ca671a8f51a82cce4ae2c663df575e8abf51f6 Mon Sep 17 00:00:00 2001
From: Wolfgang Kulhanek
Date: Thu, 9 Oct 2025 15:40:31 +0200
Subject: [PATCH] Add checks

---
 .github/workflows/static-checks-pr.yml   |  34 ++
 .github/workflows/static-checks-push.yml |  34 ++
 tests/static/.coveragerc                 |  20 +
 tests/static/.flake8                     |   5 +
 tests/static/.pylintrc                   | 385 +++++++++++++++++++
 tests/static/.yamllint                   |  81 ++++
 tests/static/pytest.ini                  |  12 +
 tests/static/requirements.txt            |   8 +
 tests/static/setup.py                    | 463 +++++++++++++++++++++++
 tests/static/syntax-check.sh             | 232 ++++++++++++
 tests/static/test-requirements.txt       |  57 +++
 tests/static/tox-inventory.txt           |  67 ++++
 tests/static/tox-inventory.txt_extras    |   3 +
 tests/static/tox.ini                     |  19 +
 14 files changed, 1420 insertions(+)
 create mode 100644 .github/workflows/static-checks-pr.yml
 create mode 100644 .github/workflows/static-checks-push.yml
 create mode 100644 tests/static/.coveragerc
 create mode 100644 tests/static/.flake8
 create mode 100644 tests/static/.pylintrc
 create mode 100644 tests/static/.yamllint
 create mode 100644 tests/static/pytest.ini
 create mode 100644 tests/static/requirements.txt
 create mode 100644 tests/static/setup.py
 create mode 100755 tests/static/syntax-check.sh
 create mode 100644 tests/static/test-requirements.txt
 create mode 100644 tests/static/tox-inventory.txt
 create mode 100644 tests/static/tox-inventory.txt_extras
 create mode 100644 tests/static/tox.ini

diff --git a/.github/workflows/static-checks-pr.yml b/.github/workflows/static-checks-pr.yml
new file mode 100644
index 0000000..6874cd9
--- /dev/null
+++ b/.github/workflows/static-checks-pr.yml
@@ -0,0 +1,34 @@
+---
+name: Static Checks Pull Request
+
+on:
+  pull_request:
+    branches:
+      - main
+
+jobs:
+  static-checks:
+    name: Run static tests using tox
+    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        python-version:
+          - '3.12'
+
+    steps:
+      - name: Get Repository Code
+        id: get_repo_code
+        uses: actions/checkout@v2
+        with:
+          fetch-depth: 0
+
+      - name: Set up Python ${{ matrix.python-version }}
+        uses: actions/setup-python@v2
+        with:
+          python-version: ${{ matrix.python-version }}
+
+      - name: Run static checks
+        run: |
+          pip install tox
+          tox -c tests/static -- ${{ github.base_ref }} ${{ github.sha }}
+...
\ No newline at end of file
diff --git a/.github/workflows/static-checks-push.yml b/.github/workflows/static-checks-push.yml
new file mode 100644
index 0000000..fe34777
--- /dev/null
+++ b/.github/workflows/static-checks-push.yml
@@ -0,0 +1,34 @@
+---
+name: Static Checks Push
+
+on:
+  push:
+    branches:
+      - main
+    tags:
+      - v*
+
+jobs:
+  static-checks:
+    name: Run static tests using tox
+    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        python-version:
+          - '3.12'
+
+    steps:
+      - name: Get Repository Code
+        id: get_repo_code
+        uses: actions/checkout@v2
+
+      - name: Set up Python ${{ matrix.python-version }}
+        uses: actions/setup-python@v2
+        with:
+          python-version: ${{ matrix.python-version }}
+
+      - name: Run static checks
+        run: |
+          pip install tox
+          tox -c tests/static
+...
\ No newline at end of file
diff --git a/tests/static/.coveragerc b/tests/static/.coveragerc
new file mode 100644
index 0000000..ad7893b
--- /dev/null
+++ b/tests/static/.coveragerc
@@ -0,0 +1,20 @@
+[run]
+branch = True
+omit =
+    */lib/python*/site-packages/*
+    */lib/python*/*
+    /usr/*
+    */setup.py
+    # TODO(rhcarvalho): this is used to ignore test files from coverage report.
+    # We can make this less generic when we stick with a single test pattern in
+    # the repo.
+ */conftest.py + */test_*.py + */*_tests.py + */test/* + +[report] +fail_under = 28 + +[html] +directory = cover diff --git a/tests/static/.flake8 b/tests/static/.flake8 new file mode 100644 index 0000000..cce460d --- /dev/null +++ b/tests/static/.flake8 @@ -0,0 +1,5 @@ +[flake8] +# TODO: cleanup flake8 issues with utils/test/* +exclude=.tox,inventory +max_line_length = 120 +ignore = E501,T003 diff --git a/tests/static/.pylintrc b/tests/static/.pylintrc new file mode 100644 index 0000000..80c74b7 --- /dev/null +++ b/tests/static/.pylintrc @@ -0,0 +1,385 @@ +[MASTER] +# Specify a configuration file. +#rcfile= + +# Python code to execute, usually for sys.path manipulation such as +# pygtk.require(). +#init-hook= + +# Add files or directories to the blacklist. They should be base names, not +# paths. +ignore=CVS,setup.py + +# Pickle collected data for later comparisons. +persistent=no + +# List of plugins (as comma separated values of python modules names) to load, +# usually to register additional checkers. +load-plugins= + +# Use multiple processes to speed up Pylint. +# Zero means use the total number of CPUs. +jobs=0 + +# Allow loading of arbitrary C extensions. Extensions are imported into the +# active Python interpreter and may run arbitrary code. +unsafe-load-any-extension=no + +# A comma-separated list of package or module names from where C extensions may +# be loaded. Extensions are loading into the active Python interpreter and may +# run arbitrary code +extension-pkg-whitelist= + +# Allow optimization of some AST trees. This will activate a peephole AST +# optimizer, which will apply various small optimizations. For instance, it can +# be used to obtain the result of joining multiple strings with the addition +# operator. Joining a lot of strings can lead to a maximum recursion error in +# Pylint and this flag can prevent that. It has one side effect, the resulting +# AST will be different than the one from reality. +optimize-ast=no + + +[MESSAGES CONTROL] + +# Only show warnings with the listed confidence levels. Leave empty to show +# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED +confidence= + +# Enable the message, report, category or checker with the given id(s). You can +# either give multiple identifier separated by comma (,) or put this option +# multiple time (only on the command line, not in the configuration file where +# it should appear only once). See also the "--disable" option for examples. +#enable= + +# Disable the message, report, category or checker with the given id(s). You +# can either give multiple identifiers separated by comma (,) or put this +# option multiple times (only on the command line, not in the configuration +# file where it should appear only once).You can also use "--disable=all" to +# disable everything first and then reenable specific checks. For example, if +# you want to run only the similarities checker, you can use "--disable=all +# --enable=similarities". If you want to run only the classes checker, but have +# no Warning level messages displayed, use"--disable=all --enable=classes +# --disable=W" +disable=fixme,locally-disabled,file-ignored,duplicate-code + + +[REPORTS] + +# Set the output format. Available formats are text, parseable, colorized, msvs +# (visual studio) and html. You can also give a reporter class, eg +# mypackage.mymodule.MyReporterClass. +output-format=parseable + +# Put messages in a separate file for each module / package specified on the +# command line instead of printing them on stdout. 
Reports (if any) will be +# written in a file name "pylint_global.[txt|html]". +files-output=no + +# Tells whether to display a full report or only the messages +reports=no + +# Python expression which should return a note less than 10 (10 is the highest +# note). You have access to the variables errors warning, statement which +# respectively contain the number of errors / warnings messages and the total +# number of statements analyzed. This is used by the global evaluation report +# (RP0004). +evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10) + +# Template used to display messages. This is a python new-style format string +# used to format the message information. See doc for all details +#msg-template= + + +[SIMILARITIES] + +# Minimum lines number of a similarity. +min-similarity-lines=4 + +# Ignore comments when computing similarities. +ignore-comments=yes + +# Ignore docstrings when computing similarities. +ignore-docstrings=yes + +# Ignore imports when computing similarities. +ignore-imports=yes + + +[BASIC] + +# List of builtins function names that should not be used, separated by a comma +bad-functions=map,filter,input + +# Good variable names which should always be accepted, separated by a comma +good-names=i,j,k,ex,Run,_ + +# Bad variable names which should always be refused, separated by a comma +bad-names=foo,bar,baz,toto,tutu,tata + +# Colon-delimited sets of names that determine each other's naming style when +# the name regexes allow several styles. +name-group= + +# Include a hint for the correct naming format with invalid-name +include-naming-hint=no + +# Regular expression matching correct function names +function-rgx=[a-z_][a-z0-9_]{2,30}$ + +# Naming hint for function names +function-name-hint=[a-z_][a-z0-9_]{2,30}$ + +# Regular expression matching correct variable names +variable-rgx=[a-z_][a-z0-9_]{2,30}$ + +# Naming hint for variable names +variable-name-hint=[a-z_][a-z0-9_]{2,30}$ + +# Regular expression matching correct constant names +const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$ + +# Naming hint for constant names +const-name-hint=(([A-Z_][A-Z0-9_]*)|(__.*__))$ + +# Regular expression matching correct attribute names +attr-rgx=[a-z_][a-z0-9_]{2,30}$ + +# Naming hint for attribute names +attr-name-hint=[a-z_][a-z0-9_]{2,30}$ + +# Regular expression matching correct argument names +argument-rgx=[a-z_][a-z0-9_]{2,30}$ + +# Naming hint for argument names +argument-name-hint=[a-z_][a-z0-9_]{2,30}$ + +# Regular expression matching correct class attribute names +class-attribute-rgx=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$ + +# Naming hint for class attribute names +class-attribute-name-hint=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$ + +# Regular expression matching correct inline iteration names +inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$ + +# Naming hint for inline iteration names +inlinevar-name-hint=[A-Za-z_][A-Za-z0-9_]*$ + +# Regular expression matching correct class names +class-rgx=[A-Z_][a-zA-Z0-9]+$ + +# Naming hint for class names +class-name-hint=[A-Z_][a-zA-Z0-9]+$ + +# Regular expression matching correct module names +module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$ + +# Naming hint for module names +module-name-hint=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$ + +# Regular expression matching correct method names +method-rgx=[a-z_][a-z0-9_]{2,30}$ + +# Naming hint for method names +method-name-hint=[a-z_][a-z0-9_]{2,30}$ + +# Regular expression which should only match function or class names that do +# not require a docstring. 
+no-docstring-rgx=^_ + +# Minimum line length for functions/classes that require docstrings, shorter +# ones are exempt. +docstring-min-length=-1 + + +[ELIF] + +# Maximum number of nested blocks for function / method body +max-nested-blocks=5 + + +[MISCELLANEOUS] + +# List of note tags to take in consideration, separated by a comma. +notes=FIXME,XXX,TODO + + +[TYPECHECK] + +# Tells whether missing members accessed in mixin class should be ignored. A +# mixin class is detected if its name ends with "mixin" (case insensitive). +ignore-mixin-members=yes + +# List of module names for which member attributes should not be checked +# (useful for modules/projects where namespaces are manipulated during runtime +# and thus existing member attributes cannot be deduced by static analysis. It +# supports qualified module names, as well as Unix pattern matching. +# Ignoring ansible.constants to suppress `no-member` warnings +ignored-modules=ansible.constants + +# List of classes names for which member attributes should not be checked +# (useful for classes with attributes dynamically set). This supports can work +# with qualified names. +ignored-classes= + +# List of members which are set dynamically and missed by pylint inference +# system, and so shouldn't trigger E1101 when accessed. Python regular +# expressions are accepted. +generated-members= + + +[SPELLING] + +# Spelling dictionary name. Available dictionaries: en_ZW (myspell), en_NG +# (myspell), en_NA (myspell), en_NZ (myspell), en_PH (myspell), en_AG +# (myspell), en_BW (myspell), en_IE (myspell), en_ZM (myspell), en_DK +# (myspell), en_CA (myspell), en_GH (myspell), en_IN (myspell), en_BZ +# (myspell), en_MW (myspell), en_TT (myspell), en_JM (myspell), en_GB +# (myspell), en_ZA (myspell), en_SG (myspell), en_AU (myspell), en_US +# (myspell), en_BS (myspell), en_HK (myspell). +spelling-dict= + +# List of comma separated words that should not be checked. +spelling-ignore-words= + +# A path to a file that contains private dictionary; one word per line. +spelling-private-dict-file= + +# Tells whether to store unknown words to indicated private dictionary in +# --spelling-private-dict-file option instead of raising a message. +spelling-store-unknown-words=no + + +[FORMAT] + +# Maximum number of characters on a single line. +max-line-length=120 + +# Regexp for a line that is allowed to be longer than the limit. +ignore-long-lines=^\s*(# )??$ + +# Allow the body of an if to be on the same line as the test if there is no +# else. +single-line-if-stmt=no + +# List of optional constructs for which whitespace checking is disabled. `dict- +# separator` is used to allow tabulation in dicts, etc.: {1 : 1,\n222: 2}. +# `trailing-comma` allows a space between comma and closing bracket: (a, ). +# `empty-line` allows space-only lines. +no-space-check=trailing-comma,dict-separator + +# Maximum number of lines in a module +max-module-lines=1000 + +# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 +# tab). +indent-string=' ' + +# Number of spaces of indent required inside a hanging or continued line. +indent-after-paren=4 + +# Expected format of line ending, e.g. empty (any line ending), LF or CRLF. +expected-line-ending-format= + + +[VARIABLES] + +# Tells whether we should check for unused import in __init__ files. +init-import=no + +# A regular expression matching the name of dummy variables (i.e. expectedly +# not used). +dummy-variables-rgx=_$|dummy + +# List of additional names supposed to be defined in builtins. 
Remember that +# you should avoid to define new builtins when possible. +additional-builtins= + +# List of strings which can identify a callback function by name. A callback +# name must start or end with one of those strings. +callbacks=cb_,_cb + + +[LOGGING] + +# Logging modules to check that the string format arguments are in logging +# function parameter format +logging-modules=logging + + +[CLASSES] + +# List of method names used to declare (i.e. assign) instance attributes. +defining-attr-methods=__init__,__new__,setUp + +# List of valid names for the first argument in a class method. +valid-classmethod-first-arg=cls + +# List of valid names for the first argument in a metaclass class method. +valid-metaclass-classmethod-first-arg=mcs + +# List of member names, which should be excluded from the protected access +# warning. +exclude-protected=_asdict,_fields,_replace,_source,_make + + +[DESIGN] + +# Maximum number of arguments for function / method +max-args=5 + +# Argument names that match this expression will be ignored. Default to name +# with leading underscore +ignored-argument-names=_.* + +# Maximum number of locals for function / method body +max-locals=20 + +# Maximum number of return / yield for function / method body +max-returns=6 + +# Maximum number of branch for function / method body +max-branches=12 + +# Maximum number of statements in function / method body +max-statements=50 + +# Maximum number of parents for a class (see R0901). +max-parents=7 + +# Maximum number of attributes for a class (see R0902). +max-attributes=7 + +# Minimum number of public methods for a class (see R0903). +min-public-methods=2 + +# Maximum number of public methods for a class (see R0904). +max-public-methods=20 + +# Maximum number of boolean expressions in a if statement +max-bool-expr=5 + + +[IMPORTS] + +# Deprecated modules which should not be used, separated by a comma +deprecated-modules=regsub,TERMIOS,Bastion,rexec + +# Create a graph of every (i.e. internal and external) dependencies in the +# given file (report RP0402 must not be disabled) +import-graph= + +# Create a graph of external dependencies in the given file (report RP0402 must +# not be disabled) +ext-import-graph= + +# Create a graph of internal dependencies in the given file (report RP0402 must +# not be disabled) +int-import-graph= + + +[EXCEPTIONS] + +# Exceptions that will emit a warning when being caught. 
Defaults to +# "Exception" +overgeneral-exceptions=Exception diff --git a/tests/static/.yamllint b/tests/static/.yamllint new file mode 100644 index 0000000..ee97c8f --- /dev/null +++ b/tests/static/.yamllint @@ -0,0 +1,81 @@ +# -*- mode: yaml -*- +# vim:ts=2:et:sw=2:ai:si:syntax=yaml +# +# yamllint configuration directives +# Project Homepage: https://github.com/adrienverge/yamllint +# +# Overriding rules in files: +# http://yamllint.readthedocs.io/en/latest/disable_with_comments.html +--- +extends: default + +# Rules documentation: http://yamllint.readthedocs.io/en/latest/rules.html +rules: + + braces: + # Defaults + # min-spaces-inside: 0 + # max-spaces-inside: 0 + + # Keeping 0 min-spaces to not error on empty collection definitions + min-spaces-inside: 0 + # Allowing one space inside braces to improve code readability + max-spaces-inside: 1 + + brackets: + # Defaults + # min-spaces-inside: 0 + # max-spaces-inside: 0 + + # Keeping 0 min-spaces to not error on empty collection definitions + min-spaces-inside: 0 + # Allowing one space inside braces to improve code readability + max-spaces-inside: 1 + + comments: + # Defaults + # level: warning + # require-starting-space: true + # min-spaces-from-content: 2 + + # Disabling to allow for code comment blocks and #!/usr/bin/ansible-playbook + require-starting-space: false + + # Allow in-line comments to be one space from content + min-spaces-from-content: 1 + + # Allow no space after comment marker to distinguish example variables from + # descriptive comments. + comments-indentation: disable + + # Do not require document start marker + document-start: false + + indentation: + # Defaults + # spaces: consistent + # indent-sequences: true + # check-multi-line-strings: false + + # Require 2 space indentation + spaces: 2 + + # Require consistent indentation within a file, either indented or not + indent-sequences: consistent + + line-length: + # Defaults + # max: 80 + # allow-non-breakable-words: true + # allow-non-breakable-inline-mappings: false + + # Lines should generally be kept to under 150 characters. Comments in + # code can change this setting for individual files. + max: 150 + + # Allow long lines in the case where reasonable line breaks are not + # possible, such as URLs or encoded strings. + allow-non-breakable-inline-mappings: false + + # Do not require new line at end of files + new-line-at-end-of-file: false diff --git a/tests/static/pytest.ini b/tests/static/pytest.ini new file mode 100644 index 0000000..abc90e0 --- /dev/null +++ b/tests/static/pytest.ini @@ -0,0 +1,12 @@ +[pytest] +norecursedirs = + .* + __pycache__ + cover + docs +python_files = + test*.py +addopts = + --cov=. + --cov-report=term +--cov-report=html diff --git a/tests/static/requirements.txt b/tests/static/requirements.txt new file mode 100644 index 0000000..2b09736 --- /dev/null +++ b/tests/static/requirements.txt @@ -0,0 +1,8 @@ +ansible==12.1.0 +tox +flake8 +pylint +pytest +PyYAML +yamllint +setuptools-lint diff --git a/tests/static/setup.py b/tests/static/setup.py new file mode 100644 index 0000000..e041ae1 --- /dev/null +++ b/tests/static/setup.py @@ -0,0 +1,463 @@ +"""A setuptools based setup module. 
+ +""" +from __future__ import print_function + +import os +import fnmatch +import re +import sys +import subprocess +import yaml + +# Always prefer setuptools over distutils +from setuptools import setup, Command +from setuptools_lint.setuptools_command import PylintCommand +from six import string_types +from six.moves import reload_module +from yamllint.config import YamlLintConfig +from yamllint.cli import Format +from yamllint import linter + +rootdir = os.path.join(os.getcwd(), '../..') + + +def find_files(base_dir, exclude_dirs, include_dirs, file_regex): + ''' find files matching file_regex ''' + found = [] + exclude_regex = '' + include_regex = '' + + if exclude_dirs is not None: + exclude_regex = r'|'.join([fnmatch.translate(x) for x in exclude_dirs]) or r'$.' + + # Don't use include_dirs, it is broken + if include_dirs is not None: + include_regex = r'|'.join([fnmatch.translate(x) for x in include_dirs]) or r'$.' + + for root, dirs, files in os.walk(base_dir): + if exclude_dirs is not None: + # filter out excludes for dirs + dirs[:] = [d for d in dirs if not re.match(exclude_regex, d)] + + if include_dirs is not None: + # filter for includes for dirs + dirs[:] = [d for d in dirs if re.match(include_regex, d)] + + matches = [os.path.join(root, f) for f in files if re.search(file_regex, f) is not None] + found.extend(matches) + + return found + + +def recursive_search(search_list, field): + """ + Takes a list with nested dicts, and searches all dicts for a key of the + field provided. If the items in the list are not dicts, the items are not + processed. + """ + fields_found = [] + + for item in search_list: + if isinstance(item, dict): + for key, value in item.items(): + if key == field: + fields_found.append(value) + elif isinstance(value, list): + results = recursive_search(value, field) + for result in results: + fields_found.append(result) + + return fields_found + + +def find_playbooks(): + ''' find Ansible playbooks''' + all_playbooks = set() + included_playbooks = set() + + exclude_dirs = ('adhoc', 'tasks', 'archive') + for yaml_file in find_files( + os.path.join(rootdir, 'notactivatedyet'), + exclude_dirs, None, r'\.ya?ml$'): + with open(yaml_file, 'r') as contents: + for task in yaml.safe_load(contents) or {}: + if not isinstance(task, dict): + # Skip yaml files which are not a dictionary of tasks + continue + if 'include' in task or 'import_playbook' in task: + # Add the playbook and capture included playbooks + all_playbooks.add(yaml_file) + if 'include' in task: + directive = task['include'] + else: + directive = task['import_playbook'] + included_file_name = directive.split()[0] + included_file = os.path.normpath( + os.path.join(os.path.dirname(yaml_file), + included_file_name)) + included_playbooks.add(included_file) + elif 'hosts' in task: + all_playbooks.add(yaml_file) + return all_playbooks, included_playbooks + + +class AADYamlLint(Command): + ''' Command to run yamllint ''' + description = "Run yamllint tests" + user_options = [ + ('excludes=', 'e', 'directories to exclude'), + ('config-file=', 'c', 'config file to use'), + ('format=', 'f', 'format to use (standard, parsable)'), + ] + + def initialize_options(self): + ''' initialize_options ''' + # Reason: Defining these attributes as a part of initialize_options is + # consistent with upstream usage + # Status: permanently disabled + # pylint: disable=attribute-defined-outside-init + self.excludes = None + self.config_file = None + self.format = None + + def finalize_options(self): + ''' finalize_options ''' + 
# Reason: These attributes are defined in initialize_options and this + # usage is consistant with upstream usage + # Status: permanently disabled + # pylint: disable=attribute-defined-outside-init + if isinstance(self.excludes, string_types): + self.excludes = self.excludes.split(',') + if self.format is None: + self.format = 'standard' + assert (self.format in ['standard', 'parsable']), ( + 'unknown format {0}.'.format(self.format)) + if self.config_file is None: + self.config_file = '.yamllint' + assert os.path.isfile(self.config_file), ( + 'yamllint config file {0} does not exist.'.format(self.config_file)) + + def run(self): + ''' run command ''' + if self.excludes is not None: + print("Excludes:\n{0}".format(yaml.dump(self.excludes, default_flow_style=False))) + + config = YamlLintConfig(file=self.config_file) + + has_errors = False + has_warnings = False + + if self.format == 'parsable': + format_method = Format.parsable + else: + format_method = Format.standard_color + + for yaml_file in find_files(rootdir, self.excludes, None, r'\.ya?ml$'): + first = True + with open(yaml_file, 'r') as contents: + for problem in linter.run(contents, config): + if first and self.format != 'parsable': + print('\n{0}:'.format(os.path.relpath(yaml_file))) + first = False + + print(format_method(problem, yaml_file)) + if problem.level == linter.PROBLEM_LEVELS[2]: + has_errors = True + elif problem.level == linter.PROBLEM_LEVELS[1]: + has_warnings = True + + if has_errors or has_warnings: + print('yamllint issues found') + raise SystemExit(1) + +class AADAnsiblePylint(PylintCommand): + ''' Class to override the default behavior of PylintCommand ''' + + # Reason: This method needs to be an instance method to conform to the + # overridden method's signature + # Status: permanently disabled + # pylint: disable=no-self-use + def find_all_modules(self): + ''' find all python files to test ''' + exclude_dirs = ('.tox', 'test', 'tests', 'git') + modules = [] + for match in find_files(rootdir, exclude_dirs, None, r'\.py$'): + package = os.path.basename(match).replace('.py', '') + modules.append(('ansible_agnostic_deployer', package, match)) + return modules + + def get_finalized_command(self, cmd): + ''' override get_finalized_command to ensure we use our + find_all_modules method ''' + if cmd == 'build_py': + return self + + # Reason: This method needs to be an instance method to conform to the + # overridden method's signature + # Status: permanently disabled + # pylint: disable=no-self-use + def with_project_on_sys_path(self, func, func_args, func_kwargs): + ''' override behavior, since we don't need to build ''' + return func(*func_args, **func_kwargs) + +class AADGenerateValidation(Command): + ''' Command to run generated module validation''' + description = "Run generated module validation" + user_options = [] + + def initialize_options(self): + ''' initialize_options ''' + pass + + def finalize_options(self): + ''' finalize_options ''' + pass + + # self isn't used but I believe is required when it is called. + # pylint: disable=no-self-use + def run(self): + ''' run command ''' + # find the files that call generate + generate_files = find_files('roles', + ['inventory', + 'test', + 'playbooks', + 'utils'], + None, + 'generate.py$') + + if len(generate_files) < 1: + print('Did not find any code generation. 
Please verify module code generation.') # noqa: E501 + raise SystemExit(1) + + errors = False + for gen in generate_files: + print('Checking generated module code: {0}'.format(gen)) + try: + sys.path.insert(0, os.path.dirname(gen)) + # we are importing dynamically. This isn't in + # the python path. + # pylint: disable=import-error + import generate + reload_module(generate) + generate.verify() + except generate.GenerateAnsibleException as gae: + print(gae.args) + errors = True + + if errors: + print('Found errors while generating module code.') + raise SystemExit(1) + + print('\nAll generate scripts passed.\n') + + +class AADSyntaxCheck(Command): + ''' Command to run Ansible syntax check''' + description = "Run Ansible syntax check" + user_options = [] + + # Colors + FAIL = '\033[31m' # Red + ENDC = '\033[0m' # Reset + + def initialize_options(self): + ''' initialize_options ''' + pass + + def finalize_options(self): + ''' finalize_options ''' + pass + + def deprecate_jinja2_in_when(self, yaml_contents, yaml_file): + ''' Check for Jinja2 templating delimiters in when conditions ''' + test_result = False + failed_items = [] + + search_results = recursive_search(yaml_contents, 'when') + for item in search_results: + try: + if isinstance(item, str): + if '{{' in item or '{%' in item: + failed_items.append(item) + if isinstance(item, bool): + continue + else: + for sub_item in item: + if '{{' in sub_item or '{%' in sub_item: + failed_items.append(sub_item) + except: + print('{}Error: Type of \'when\' not recognized' + ' File: {}'.format(self.FAIL, yaml_file)) + print(' Found: "{}"'.format(item)) + test_result = True + + if len(failed_items) > 0: + print('{}Error: Usage of Jinja2 templating delimiters in when ' + 'conditions is deprecated in Ansible 2.3.\n' + ' File: {}'.format(self.FAIL, yaml_file)) + for item in failed_items: + print(' Found: "{}"'.format(item)) + print(self.ENDC) + test_result = True + + return test_result + + def deprecate_include(self, yaml_contents, yaml_file): + ''' Check for usage of include directive ''' + test_result = False + + search_results = recursive_search(yaml_contents, 'include') + + if len(search_results) > 0: + print('{}Error: The `include` directive is deprecated in Ansible 2.4.\n' + 'https://github.com/ansible/ansible/blob/devel/CHANGELOG.md\n' + ' File: {}'.format(self.FAIL, yaml_file)) + for item in search_results: + print(' Found: "include: {}"'.format(item)) + print(self.ENDC) + test_result = True + + return test_result + + def run(self): + ''' run command ''' + + has_errors = False + + print('#' * 60) + print('Ansible Deprecation Checks') + exclude_dirs = ('adhoc', '.tox', 'archive', 'files') + for yaml_file in find_files( + rootdir, exclude_dirs, None, r'\.ya?ml$'): + with open(yaml_file, 'r') as contents: + yaml_contents = yaml.safe_load(contents) + if not isinstance(yaml_contents, list): + continue + + # Check for Jinja2 templating delimiters in when conditions + result = self.deprecate_jinja2_in_when(yaml_contents, yaml_file) + has_errors = result or has_errors + + # Check for usage of include: directive + result = self.deprecate_include(yaml_contents, yaml_file) + has_errors = result or has_errors + + if not has_errors: + print('...PASSED') + + all_playbooks, included_playbooks = find_playbooks() + + print('#' * 60) + print('Invalid Playbook Include Checks') + invalid_include = [] + for playbook in included_playbooks: + # Ignore imported playbooks in 'common', 'private' and 'init'. 
It is + # expected that these locations would be imported by entry point + # playbooks. + # Ignore playbooks in 'aws', 'gcp' and 'openstack' because these + # playbooks do not follow the same component entry point structure. + # Ignore deploy_cluster.yml and prerequisites.yml because these are + # entry point playbooks but are imported by playbooks in the cloud + # provisioning playbooks. + ignored = ('common', 'private', 'init', + 'aws', 'gcp', 'openstack', + 'deploy_cluster.yml', 'prerequisites.yml') + if any(x in playbook for x in ignored): + continue + invalid_include.append(playbook) + if invalid_include: + print('{}Invalid included playbook(s) found. Please ensure' + ' component entry point playbooks are not included{}'.format(self.FAIL, self.ENDC)) + invalid_include.sort() + for playbook in invalid_include: + print('{}{}{}'.format(self.FAIL, playbook, self.ENDC)) + has_errors = True + + if not has_errors: + print('...PASSED') + + print('#' * 60) + print('Ansible Playbook Entry Point Syntax Checks') + # Evaluate the difference between all playbooks and included playbooks + entrypoint_playbooks = sorted(all_playbooks.difference(included_playbooks)) + print('Entry point playbook count: {}'.format(len(entrypoint_playbooks))) + for playbook in entrypoint_playbooks: + print('-' * 60) + print('Syntax checking playbook: {}'.format(playbook)) + + # Error on any entry points in 'common' or 'private' + invalid_entry_point = ('common', 'private') + if any(x in playbook for x in invalid_entry_point): + print('{}Invalid entry point playbook or orphaned file. Entry' + ' point playbooks are not allowed in \'common\' or' + ' \'private\' directories{}'.format(self.FAIL, self.ENDC)) + has_errors = True + + # --syntax-check each entry point playbook + try: + # Create a host group list to avoid WARNING on unmatched host patterns + tox_ansible_inv = os.environ['TOX_ANSIBLE_INV_PATH'] + subprocess.check_output( + ['ansible-playbook', '-i', tox_ansible_inv, + '--syntax-check', playbook, '-e', '@{}_extras'.format(tox_ansible_inv), + '-e', 'ANSIBLE_REPO_PATH={}'.format(os.path.join(rootdir, 'ansible'))] + , + ) + except subprocess.CalledProcessError as cpe: + print('{}Execution failed: {}{}'.format( + self.FAIL, cpe, self.ENDC)) + has_errors = True + + if has_errors: + raise SystemExit(1) + + +class UnsupportedCommand(Command): + ''' Basic Command to override unsupported commands ''' + user_options = [] + + # Reason: This method needs to be an instance method to conform to the + # overridden method's signature + # Status: permanently disabled + # pylint: disable=no-self-use + def initialize_options(self): + ''' initialize_options ''' + pass + + # Reason: This method needs to be an instance method to conform to the + # overridden method's signature + # Status: permanently disabled + # pylint: disable=no-self-use + def finalize_options(self): + ''' initialize_options ''' + pass + + # Reason: This method needs to be an instance method to conform to the + # overridden method's signature + # Status: permanently disabled + # pylint: disable=no-self-use + def run(self): + ''' run command ''' + print("Unsupported command for ansible_agnostic_deployer") + + +setup( + name='ansible_agnostic_deployer', + license="Apache 2.0", + cmdclass={ + 'install': UnsupportedCommand, + 'develop': UnsupportedCommand, + 'build': UnsupportedCommand, + 'build_py': UnsupportedCommand, + 'build_ext': UnsupportedCommand, + 'egg_info': UnsupportedCommand, + 'sdist': UnsupportedCommand, + 'lint': AADAnsiblePylint, + 'yamllint': AADYamlLint, 
+ 'generate_validation': AADGenerateValidation, + 'ansible_syntax': AADSyntaxCheck, + }, + packages=[], +) diff --git a/tests/static/syntax-check.sh b/tests/static/syntax-check.sh new file mode 100755 index 0000000..322b1e8 --- /dev/null +++ b/tests/static/syntax-check.sh @@ -0,0 +1,232 @@ +#!/bin/bash +set -eo pipefail + +ORIG=$(cd $(dirname $0); cd ../..; pwd) +ansible_path=${ORIG}/ansible +static=${ORIG}/tests/static + +cd ${ORIG} + +output=$(mktemp) + +baseref=$1 +headref=$2 + +# find_yamllint() finds the closest .yamllint file in current or +# parent dirs. +# The result is printed on stdout. +# If no yamllint is found, nothing is printed. +find_yamllint() { + local f=$1 + local dir + if [ -d "${f}" ]; then + dir="${f}" + else + dir="$(dirname "${f}")" + fi + + while true; do + if [ -e "${dir}/.yamllint" ]; then + echo "${dir}/.yamllint" + return 0 + fi + + # not found + if [ "${dir}" = "." ] || [ "${dir}" = "${ORIG}" ] || [ "${dir}" = "/" ]; then + return 2 + fi + + # Go one dir up + dir=$(dirname "${dir}") + done +} + +# Given a specific directory or file, this function runs yamllint +# with the appropriate .yamllint conf file. +do_yamllint() { + local f=$1 + local conf + + if [ -f "${f}" ]; then + [[ $f =~ \.ya?ml$ ]] || [[ $f =~ \.yamllint$ ]] || return + fi + + if ! conf=$(find_yamllint "${f}"); then + echo "WARNING ........ yamllint: No conf .yamllint found for ${f}" + return + fi + + ( + f=$(realpath "${f}") + cd $(dirname ${conf}) + yamllint "${f}" &> $output + ) + + if [ $? = 0 ]; then + echo "OK .......... yamllint ${f}" + else + echo "FAIL ........ yamllint ${f}" + echo + cat $output + exit 2 + fi +} + +do_ansible_syntax() { + i="${1}" + item=$(basename $(dirname ${i}))/$(basename ${i}) + + extra_args=() + + config=$(basename $(dirname "${i}")) + + if ! egrep --quiet ^env_type: ${i}; then + echo "No env_type found in ${i}" + fi + env_type=$(egrep ^env_type: ${i}|cut -d' ' -f 2) + + # Ansible Workshops AKA as Linklight needs to be downloaded + if [ "${env_type}" = linklight ] || [ "${env_type}" = ansible-workshops ] || [ "${env_type}" = aap2-ansible-workshops ]; then + if [ ! -d ${ansible_path}/workdir/${env_type} ]; then + set +e + + git clone --branch devel \ + https://github.com/ansible/workshops.git \ + ${ansible_path}/workdir/${env_type} &> $output + + if [ $? = 0 ]; then + commit=$(cd ${ansible_path}/workdir/${env_type}; PAGER=cat git show --no-patch --format=oneline --no-color) + echo "OK .......... ${item} / Download ansible-workshop -- $commit" + else + echo "FAIL ........ ${item} / Download ansible-workshop" + echo + cat $output + exit 2 + fi + + set -e + fi + touch $(dirname "${i}")/env_secret_vars.yml + extra_args=( + -e ANSIBLE_REPO_PATH=${ansible_path} + ) + fi + + if [ -e "${ansible_path}/configs/${env_type}/hosts" ]; then + inventory=(-i "${ansible_path}/configs/${env_type}/hosts") + else + inventory=(-i "${static}/tox-inventory.txt") + fi + + # Setup galaxy roles and collections, make sure it works + set +e + ansible-playbook --tags galaxy_roles \ + "${inventory[@]}" \ + ${ansible_path}/main.yml \ + ${extra_args[@]} \ + -e @${i} &> $output + + if [ $? = 0 ]; then + echo "OK .......... Galaxy roles ${item}" + else + echo "FAIL ........ Galaxy roles ${item}" + echo + cat $output + exit 2 + fi + + for playbook in \ + ${ansible_path}/main.yml \ + ${ansible_path}/destroy.yml; do + + ansible-playbook --syntax-check \ + --list-tasks \ + "${inventory[@]}" \ + "${playbook}" \ + ${extra_args[@]} \ + -e @${i} &> $output + if [ $? = 0 ]; then + echo "OK .......... 
syntax-check ${item} / ${playbook}" + else + echo "FAIL ........ syntax-check ${item} / ${playbook}" + echo + cat $output + exit 2 + fi + done + # lifecycle (stop / start) + + for ACTION in stop start status; do + ansible-playbook --syntax-check \ + --list-tasks \ + "${inventory[@]}" \ + ${ansible_path}/lifecycle_entry_point.yml \ + ${extra_args[@]} \ + -e ACTION=${ACTION} \ + -e @${i} &> $output + if [ $? = 0 ]; then + echo "OK .......... syntax-check ${item} / lifecycle ${ACTION}" + else + echo "FAIL ........ syntax-check ${item} / lifecycle ${ACTION}" + echo + cat $output + exit 2 + fi + done +} + +if [[ ${baseref} ]]; then + ########################################################## + # PULL REQUEST + # SHORT version for pull_request action, only a few files + ########################################################## + changed_files=$(mktemp) + # look for Added or modified files only + git diff \ + --no-commit-id \ + --name-only \ + --diff-filter=AM \ + origin/${baseref}...${headref} > $changed_files + + set +e + while read f; do + if [[ ${f} =~ .*\.ya?ml ]]; then + do_yamllint "${f}" + fi + done < ${changed_files} + + # look at all files of the PR, then filter the configs + changed_configs=$(mktemp) + + git diff \ + --no-commit-id \ + --name-only \ + origin/${baseref}...${headref} \ + | grep ansible/configs \ + | perl -pe 's{.*ansible/configs/([^/]+).*}{$1}' \ + | sort \ + | uniq > ${changed_configs} + + while read f; do + for i in $(find ansible/configs/${f} ! -path "ansible/configs/archive/*" -name 'sample_vars*.y*ml' | sort); do + do_ansible_syntax "${i}" + done + done < ${changed_configs} + set -e +else + set +e + ########################################################## + # Push + # LONG version for push action + ########################################################## + for YAMLLINT in $(find ansible -name .yamllint ! -path "ansible/configs/archive/*"); do + do_yamllint "$(dirname ${YAMLLINT})" + done + set -e + + for i in $(find ${ORIG}/ansible/configs ! 
-path "${ORIG}/ansible/configs/archive/*" -name 'sample_vars*.y*ml' | sort); do + do_ansible_syntax "${i}" + done +fi + +exit 0 diff --git a/tests/static/test-requirements.txt b/tests/static/test-requirements.txt new file mode 100644 index 0000000..88ffcf7 --- /dev/null +++ b/tests/static/test-requirements.txt @@ -0,0 +1,57 @@ +ansible==12.1.0 +ansible-compat==25.8.2 +ansible-core==2.19.3 +ansible-lint==25.9.2 +appdirs==1.4.4 +astroid==3.3.11 +attrs==25.4.0 +black==25.9.0 +bracex==2.6 +cachetools==6.2.0 +cffi==2.0.0 +chardet==5.2.0 +click==8.3.0 +colorama==0.4.6 +cryptography==46.0.2 +dill==0.4.0 +distlib==0.4.0 +distro==1.9.0 +filelock==3.20.0 +flake8==7.3.0 +importlib_metadata==8.7.0 +iniconfig==2.1.0 +isort==6.1.0 +Jinja2==3.1.6 +jsonschema==4.25.1 +jsonschema-specifications==2025.9.1 +lazy-object-proxy==1.12.0 +MarkupSafe==3.0.3 +mccabe==0.7.0 +mypy_extensions==1.1.0 +packaging==25.0 +pathspec==0.12.1 +platformdirs==4.5.0 +pluggy==1.6.0 +py==1.11.0 +pycodestyle==2.14.0 +pycparser==2.23 +pyflakes==3.4.0 +pylint==3.3.9 +pyproject-api==1.9.1 +pytokens==0.1.10 +PyYAML==6.0.3 +referencing==0.36.2 +resolvelib==1.2.0 +rpds-py==0.27.1 +ruamel.yaml==0.18.15 +ruamel.yaml.clib==0.2.14 +six==1.17.0 +subprocess-tee==0.4.2 +toml==0.10.2 +tomlkit==0.13.3 +tox==4.30.3 +virtualenv==20.34.0 +wcmatch==10.1 +wrapt==1.17.3 +yamllint==1.37.1 +zipp==3.23.0 diff --git a/tests/static/tox-inventory.txt b/tests/static/tox-inventory.txt new file mode 100644 index 0000000..a502518 --- /dev/null +++ b/tests/static/tox-inventory.txt @@ -0,0 +1,67 @@ +[OSEv3] +localhost + + +[OSEv3:children] +etcd +masters +nodes +glusterfs +glusterfs_registry + +[etcd] +localhost + +[masters] +localhost + +[nodes] +localhost + +[glusterfs] +localhost + +[glusterfs_registry] +localhost + +[towers] +localhost + +[bastions] +localhost + +[infranodes] +localhost + +[support] +localhost + +[provisioners] +localhost + +[windows] +localhost + +[network] +localhost + +[clientvms] +localhost + +[newnodes] +localhost + +[managed_nodes] +localhost + +[control_nodes] +localhost + +[security_connection_check] +localhost + +[satellites] +localhost + +[utility] +localhost diff --git a/tests/static/tox-inventory.txt_extras b/tests/static/tox-inventory.txt_extras new file mode 100644 index 0000000..99316ce --- /dev/null +++ b/tests/static/tox-inventory.txt_extras @@ -0,0 +1,3 @@ +--- +hostvars: + localhost: {} \ No newline at end of file diff --git a/tests/static/tox.ini b/tests/static/tox.ini new file mode 100644 index 0000000..00a8ff3 --- /dev/null +++ b/tests/static/tox.ini @@ -0,0 +1,19 @@ +[tox] +minversion=2.3.1 +envlist = + #py{27,35}-{flake8,pylint,unit} + #py27-{yamllint,ansible_syntax,generate_validation} + py312-ansible_syntax + # py312-ansible_syntax +skipsdist=True +skip_missing_interpreters=True + +[testenv] +skip_install=True +setenv = TOX_ANSIBLE_INV_PATH = {toxinidir}/tox-inventory.txt +allowlist_externals = */cloud_vm_workloads/tests/static/syntax-check.sh +deps = + -r{toxinidir}/test-requirements.txt + +commands = + ansible_syntax: {toxinidir}/syntax-check.sh {posargs}