diff --git a/.circleci/config.yml b/.circleci/config.yml index 20c1262..1f833e9 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -1,3 +1,4 @@ +--- version: 2 jobs: build: @@ -6,7 +7,7 @@ jobs: environment: PIPENV_VENV_IN_PROJECT: true - image: mockserver/mockserver - name: mockserver.mockserver + name: mockserver steps: - checkout # check out source code to working directory - run: @@ -16,11 +17,18 @@ jobs: - run: command: | cp .circleci/mockserver.py user_variables.py + cp .circleci/mockserver.yaml user_variables.yaml + cp .circleci/mockserver.json user_variables.json pip install pipenv pipenv install --dev - run: command: | - pipenv run python -m unittest discover + COVERAGE_FILE=standard.coverage pipenv run coverage run -m unittest discover + COVERAGE_FILE=special1.coverage pipenv run coverage run -m unittest tests/special_test_settings_no_variables.py + pipenv run coverage combine *.coverage + pipenv run coverage html + mkdir test-results + tar -cf test-results/coverage_report.tar htmlcov/ - store_test_results: path: test-results - store_artifacts: diff --git a/.circleci/mockserver.json b/.circleci/mockserver.json new file mode 100644 index 0000000..f6b7389 --- /dev/null +++ b/.circleci/mockserver.json @@ -0,0 +1,14 @@ +{ + "FULL_SET": { + "mockserver1": { + "url": "mockserver.philly:1080", + "tenant": { + "tenant1": "mockenv" + }, + "verify_ssl": false, + "is_managed": true, + "cluster_token": "cluster_token_here" + } + }, + "LOG_LEVEL": "WARNING" +} \ No newline at end of file diff --git a/.circleci/mockserver.py b/.circleci/mockserver.py index acfb7fd..51c2de3 100644 --- a/.circleci/mockserver.py +++ b/.circleci/mockserver.py @@ -3,18 +3,19 @@ "mockserver1": { "url": "mockserver:1080", "tenant": { - "tenant1": "mockserver", + "tenant1": "mockenv", }, "api_token": { "tenant1": "sample_api_token", }, "verify_ssl": False, - "is_managed": False, + "is_managed": True, "cluster_token": "Required for Cluster Operations in Managed" } } -LOG_LEVEL="INFO" +LOG_OUTPUT = "FILE" +LOG_LEVEL = "INFO" # ROLE TYPE KEYS # access_env diff --git a/.circleci/mockserver.yaml b/.circleci/mockserver.yaml new file mode 100644 index 0000000..96e95e3 --- /dev/null +++ b/.circleci/mockserver.yaml @@ -0,0 +1,37 @@ +--- +FULL_SET: + mockserver1: + url: mockserver:1080 + tenant: + tenant1: mockenv + api_token: + tenant1: "sample_api_token" + verify_ssl: False + is_managed: True + cluster_token: "Required for Cluster Operations in Managed" + + +LOG_ENABLED: True +LOG_LEVEL: "DEBUG" +LOG_OUTPUT: "FILE" + +# ROLE TYPE KEYS +# access_env +# change_settings +# install_agent +# view_logs +# view_sensitive +# change_sensitive + +USER_GROUPS: + role_types: + access_env: accessenv + change_settings: changesettings + view_logs: logviewer + view_sensitive: viewsensitive + role_tenants: + - nonprod + - prod + +USER_GROUP_TEMPLATE: prefix_{USER_TYPE}_{TENANT}_{APP_NAME}_suffix +DEFAULT_TIMEZONE: America/Chicago diff --git a/.coveragerc b/.coveragerc index b961959..93a6d95 100644 --- a/.coveragerc +++ b/.coveragerc @@ -1,7 +1,7 @@ [run] omit = # omit anything in a lib/python directory anywhere - */.local/* + */lib/python*/* # omit everything in /usr /usr/* # omit in the test tools diff --git a/.github/workflows/linter.yml b/.github/workflows/linter.yml index d093e8a..612cdc6 100644 --- a/.github/workflows/linter.yml +++ b/.github/workflows/linter.yml @@ -1,3 +1,4 @@ +--- ########################### ########################### ## Linter GitHub Actions ## @@ -40,12 +41,34 @@ jobs: - name: Checkout Code uses:
actions/checkout@v2 + - run: mkdir -p super-linter.report ################################ # Run Linter against code base # ################################ - name: Lint Code Base - uses: docker://github/super-linter:v3 + uses: github/super-linter@v3.11.0 env: - VALIDATE_ALL_CODEBASE: false + VALIDATE_ALL_CODEBASE: true DEFAULT_BRANCH: master + LINTER_RULES_PATH: . + PYTHON_PYLINT_CONFIG_FILE: .pylintrc GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + LOG_FILE: super-linter.log + OUTPUT_FOLDER: super-linter.report + OUTPUT_FORMAT: tap + OUTPUT_DETAILS: detailed + VALIDATE_YAML: true + VALIDATE_JSON: true + VALIDATE_EDITORCONFIG: true + VALIDATE_MARKDOWN: true + VALIDATE_PYTHON_PYLINT: true + - run: ls ######################## # Retrieve tap reports # ######################## + - name: Archive super-linter tap reports + uses: actions/upload-artifact@v2 + if: failure() + with: + name: linter-report + path: super-linter.report diff --git a/.gitignore b/.gitignore index 3465291..454aaa9 100644 --- a/.gitignore +++ b/.gitignore @@ -11,9 +11,12 @@ templates/* **venv** # Framework log files ***.log** +***.tap** +.coverage -user_variables.py +user_variables.* sandbox_script.py +htmlcov/* !variable_sets/template.py !scripts/template.py diff --git a/.pylintrc b/.pylintrc index 2e0f5c5..d7f9509 100644 --- a/.pylintrc +++ b/.pylintrc @@ -1 +1,596 @@ -init-hook='from sys.path import append; from os import getcwd; append(getcwd())' \ No newline at end of file +[MASTER] + +# A comma-separated list of package or module names from where C extensions may +# be loaded. Extensions are loading into the active Python interpreter and may +# run arbitrary code. +extension-pkg-whitelist= + +# Specify a score threshold to be exceeded before program exits with error. +fail-under=10 + +# Add files or directories to the blacklist. They should be base names, not +# paths. +ignore=CVS + +# Add files or directories matching the regex patterns to the blacklist. The +# regex matches against base names, not paths. +ignore-patterns= + +# Python code to execute, usually for sys.path manipulation such as +# pygtk.require(). +#init-hook= +#init-hook='from sys.path import append; from os import getcwd; append(getcwd())' +init-hook="from pylint.config import find_pylintrc; import os, sys; sys.path.append(os.path.dirname(find_pylintrc()))" + +# Use multiple processes to speed up Pylint. Specifying 0 will auto-detect the +# number of processors available to use. +jobs=1 + +# Control the amount of potential inferred values when inferring a single +# object. This can help the performance when dealing with large functions or +# complex, nested conditions. +limit-inference-results=100 + +# List of plugins (as comma separated values of python module names) to load, +# usually to register additional checkers. +load-plugins= + +# Pickle collected data for later comparisons. +persistent=yes + +# When enabled, pylint would attempt to guess common misconfiguration and emit +# user-friendly hints instead of false-positive error messages. +suggestion-mode=yes + +# Allow loading of arbitrary C extensions. Extensions are imported into the +# active Python interpreter and may run arbitrary code. +unsafe-load-any-extension=no + + +[MESSAGES CONTROL] + +# Only show warnings with the listed confidence levels. Leave empty to show +# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED. +confidence= + +# Disable the message, report, category or checker with the given id(s).
You +# can either give multiple identifiers separated by comma (,) or put this +# option multiple times (only on the command line, not in the configuration +# file where it should appear only once). You can also use "--disable=all" to +# disable everything first and then reenable specific checks. For example, if +# you want to run only the similarities checker, you can use "--disable=all +# --enable=similarities". If you want to run only the classes checker, but have +# no Warning level messages displayed, use "--disable=all --enable=classes +# --disable=W". +disable=print-statement, + parameter-unpacking, + unpacking-in-except, + old-raise-syntax, + backtick, + long-suffix, + old-ne-operator, + old-octal-literal, + import-star-module-level, + non-ascii-bytes-literal, + raw-checker-failed, + bad-inline-option, + locally-disabled, + file-ignored, + suppressed-message, + useless-suppression, + deprecated-pragma, + use-symbolic-message-instead, + apply-builtin, + basestring-builtin, + buffer-builtin, + cmp-builtin, + coerce-builtin, + execfile-builtin, + file-builtin, + long-builtin, + raw_input-builtin, + reduce-builtin, + standarderror-builtin, + unicode-builtin, + xrange-builtin, + coerce-method, + delslice-method, + getslice-method, + setslice-method, + no-absolute-import, + old-division, + dict-iter-method, + dict-view-method, + next-method-called, + metaclass-assignment, + indexing-exception, + raising-string, + reload-builtin, + oct-method, + hex-method, + nonzero-method, + cmp-method, + input-builtin, + round-builtin, + intern-builtin, + unichr-builtin, + map-builtin-not-iterating, + zip-builtin-not-iterating, + range-builtin-not-iterating, + filter-builtin-not-iterating, + using-cmp-argument, + eq-without-hash, + div-method, + idiv-method, + rdiv-method, + exception-message-attribute, + invalid-str-codec, + sys-max-int, + bad-python3-import, + deprecated-string-function, + deprecated-str-translate-call, + deprecated-itertools-function, + deprecated-types-field, + next-method-defined, + dict-items-not-iterating, + dict-keys-not-iterating, + dict-values-not-iterating, + deprecated-operator-function, + deprecated-urllib-function, + xreadlines-attribute, + deprecated-sys-function, + exception-escape, + comprehension-escape + +# Enable the message, report, category or checker with the given id(s). You can +# either give multiple identifier separated by comma (,) or put this option +# multiple time (only on the command line, not in the configuration file where +# it should appear only once). See also the "--disable" option for examples. +enable=c-extension-no-member + + +[REPORTS] + +# Python expression which should return a score less than or equal to 10. You +# have access to the variables 'error', 'warning', 'refactor', and 'convention' +# which contain the number of messages in each category, as well as 'statement' +# which is the total number of statements analyzed. This score is used by the +# global evaluation report (RP0004). +evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10) + +# Template used to display messages. This is a python new-style format string +# used to format the message information. See doc for all details. +#msg-template= + +# Set the output format. Available formats are text, parseable, colorized, json +# and msvs (visual studio). You can also give a reporter class, e.g. +# mypackage.mymodule.MyReporterClass. +output-format=text + +# Tells whether to display a full report or only the messages. 
+reports=no + +# Activate the evaluation score. +score=yes + + +[REFACTORING] + +# Maximum number of nested blocks for function / method body +max-nested-blocks=5 + +# Complete name of functions that never returns. When checking for +# inconsistent-return-statements if a never returning function is called then +# it will be considered as an explicit return statement and no message will be +# printed. +never-returning-functions=sys.exit + + +[STRING] + +# This flag controls whether inconsistent-quotes generates a warning when the +# character used as a quote delimiter is used inconsistently within a module. +check-quote-consistency=no + +# This flag controls whether the implicit-str-concat should generate a warning +# on implicit string concatenation in sequences defined over several lines. +check-str-concat-over-line-jumps=no + + +[VARIABLES] + +# List of additional names supposed to be defined in builtins. Remember that +# you should avoid defining new builtins when possible. +additional-builtins= + +# Tells whether unused global variables should be treated as a violation. +allow-global-unused-variables=yes + +# List of strings which can identify a callback function by name. A callback +# name must start or end with one of those strings. +callbacks=cb_, + _cb + +# A regular expression matching the name of dummy variables (i.e. expected to +# not be used). +dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_ + +# Argument names that match this expression will be ignored. Default to name +# with leading underscore. +ignored-argument-names=_.*|^ignored_|^unused_ + +# Tells whether we should check for unused import in __init__ files. +init-import=no + +# List of qualified module names which can have objects that can redefine +# builtins. +redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io + + +[LOGGING] + +# The type of string formatting that logging methods do. `old` means using % +# formatting, `new` is for `{}` formatting. +logging-format-style=old + +# Logging modules to check that the string format arguments are in logging +# function parameter format. +logging-modules=logging + + +[FORMAT] + +# Expected format of line ending, e.g. empty (any line ending), LF or CRLF. +expected-line-ending-format= + +# Regexp for a line that is allowed to be longer than the limit. +ignore-long-lines=^\s*(# )?<?https?://\S+>?$ + +# Number of spaces of indent required inside a hanging or continued line. +indent-after-paren=4 + +# String used as indentation unit. This is usually "    " (4 spaces) or "\t" (1 +# tab). +indent-string='    ' + +# Maximum number of characters on a single line. +max-line-length=100 + +# Maximum number of lines in a module. +max-module-lines=1000 + +# List of optional constructs for which whitespace checking is disabled. `dict- +# separator` is used to allow tabulation in dicts, etc.: {1 : 1,\n222: 2}. +# `trailing-comma` allows a space between comma and closing bracket: (a, ). +# `empty-line` allows space-only lines. +no-space-check=trailing-comma, + dict-separator + +# Allow the body of a class to be on the same line as the declaration if body +# contains single statement. +single-line-class-stmt=no + +# Allow the body of an if to be on the same line as the test if there is no +# else. +single-line-if-stmt=no + + +[TYPECHECK] + +# List of decorators that produce context managers, such as +# contextlib.contextmanager. Add to this list to register other decorators that +# produce valid context managers.
+contextmanager-decorators=contextlib.contextmanager + +# List of members which are set dynamically and missed by pylint inference +# system, and so shouldn't trigger E1101 when accessed. Python regular +# expressions are accepted. +generated-members= + +# Tells whether missing members accessed in mixin class should be ignored. A +# mixin class is detected if its name ends with "mixin" (case insensitive). +ignore-mixin-members=yes + +# Tells whether to warn about missing members when the owner of the attribute +# is inferred to be None. +ignore-none=yes + +# This flag controls whether pylint should warn about no-member and similar +# checks whenever an opaque object is returned when inferring. The inference +# can return multiple potential results while evaluating a Python object, but +# some branches might not be evaluated, which results in partial inference. In +# that case, it might be useful to still emit no-member and other checks for +# the rest of the inferred objects. +ignore-on-opaque-inference=yes + +# List of class names for which member attributes should not be checked (useful +# for classes with dynamically set attributes). This supports the use of +# qualified names. +ignored-classes=optparse.Values,thread._local,_thread._local + +# List of module names for which member attributes should not be checked +# (useful for modules/projects where namespaces are manipulated during runtime +# and thus existing member attributes cannot be deduced by static analysis). It +# supports qualified module names, as well as Unix pattern matching. +ignored-modules= + +# Show a hint with possible names when a member name was not found. The aspect +# of finding the hint is based on edit distance. +missing-member-hint=yes + +# The minimum edit distance a name should have in order to be considered a +# similar match for a missing member name. +missing-member-hint-distance=1 + +# The total number of similar names that should be taken in consideration when +# showing a hint for a missing member. +missing-member-max-choices=1 + +# List of decorators that change the signature of a decorated function. +signature-mutators= + + +[MISCELLANEOUS] + +# List of note tags to take in consideration, separated by a comma. +notes=FIXME, + XXX + +# Regular expression of note tags to take in consideration. +#notes-rgx= + + +[BASIC] + +# Naming style matching correct argument names. +argument-naming-style=snake_case + +# Regular expression matching correct argument names. Overrides argument- +# naming-style. +#argument-rgx= + +# Naming style matching correct attribute names. +attr-naming-style=snake_case + +# Regular expression matching correct attribute names. Overrides attr-naming- +# style. +#attr-rgx= + +# Bad variable names which should always be refused, separated by a comma. +bad-names=foo, + bar, + baz, + toto, + tutu, + tata + +# Bad variable names regexes, separated by a comma. If names match any regex, +# they will always be refused +bad-names-rgxs= + +# Naming style matching correct class attribute names. +class-attribute-naming-style=any + +# Regular expression matching correct class attribute names. Overrides class- +# attribute-naming-style. +#class-attribute-rgx= + +# Naming style matching correct class names. +class-naming-style=PascalCase + +# Regular expression matching correct class names. Overrides class-naming- +# style. +#class-rgx= + +# Naming style matching correct constant names. +const-naming-style=UPPER_CASE + +# Regular expression matching correct constant names. 
Overrides const-naming- +# style. +#const-rgx= + +# Minimum line length for functions/classes that require docstrings, shorter +# ones are exempt. +docstring-min-length=-1 + +# Naming style matching correct function names. +function-naming-style=snake_case + +# Regular expression matching correct function names. Overrides function- +# naming-style. +#function-rgx= + +# Good variable names which should always be accepted, separated by a comma. +good-names=i, + j, + k, + ex, + Run, + _ + +# Good variable names regexes, separated by a comma. If names match any regex, +# they will always be accepted +good-names-rgxs= + +# Include a hint for the correct naming format with invalid-name. +include-naming-hint=no + +# Naming style matching correct inline iteration names. +inlinevar-naming-style=any + +# Regular expression matching correct inline iteration names. Overrides +# inlinevar-naming-style. +#inlinevar-rgx= + +# Naming style matching correct method names. +method-naming-style=snake_case + +# Regular expression matching correct method names. Overrides method-naming- +# style. +#method-rgx= + +# Naming style matching correct module names. +module-naming-style=snake_case + +# Regular expression matching correct module names. Overrides module-naming- +# style. +#module-rgx= + +# Colon-delimited sets of names that determine each other's naming style when +# the name regexes allow several styles. +name-group= + +# Regular expression which should only match function or class names that do +# not require a docstring. +no-docstring-rgx=^_ + +# List of decorators that produce properties, such as abc.abstractproperty. Add +# to this list to register other decorators that produce valid properties. +# These decorators are taken in consideration only for invalid-name. +property-classes=abc.abstractproperty + +# Naming style matching correct variable names. +variable-naming-style=snake_case + +# Regular expression matching correct variable names. Overrides variable- +# naming-style. +#variable-rgx= + + +[SIMILARITIES] + +# Ignore comments when computing similarities. +ignore-comments=yes + +# Ignore docstrings when computing similarities. +ignore-docstrings=yes + +# Ignore imports when computing similarities. +ignore-imports=no + +# Minimum lines number of a similarity. +min-similarity-lines=4 + + +[SPELLING] + +# Limits count of emitted suggestions for spelling mistakes. +max-spelling-suggestions=4 + +# Spelling dictionary name. Available dictionaries: none. To make it work, +# install the python-enchant package. +spelling-dict= + +# List of comma separated words that should not be checked. +spelling-ignore-words= + +# A path to a file that contains the private dictionary; one word per line. +spelling-private-dict-file= + +# Tells whether to store unknown words to the private dictionary (see the +# --spelling-private-dict-file option) instead of raising a message. +spelling-store-unknown-words=no + + +[IMPORTS] + +# List of modules that can be imported at any level, not just the top level +# one. +allow-any-import-level= + +# Allow wildcard imports from modules that define __all__. +allow-wildcard-with-all=no + +# Analyse import fallback blocks. This can be used to support both Python 2 and +# 3 compatible code, which means that the block might have code that exists +# only in one or another interpreter, leading to false positives when analysed. +analyse-fallback-blocks=no + +# Deprecated modules which should not be used, separated by a comma. 
+deprecated-modules=optparse,tkinter.tix + +# Create a graph of external dependencies in the given file (report RP0402 must +# not be disabled). +ext-import-graph= + +# Create a graph of every (i.e. internal and external) dependencies in the +# given file (report RP0402 must not be disabled). +import-graph= + +# Create a graph of internal dependencies in the given file (report RP0402 must +# not be disabled). +int-import-graph= + +# Force import order to recognize a module as part of the standard +# compatibility libraries. +known-standard-library= + +# Force import order to recognize a module as part of a third party library. +known-third-party=enchant + +# Couples of modules and preferred modules, separated by a comma. +preferred-modules= + + +[DESIGN] + +# Maximum number of arguments for function / method. +max-args=5 + +# Maximum number of attributes for a class (see R0902). +max-attributes=7 + +# Maximum number of boolean expressions in an if statement (see R0916). +max-bool-expr=5 + +# Maximum number of branch for function / method body. +max-branches=12 + +# Maximum number of locals for function / method body. +max-locals=15 + +# Maximum number of parents for a class (see R0901). +max-parents=7 + +# Maximum number of public methods for a class (see R0904). +max-public-methods=20 + +# Maximum number of return / yield for function / method body. +max-returns=6 + +# Maximum number of statements in function / method body. +max-statements=50 + +# Minimum number of public methods for a class (see R0903). +min-public-methods=2 + + +[CLASSES] + +# List of method names used to declare (i.e. assign) instance attributes. +defining-attr-methods=__init__, + __new__, + setUp, + __post_init__ + +# List of member names, which should be excluded from the protected access +# warning. +exclude-protected=_asdict, + _fields, + _replace, + _source, + _make + +# List of valid names for the first argument in a class method. +valid-classmethod-first-arg=cls + +# List of valid names for the first argument in a metaclass class method. +valid-metaclass-classmethod-first-arg=cls + + +[EXCEPTIONS] + +# Exceptions that will emit a warning when being caught. Defaults to +# "BaseException, Exception". +overgeneral-exceptions=BaseException, + Exception diff --git a/Documentation/GETTING_STARTED.md b/Documentation/GETTING_STARTED.md deleted file mode 100644 index e69de29..0000000 diff --git a/Documentation/HOWTO.md b/Documentation/HOWTO.md deleted file mode 100644 index e69de29..0000000 diff --git a/Documentation/INSTALLATION.md b/Documentation/INSTALLATION.md index 101c9a6..b06fd44 100644 --- a/Documentation/INSTALLATION.md +++ b/Documentation/INSTALLATION.md @@ -12,7 +12,7 @@ If you see a TODO for something related, please reach out to Aaron Philipose on ### Install independently - [Download the latest version of Python 3](https://www.python.org/downloads/) -- Run the installer +- Run the installer - Use default options. - Select to add PYTHON_HOME to PATH - Open PowerShell or Command Line diff --git a/Documentation/MODULE_GLOSSARY.md b/Documentation/MODULE_GLOSSARY.md deleted file mode 100644 index f23447a..0000000 --- a/Documentation/MODULE_GLOSSARY.md +++ /dev/null @@ -1,330 +0,0 @@ -# Function Glossary -*Explain all functions in one file to help find the ones you may need* -
-
- -*\* Asterisk means optional argument* -## dynatrace.cluster -### cluster_config.py -- get_node_info(Cluster Dict: cluster) - - Return: JSON Dict - - Status: Ready for Use - - Description: Get cluster node information, such as node id, Hardware Info, JVM info, URIs -- get_node_config(Cluster Dict: cluster) - - Return: JSON Dict - - Status: Ready for Use - - Description: Get node configurations such as, WebUI enabled, Agent enabled, id, IP Addresses, datacenter -- set_node_config(Cluster Dict: cluster, Dict: json) - - Return: HTTP Status Code - - Status: **UNTESTED** - - Description: Set node configurations such as, WebUI enabled, Agent enabled, id, IP Addresses, datacenter - -### ssl.py -Notes: -Entity Type can only be "COLLECTOR" or "SERVER" (case sensitive). -In addition, when pushing SSL certs via the API, it is HIGHLY RECOMMENDED to allow local logins (aka Non-SSO logins) during the change. Enable SSO-only after you can manually check the cluster to verify there are no issues with SSO. - -- get_cert_details (Cluster Dict: cluster, String: entity_type, String: entity_id) - - Return: Dict - - Status: Ready for Use - - Description: Get JSON of information about the current SSL certificate in use by a specific Server Node(For sure) or Cluster ActiveGate(I think?) -- get_cert_install_status(Cluster Dict: cluster, String: entity_id) - - Return: String - - Status: **UNTESTED** - - Description: Not sure fully of the usage. I think it is for getting the status of a certificate update. -- set_cert(Cluster Dict: cluster, String: entity_type, String: entity_id, Dict: ssl_json) - - Return: Dict - - Status: Ready to Use - -### sso.py -Notes: Some of these API commands are not advertised in the Cluster Management API - -- disable_sso (Cluster Dict: cluster) - - Return: HTTP Status Code - - State: Ready for Use - - Description: Turns off SSO in the environment. Can be especially useful if SSO breaks and you need to login with a local account -- enable_sso (Cluster Dict: cluster, Boolean: disable_local*, Boolean: groups_enabled*, Boolean: is_openid*) - - Return: HTTP Status Code - - State: Ready for Use (Only tested with already linked SSO) - - Description: Enables SSO that is already configured but disabled. By default, local login is still enabled, groups are not passed via SSO and uses SAML over OpenID. -- get_sso_status (Cluster Dict: cluster) - - Return: Dict - - State: Ready for Use - - Description: Shows the current authentication settings related to SSO - -### user_groups.py -- create_app_groups (String: app_name)
- - Return: Nothing - - Status: **LIMITED** - - Description: Takes the application and creates user groups for an application set-wide.
This is currently only applying a single format:
({User_Prefix}\_{Role_Type}\_{Tenant}_{User_Suffix})
User Prefix/Suffix and Role Type are set in the variable sets - - Current Plans: - - Refactor to a function that is for a single cluster and one for the set - - Ignore any SaaS environments in the set - - Allow for user group definited to be templated, so that a user can plug in their own group format - - Add Suffix logic -- delete_app_groups (String: app_name)
- - Return: Nothing - - Status: **LIMITED** - - Description: Takes the application and removes user groups for an application set-wide.
This is currently only applying a single format:
({User_Prefix}\_{Role_Type}\_{Tenant}_{User_Suffix})
User Prefix/Suffix and Role Type are set in the variable sets - - Current Plans: - - Refactor to a function that is for a single cluster and one for the set - - Ignore any SaaS environments in the set - - Allow for user group definited to be templated, so that a user can plug in their own group format - - Add Suffix Logic -- create_app_clusterwide (Cluster Dict: cluster, String: app_name, Dict of String List: zones*) - - Return: Nothing - - Status: **INCOMPLETE** - - Description: Create all user groups, and management zones and assign the new user groups to have appropriate permissions of the new management zones created
- "zones" is an optional argument. it is a dict of string lists. The intention is that each key would be the same as the cluster tenant name, and the embedded list will contain all the customer environments/lifecycles that will need their own management zone.
Management would be created in the format "{APP}" or "{APP} - {ENV}" - - Current Plans: - - Assign appropriate permissions to the user group from the new management zones - - Creating user groups has same limitations as "[create_app_groups](#create_app_groups)" - -### users.py -Module Notes: If SaaS is passed, by default it is ignored without error or notice. For notice, pass ignore_saas=False into the functions and it will raise an exception - -- check_is_managed(Cluster Dict: cluster, Boolean: ignore_saas) - - Return: If current cluster is Managed - - Status: Ready for Use - - Description: Internal function mostly to check if the cluster is Managed. -- get_users(Cluster Dict: cluster, Boolean: ignore_saas*) - - Return: JSON of users data in cluster - - Status: Ready for Use - - Description: Get all users in cluster and details. -- add_user(Cluster Dict: cluster, Dict: user_json, Boolean: ignore_saas*) - - Return: 'OK' - - Status: Ready for Use - - Description: Add user to the cluster according to user_json Dict -- update_user(Cluster Dict: cluster, Dict: user_json, Boolean: ignore_saas*) - - Return: 'OK' - - Status: Ready for Use - - Description: Update user information for the cluster according to user_json Dict -- get_user (Cluster Dict: cluster, String: user_id, Boolean: ignore_saas*) - - Return: JSON - - Status: Ready for Use - - Description: Get information for a single user by giving the user id -- delete_user (Cluster Dict: cluster, String: user_id, Boolean: ignore_saas*) - - Return: JSON - - Status: Ready for Use - - Description: Delete single user from the Managed Cluster -- add_user_bulk (Cluster Dict: cluster, Dict: user_json, Boolean: ignore_saas*) - - Return: 'OK' - - Status: Ready for Use - - Description: Add multiple users to the cluster according to the user_json Dict - -## dynatrace.requests - -### request_hander.py -*Class Notes:
-Cluster Dict is a single cluster defined in the FULL_SET set in user_variables and follows that structure
-Endpoints should not start with a "/"
-Params are Dict of parameters that are directly passed to the API, Key should match Dynatrace param name* - -- check_response (Cluster Dict: cluster, String: endpoint, Dict: params\*) - - Return: Response Object - - Status: Ready for Use - - Description: Checks if the response is within the HTTP 200-299 for a successful transaction. Otherwise raises an exception with the error
-- check_managed (Cluster Dict: cluster, String: endpoint, Dict: params\*) - - Return: Response Object - - Status: Ready for Use - - Description: Checks if the cluster instance provided is Managed or SaaS.
- - Current Plans: - - Allow ignore by default, so exception isn't raised and the function just carries on, skipping SaaS instances. -- cluster_get (Cluster Dict: cluster, String: endpoint, Dict: params\*) - - Return: Response Object - - Status: Ready for Use - - Description: GET Request for Cluster API Operations, passing in the Cluster Dictionary, this will ensure that the cluster passed through is managed.
- - Current Plans: - - Allow specifications of what to return (e.g full response object, status code, json payload) with an option argument in function -- cluster_post (Cluster Dict: cluster, String: endpoint, Dict: params\*, Dict: json\*) - - Return: Response Object - - Status: Ready for Use - - Description: POST Request for Cluster API Operations, passing in the Cluster Dictionary, this will ensure that the cluster passed through is managed.
- - Current Plans: - - Allow specifications of what to return (e.g full response object, status code, json payload) with an option argument in function -- cluster_put (Cluster Dict: cluster, String: endpoint, Dict: params\*, Dict: json\*) - - Return: Response Object - - Status: Ready for Use - - Description: PUT Request for Cluster API Operations, passing in the Cluster Dictionary, this will ensure that the cluster passed through is managed.
- - Current Plans: - - Allow specifications of what to return (e.g full response object, status code, json payload) with an option argument in function -- cluster_delete (Cluster Dict: cluster, String: endpoint, Dict: params\*, Dict: json\*) - - Return: Response Object - - Status: Ready for Use - - Description: DELETE Request for Cluster API Operations, passing in the Cluster Dictionary, this will ensure that the cluster passed through is managed.
- - Current Plans: - - Allow specifications of what to return (e.g full response object, status code, json payload) with an option argument in function -- env_get (Cluster Dict: cluster, String: tenant, String: endpoint, Dict: params\*) - - Return: Response Object - - Status: Ready for Use - - Description: GET Request for Cluster API Operations, passing in the Cluster Dictionary, this will ensure that the cluster passed through is managed.
- - Current Plans: - - Allow specifications of what to return (e.g full response object, status code, json payload) with an option argument in function -- env_post (Cluster Dict: cluster, String: tenant, String: endpoint, Dict: params\*, Dict: json\*) - - Return: Response Object - - Status: Ready for Use - - Description: POST Request for Cluster API Operations, passing in the Cluster Dictionary, this will ensure that the cluster passed through is managed.
- - Current Plans: - - Allow specifications of what to return (e.g full response object, status code, json payload) with an option argument in function -- env_put (Cluster Dict: cluster, String: tenant, String: endpoint, Dict: params\*, Dict: json\*) - - Return: Response Object - - Status: Ready for Use - - Description: PUT Request for Cluster API Operations, passing in the Cluster Dictionary, this will ensure that the cluster passed through is managed.
- - Current Plans: - - Allow specifications of what to return (e.g full response object, status code, json payload) with an option argument in function -- env_delete (Cluster Dict: cluster, String: tenant, String: endpoint, Dict: params\*) - - Return: Response Object - - Status: Ready for Use - - Description: DELETE Request for Cluster API Operations, passing in the Cluster Dictionary, this will ensure that the cluster passed through is managed.
- - Current Plans: - - Allow specifications of what to return (e.g full response object, status code, json payload) with an option argument in function -- config_get (Cluster Dict: cluster, String: tenant, String: endpoint, Dict: params\*, Dict: json\*) - - Return: Response Object - - Status: Ready for Use - - Description: GET Request for Cluster API Operations, passing in the Cluster Dictionary, this will ensure that the cluster passed through is managed.
- - Current Plans: - - Allow specifications of what to return (e.g full response object, status code, json payload) with an option argument in function -- config_post (Cluster Dict: cluster, String: tenant, String: endpoint, Dict: params\*, Dict: json\*) - - Return: Response Object - - Status: Ready for Use - - Description: POST Request for Cluster API Operations, passing in the Cluster Dictionary, this will ensure that the cluster passed through is managed.
- - Current Plans: - - Allow specifications of what to return (e.g full response object, status code, json payload) with an option argument in function -- config_put (Cluster Dict: cluster, String: tenant, String: endpoint, Dict: params\*, Dict: json\*) - - Return: Response Object - - Status: Ready for Use - - Description: PUT Request for Cluster API Operations, passing in the Cluster Dictionary, this will ensure that the cluster passed through is managed.
- - Current Plans: - - Allow specifications of what to return (e.g full response object, status code, json payload) with an option argument in function -- config_delete (Cluster Dict: cluster, String: tenant, String: endpoint, Dict: params\*, Dict: json\*) - - Return: Response Object - - Status: Ready for Use - - Current Plans: - - Description: DELETE Request for Cluster API Operations, passing in the Cluster Dictionary, this will ensure that the cluster passed through is managed.
- - Allow specifications of what to return (e.g full response object, status code, json payload) with an option argument in function - -## dynatrace.tenant - -### host_groups.py - -- get_host_groups_tenantwide(Cluster Dict: cluster, String: tenant) - - Return: Dict - - Status: Ready for Use - - Description: Get all Host Groups in a tenant. Dict uses HostGroup ID for the Key -- get_host_groups_tenantwide(Cluster Dict: cluster, String: tenant) -- get_host_groups_clusterwide (Cluster Dict: cluster) - - Return: Dict - - Status: Ready for Use - - Description: Get all Host Groups in a Cluster. Dict uses HostGroup ID for the Key - - Current Plan: - - Add split_by_tenant optional variable to return all host groups in nested Dicts by tenant -- get_host_groups_setwide (Dict of Cluster Dict: setwide) - - Return: Dict - - Status: Ready for Use - - Description: Get all Host Groups in the full_set of Clusters. Dict uses HostGroup ID for the Key - - Current Plan: - - Add split_by_tenant optional variable to return all host groups in nested Dicts by cluster and then again by tenant - -## dynatrace.timeseries - -### timeseries.py -Note: Currently V1 only - -- get_timeseries_list (Cluster Dict: cluster, String: tenant, Dict: params\*) - - Return: Dict - - Status: Ready for Use - - Description: Get list of Timeseries Metric Available -- get_timeseries_list (Cluster Dict: cluster, String: tenant, String metric, Dict: params\*) - - Return: List/Dict (Varies based on Metric) - - Status: Ready for Use - - Description: Get individual timeseries metric -- create_custom_metric (Cluster Dict: cluster, String: tenant, String metric, Dict: json, Dict: params\*) - - Return: HTTP Status Code - - Status: **Untested** - - Description: Create custom metric -- delete_custom_metric (Cluster Dict: cluster, String: tenant, String metric) - - Return: HTTP Status Code - - Status: **Untested** - - Description: Delete custom metric using metric ID - -## dynatrace.topology - -### applications.py - -- get_applications_tenantwide (Cluster Dict: cluster, String: Tenant) - - Return: Dict - - Status: Ready for Use - - Description: Returns JSON payload for the list of applications -- get_application (Cluster Dict: cluster, String: tenant, String: entity) - - Return: Dict - - Status: Ready for Use - - Description: Returns a specific application JSON payload referred by its Entity ID -- set_application_properties(Cluster Dict: cluster, String: tenant, String: entity, Dict: prop_json) - - Return: Dict - - Status: Ready for Use - - Description: Update Properties of the Application (at the moment the API only allows adding manual tags) -- get_application_count_tenantwide (Cluster Dict: cluster, String: Tenant) - - Return: Int - - Status: Ready for Use - - Description: Get the number of Applications defined in the tenant -- get_application_count_clusterwide (Cluster Dict: cluster) - - Return: Int - - Status: Ready for Use - - Description: Get the number of Applications defined in the cluster -- get_application_count_setwide (Dict of Cluster Dict: setwide) - - Return: Int - - Status: Ready for Use - - Description: Get the number of Applications defined all the clusters/instances in the set -- get_application_baseline(cluster, tenant, entity) - - Return: Dict - - Status: **UNTESTED** - - Description: Returns baseline information about the application requested - -### custom.py - -- set_custom_properties (Cluster Dict: cluster. 
String tenant, String: Entity, Dict: prop_json) - - Return: Dict - - Status: **UNTESTED** - - Description: Create/Update custom device. - -### hosts.py -- get_hosts_tenantwide (Cluster Dict: cluster, String: Tenant, Dict: params\*) - - Return: Dict - - Status: Ready for Use - - Description: Returns JSON payload for the list of hosts -- get_hosts_tenantwide (Cluster Dict: cluster, String: Tenant, String: Entity, Dict: params\*) - - Return: Dict - - Status: Ready for Use - - Description: Returns JSON payload for a single host -- set_host_properties(Cluster Dict: cluster, String: tenant, String: entity, Dict: prop_json) - - Return: Dict - - Status: Ready for Use - - Description: Update Properties of the host (at the moment the API only allows adding manual tags) -- get_host_count_clusterwide (Cluster Dict: cluster) - - Return: Int - - Status: Ready for Use - - Description: Get the number of hosts defined in the cluster -- get_host_count_setwide (Dict of Cluster Dict: setwide) - - Return: Int - - Status: Ready for Use - - Description: Get the number of hosts defined all the clusters/instances in the set -- add_host_tags (Cluster Dict: cluster, String: tenant, String: entity, List: tag_list) - - Return: HTTP Status Code - - Status: Ready for Use - - Description: Add tags to host -- get_host_units_tenantwide(Cluster Dict: cluster, String: tenant, List: params\*): - - Return: Number - - Status: Ready for Use - - Description: Tally host units consumed by tenant (can be filtered down with params) - -### process_groups.py -TODO - refer to above topology explanations for now -### process.py -TODO - refer to above topology explanations for now -### services.py -TODO - refer to above topology explanations for now - -### shared.py -NOTE: This is unifying shared operations of multiple layers of the topology. It is advised that you do not use this module and use the other topology functions built on top of this. - - diff --git a/Documentation/PROJECT_OBJECTIVES.md b/Documentation/PROJECT_OBJECTIVES.md index 76c7ff7..e60bcc0 100644 --- a/Documentation/PROJECT_OBJECTIVES.md +++ b/Documentation/PROJECT_OBJECTIVES.md @@ -1,5 +1,5 @@ # Project Objectives -1. Lower the barrier of entry of creating repeatable Dynatrace scripts +1. Lower the barrier of entry of creating repeatable Dynatrace scripts 2. Easy to Understand (Less time learning another new thing and more time using!) 3. 
Allow other applications to implement this as a foundation () \ No newline at end of file diff --git a/Pipfile b/Pipfile index 393ec6b..5ce3d44 100644 --- a/Pipfile +++ b/Pipfile @@ -7,9 +7,11 @@ verify_ssl = true pylint = "*" autopep8 = "*" coverage = "*" +flake8 = "*" [packages] requests = "*" +pyyaml = "*" [requires] python_version = "3.8" diff --git a/Pipfile.lock b/Pipfile.lock index 19a4db6..5241027 100644 --- a/Pipfile.lock +++ b/Pipfile.lock @@ -1,7 +1,7 @@ { "_meta": { "hash": { - "sha256": "d77ab23630511fa40710f418270d79d24bc9d2b8a61ab2d2af4b0e938036b609" + "sha256": "1f6dcbeb815f9efa753d891f95091be3a22aafd06ea237ff11fd5094c8fef0c4" }, "pipfile-spec": 6, "requires": { @@ -18,17 +18,17 @@ "default": { "certifi": { "hashes": [ - "sha256:5930595817496dd21bb8dc35dad090f1c2cd0adfaf21204bf6732ca5d8ee34d3", - "sha256:8fc0819f1f30ba15bdb34cceffb9ef04d99f420f68eb75d901e9560b8749fc41" + "sha256:1a4995114262bffbc2413b159f2a1a480c969de6e6eb13ee966d470af86af59c", + "sha256:719a74fb9e33b9bd44cc7f3a8d94bc35e4049deebe19ba7d8e108280cfd59830" ], - "version": "==2020.6.20" + "version": "==2020.12.5" }, "chardet": { "hashes": [ - "sha256:84ab92ed1c4d4f16916e05906b6b75a6c0fb5db821cc65e70cbd64a3e2a5eaae", - "sha256:fc323ffcaeaed0e0a02bf4d117757b98aed530d9ed4531e3e15460124c106691" + "sha256:0d6f53a15db4120f2b08c94f11e7d93d2c911ee118b6b30a04ec3ee8310179fa", + "sha256:f864054d66fd9118f2e67044ac8981a54775ec5b67aed0441892edb553d21da5" ], - "version": "==3.0.4" + "version": "==4.0.0" }, "idna": { "hashes": [ @@ -37,20 +37,47 @@ ], "version": "==2.10" }, + "pyyaml": { + "hashes": [ + "sha256:08682f6b72c722394747bddaf0aa62277e02557c0fd1c42cb853016a38f8dedf", + "sha256:0f5f5786c0e09baddcd8b4b45f20a7b5d61a7e7e99846e3c799b05c7c53fa696", + "sha256:129def1b7c1bf22faffd67b8f3724645203b79d8f4cc81f674654d9902cb4393", + "sha256:294db365efa064d00b8d1ef65d8ea2c3426ac366c0c4368d930bf1c5fb497f77", + "sha256:3b2b1824fe7112845700f815ff6a489360226a5609b96ec2190a45e62a9fc922", + "sha256:3bd0e463264cf257d1ffd2e40223b197271046d09dadf73a0fe82b9c1fc385a5", + "sha256:4465124ef1b18d9ace298060f4eccc64b0850899ac4ac53294547536533800c8", + "sha256:49d4cdd9065b9b6e206d0595fee27a96b5dd22618e7520c33204a4a3239d5b10", + "sha256:4e0583d24c881e14342eaf4ec5fbc97f934b999a6828693a99157fde912540cc", + "sha256:5accb17103e43963b80e6f837831f38d314a0495500067cb25afab2e8d7a4018", + "sha256:607774cbba28732bfa802b54baa7484215f530991055bb562efbed5b2f20a45e", + "sha256:6c78645d400265a062508ae399b60b8c167bf003db364ecb26dcab2bda048253", + "sha256:74c1485f7707cf707a7aef42ef6322b8f97921bd89be2ab6317fd782c2d53183", + "sha256:8c1be557ee92a20f184922c7b6424e8ab6691788e6d86137c5d93c1a6ec1b8fb", + "sha256:bb4191dfc9306777bc594117aee052446b3fa88737cd13b7188d0e7aa8162185", + "sha256:c20cfa2d49991c8b4147af39859b167664f2ad4561704ee74c1de03318e898db", + "sha256:d2d9808ea7b4af864f35ea216be506ecec180628aced0704e34aca0b040ffe46", + "sha256:dd5de0646207f053eb0d6c74ae45ba98c3395a571a2891858e87df7c9b9bd51b", + "sha256:e1d4970ea66be07ae37a3c2e48b5ec63f7ba6804bdddfdbd3cfd954d25a82e63", + "sha256:e4fac90784481d221a8e4b1162afa7c47ed953be40d31ab4629ae917510051df", + "sha256:fa5ae20527d8e831e8230cbffd9f8fe952815b2b7dae6ffec25318803a7528fc" + ], + "index": "pypi", + "version": "==5.4.1" + }, "requests": { "hashes": [ - "sha256:b3559a131db72c33ee969480840fff4bb6dd111de7dd27c8ee1f820f4f00231b", - "sha256:fe75cc94a9443b9246fc7049224f75604b113c36acb93f87b80ed42c44cbb898" + "sha256:27973dd4a904a4f13b263a19c866c13b92a39ed1c964655f025f3f8d3d75b804", + 
"sha256:c210084e36a42ae6b9219e00e48287def368a26d03a048ddad7bfee44f75871e" ], "index": "pypi", - "version": "==2.24.0" + "version": "==2.25.1" }, "urllib3": { "hashes": [ - "sha256:91056c15fa70756691db97756772bb1eb9678fa585d9184f24534b100dc60f4a", - "sha256:e7983572181f5e1522d9c98453462384ee92a0be7fac5f1413a1e35c56cc0461" + "sha256:1b465e494e3e0d8939b50680403e3aedaa2bc434b7d5af64dfd3c958d7f5ae80", + "sha256:de3eedaad74a2683334e282005cd8d7f22f4d55fa690a2a1020a416cb0a47e73" ], - "version": "==1.25.10" + "version": "==1.26.3" } }, "develop": { @@ -63,57 +90,89 @@ }, "autopep8": { "hashes": [ - "sha256:d21d3901cb0da6ebd1e83fc9b0dfbde8b46afc2ede4fe32fbda0c7c6118ca094" + "sha256:9e136c472c475f4ee4978b51a88a494bfcd4e3ed17950a44a988d9e434837bea", + "sha256:cae4bc0fb616408191af41d062d7ec7ef8679c7f27b068875ca3a9e2878d5443" ], "index": "pypi", - "version": "==1.5.4" + "version": "==1.5.5" + }, + "colorama": { + "hashes": [ + "sha256:5941b2b48a20143d2267e95b1c2a7603ce057ee39fd88e7329b0c292aa16869b", + "sha256:9f47eda37229f68eee03b24b9748937c7dc3868f906e8ba69fbcbdd3bc5dc3e2" + ], + "markers": "sys_platform == 'win32'", + "version": "==0.4.4" }, "coverage": { "hashes": [ - "sha256:098a703d913be6fbd146a8c50cc76513d726b022d170e5e98dc56d958fd592fb", - "sha256:16042dc7f8e632e0dcd5206a5095ebd18cb1d005f4c89694f7f8aafd96dd43a3", - "sha256:1adb6be0dcef0cf9434619d3b892772fdb48e793300f9d762e480e043bd8e716", - "sha256:27ca5a2bc04d68f0776f2cdcb8bbd508bbe430a7bf9c02315cd05fb1d86d0034", - "sha256:28f42dc5172ebdc32622a2c3f7ead1b836cdbf253569ae5673f499e35db0bac3", - "sha256:2fcc8b58953d74d199a1a4d633df8146f0ac36c4e720b4a1997e9b6327af43a8", - "sha256:304fbe451698373dc6653772c72c5d5e883a4aadaf20343592a7abb2e643dae0", - "sha256:30bc103587e0d3df9e52cd9da1dd915265a22fad0b72afe54daf840c984b564f", - "sha256:40f70f81be4d34f8d491e55936904db5c527b0711b2a46513641a5729783c2e4", - "sha256:4186fc95c9febeab5681bc3248553d5ec8c2999b8424d4fc3a39c9cba5796962", - "sha256:46794c815e56f1431c66d81943fa90721bb858375fb36e5903697d5eef88627d", - "sha256:4869ab1c1ed33953bb2433ce7b894a28d724b7aa76c19b11e2878034a4e4680b", - "sha256:4f6428b55d2916a69f8d6453e48a505c07b2245653b0aa9f0dee38785939f5e4", - "sha256:52f185ffd3291196dc1aae506b42e178a592b0b60a8610b108e6ad892cfc1bb3", - "sha256:538f2fd5eb64366f37c97fdb3077d665fa946d2b6d95447622292f38407f9258", - "sha256:64c4f340338c68c463f1b56e3f2f0423f7b17ba6c3febae80b81f0e093077f59", - "sha256:675192fca634f0df69af3493a48224f211f8db4e84452b08d5fcebb9167adb01", - "sha256:700997b77cfab016533b3e7dbc03b71d33ee4df1d79f2463a318ca0263fc29dd", - "sha256:8505e614c983834239f865da2dd336dcf9d72776b951d5dfa5ac36b987726e1b", - "sha256:962c44070c281d86398aeb8f64e1bf37816a4dfc6f4c0f114756b14fc575621d", - "sha256:9e536783a5acee79a9b308be97d3952b662748c4037b6a24cbb339dc7ed8eb89", - "sha256:9ea749fd447ce7fb1ac71f7616371f04054d969d412d37611716721931e36efd", - "sha256:a34cb28e0747ea15e82d13e14de606747e9e484fb28d63c999483f5d5188e89b", - "sha256:a3ee9c793ffefe2944d3a2bd928a0e436cd0ac2d9e3723152d6fd5398838ce7d", - "sha256:aab75d99f3f2874733946a7648ce87a50019eb90baef931698f96b76b6769a46", - "sha256:b1ed2bdb27b4c9fc87058a1cb751c4df8752002143ed393899edb82b131e0546", - "sha256:b360d8fd88d2bad01cb953d81fd2edd4be539df7bfec41e8753fe9f4456a5082", - "sha256:b8f58c7db64d8f27078cbf2a4391af6aa4e4767cc08b37555c4ae064b8558d9b", - "sha256:c1bbb628ed5192124889b51204de27c575b3ffc05a5a91307e7640eff1d48da4", - "sha256:c2ff24df02a125b7b346c4c9078c8936da06964cc2d276292c357d64378158f8", - 
"sha256:c890728a93fffd0407d7d37c1e6083ff3f9f211c83b4316fae3778417eab9811", - "sha256:c96472b8ca5dc135fb0aa62f79b033f02aa434fb03a8b190600a5ae4102df1fd", - "sha256:ce7866f29d3025b5b34c2e944e66ebef0d92e4a4f2463f7266daa03a1332a651", - "sha256:e26c993bd4b220429d4ec8c1468eca445a4064a61c74ca08da7429af9bc53bb0" + "sha256:03ed2a641e412e42cc35c244508cf186015c217f0e4d496bf6d7078ebe837ae7", + "sha256:04b14e45d6a8e159c9767ae57ecb34563ad93440fc1b26516a89ceb5b33c1ad5", + "sha256:0cdde51bfcf6b6bd862ee9be324521ec619b20590787d1655d005c3fb175005f", + "sha256:0f48fc7dc82ee14aeaedb986e175a429d24129b7eada1b7e94a864e4f0644dde", + "sha256:107d327071061fd4f4a2587d14c389a27e4e5c93c7cba5f1f59987181903902f", + "sha256:1375bb8b88cb050a2d4e0da901001347a44302aeadb8ceb4b6e5aa373b8ea68f", + "sha256:14a9f1887591684fb59fdba8feef7123a0da2424b0652e1b58dd5b9a7bb1188c", + "sha256:16baa799ec09cc0dcb43a10680573269d407c159325972dd7114ee7649e56c66", + "sha256:1b811662ecf72eb2d08872731636aee6559cae21862c36f74703be727b45df90", + "sha256:1ccae21a076d3d5f471700f6d30eb486da1626c380b23c70ae32ab823e453337", + "sha256:2f2cf7a42d4b7654c9a67b9d091ec24374f7c58794858bff632a2039cb15984d", + "sha256:322549b880b2d746a7672bf6ff9ed3f895e9c9f108b714e7360292aa5c5d7cf4", + "sha256:32ab83016c24c5cf3db2943286b85b0a172dae08c58d0f53875235219b676409", + "sha256:3fe50f1cac369b02d34ad904dfe0771acc483f82a1b54c5e93632916ba847b37", + "sha256:4a780807e80479f281d47ee4af2eb2df3e4ccf4723484f77da0bb49d027e40a1", + "sha256:4a8eb7785bd23565b542b01fb39115a975fefb4a82f23d407503eee2c0106247", + "sha256:5bee3970617b3d74759b2d2df2f6a327d372f9732f9ccbf03fa591b5f7581e39", + "sha256:60a3307a84ec60578accd35d7f0c71a3a971430ed7eca6567399d2b50ef37b8c", + "sha256:6625e52b6f346a283c3d563d1fd8bae8956daafc64bb5bbd2b8f8a07608e3994", + "sha256:66a5aae8233d766a877c5ef293ec5ab9520929c2578fd2069308a98b7374ea8c", + "sha256:68fb816a5dd901c6aff352ce49e2a0ffadacdf9b6fae282a69e7a16a02dad5fb", + "sha256:6b588b5cf51dc0fd1c9e19f622457cc74b7d26fe295432e434525f1c0fae02bc", + "sha256:6c4d7165a4e8f41eca6b990c12ee7f44fef3932fac48ca32cecb3a1b2223c21f", + "sha256:6d2e262e5e8da6fa56e774fb8e2643417351427604c2b177f8e8c5f75fc928ca", + "sha256:6d9c88b787638a451f41f97446a1c9fd416e669b4d9717ae4615bd29de1ac135", + "sha256:755c56beeacac6a24c8e1074f89f34f4373abce8b662470d3aa719ae304931f3", + "sha256:7e40d3f8eb472c1509b12ac2a7e24158ec352fc8567b77ab02c0db053927e339", + "sha256:812eaf4939ef2284d29653bcfee9665f11f013724f07258928f849a2306ea9f9", + "sha256:84df004223fd0550d0ea7a37882e5c889f3c6d45535c639ce9802293b39cd5c9", + "sha256:859f0add98707b182b4867359e12bde806b82483fb12a9ae868a77880fc3b7af", + "sha256:87c4b38288f71acd2106f5d94f575bc2136ea2887fdb5dfe18003c881fa6b370", + "sha256:89fc12c6371bf963809abc46cced4a01ca4f99cba17be5e7d416ed7ef1245d19", + "sha256:9564ac7eb1652c3701ac691ca72934dd3009997c81266807aef924012df2f4b3", + "sha256:9754a5c265f991317de2bac0c70a746efc2b695cf4d49f5d2cddeac36544fb44", + "sha256:a565f48c4aae72d1d3d3f8e8fb7218f5609c964e9c6f68604608e5958b9c60c3", + "sha256:a636160680c6e526b84f85d304e2f0bb4e94f8284dd765a1911de9a40450b10a", + "sha256:a839e25f07e428a87d17d857d9935dd743130e77ff46524abb992b962eb2076c", + "sha256:b62046592b44263fa7570f1117d372ae3f310222af1fc1407416f037fb3af21b", + "sha256:b7f7421841f8db443855d2854e25914a79a1ff48ae92f70d0a5c2f8907ab98c9", + "sha256:ba7ca81b6d60a9f7a0b4b4e175dcc38e8fef4992673d9d6e6879fd6de00dd9b8", + "sha256:bb32ca14b4d04e172c541c69eec5f385f9a075b38fb22d765d8b0ce3af3a0c22", + "sha256:c0ff1c1b4d13e2240821ef23c1efb1f009207cb3f56e16986f713c2b0e7cd37f", + 
"sha256:c669b440ce46ae3abe9b2d44a913b5fd86bb19eb14a8701e88e3918902ecd345", + "sha256:c67734cff78383a1f23ceba3b3239c7deefc62ac2b05fa6a47bcd565771e5880", + "sha256:c6809ebcbf6c1049002b9ac09c127ae43929042ec1f1dbd8bb1615f7cd9f70a0", + "sha256:cd601187476c6bed26a0398353212684c427e10a903aeafa6da40c63309d438b", + "sha256:ebfa374067af240d079ef97b8064478f3bf71038b78b017eb6ec93ede1b6bcec", + "sha256:fbb17c0d0822684b7d6c09915677a32319f16ff1115df5ec05bdcaaee40b35f3", + "sha256:fff1f3a586246110f34dc762098b5afd2de88de507559e63553d7da643053786" ], "index": "pypi", - "version": "==5.2.1" + "version": "==5.4" + }, + "flake8": { + "hashes": [ + "sha256:749dbbd6bfd0cf1318af27bf97a14e28e5ff548ef8e5b1566ccfb25a11e7c839", + "sha256:aadae8761ec651813c24be05c6f7b4680857ef6afaae4651a4eccaef97ce6c3b" + ], + "index": "pypi", + "version": "==3.8.4" }, "isort": { "hashes": [ - "sha256:54da7e92468955c4fceacd0c86bd0ec997b0e1ee80d97f67c35a78b719dccab1", - "sha256:6e811fcb295968434526407adb8796944f1988c5b65e8139058f2014cbe100fd" + "sha256:c729845434366216d320e936b8ad6f9d681aab72dc7cbc2d51bedc3582f3ad1e", + "sha256:fff4f0c04e1825522ce6949973e83110a6e907750cd92d128b0d14aaaadbffdc" ], - "version": "==4.3.21" + "version": "==5.7.0" }, "lazy-object-proxy": { "hashes": [ @@ -155,13 +214,20 @@ ], "version": "==2.6.0" }, + "pyflakes": { + "hashes": [ + "sha256:0d94e0e05a19e57a99444b6ddcf9a6eb2e5c68d3ca1e98e90707af8152c90a92", + "sha256:35b2d75ee967ea93b55750aa9edbbf72813e06a66ba54438df2cfac9e3c27fc8" + ], + "version": "==2.2.0" + }, "pylint": { "hashes": [ - "sha256:7dd78437f2d8d019717dbf287772d0b2dbdfd13fc016aa7faa08d67bccc46adc", - "sha256:d0ece7d223fe422088b0e8f13fa0a1e8eb745ebffcb8ed53d3e95394b6101a1c" + "sha256:718b74786ea7ed07aa0c58bf572154d4679f960d26e9641cc1de204a30b87fc9", + "sha256:e71c2e9614a4f06e36498f310027942b0f4f2fde20aebb01655b31edc63b9eaf" ], "index": "pypi", - "version": "==2.5.3" + "version": "==2.6.2" }, "six": { "hashes": [ @@ -172,10 +238,10 @@ }, "toml": { "hashes": [ - "sha256:926b612be1e5ce0634a2ca03470f95169cf16f939018233a670519cb4ac58b0f", - "sha256:bda89d5935c2eac546d648028b9901107a595863cb36bae0c73ac804a9b4ce88" + "sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b", + "sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f" ], - "version": "==0.10.1" + "version": "==0.10.2" }, "wrapt": { "hashes": [ diff --git a/README.md b/README.md index 187f8c9..da78415 100644 --- a/README.md +++ b/README.md @@ -1,10 +1,10 @@ # Dynatrace Python3 API Framework -Testing the "Test" Branch +![Lint Code Base](https://github.com/dynatrace-api-framework/python3-framework/workflows/Lint%20Code%20Base/badge.svg) **Python Requirements**: Requests -Python >= 3.4 (Built and tested with Python 3.8) +Python >= 3.8 **How To Use** diff --git a/change_variables.py b/change_variables.py index aba3923..e921d13 100644 --- a/change_variables.py +++ b/change_variables.py @@ -4,14 +4,14 @@ import os -def replace_set(set_file): +def replace_set(new_set_file): """Replace Variable File""" # Options are Darwin, Linux, Java and Windows. 
Java not supported
     if "Windows" in system():
         os.system("copy variable_sets\\" +
-                  str(set_file) + ".py user_variables.py")
+                  str(new_set_file) + ".py user_variables.py")
     else:
-        os.system("cp variable_sets/" + str(set_file) +
+        os.system("cp variable_sets/" + str(new_set_file) +
                   ".py user_variables.py")
diff --git a/dynatrace/cluster/config.py b/dynatrace/cluster/config.py
index 0d51613..6c08093 100644
--- a/dynatrace/cluster/config.py
+++ b/dynatrace/cluster/config.py
@@ -1,19 +1,45 @@
-import dynatrace.requests.request_handler as rh
+"""Cluster Config Operations for Dynatrace Managed"""
+import dynatrace.framework.request_handler as rh


 def get_node_info(cluster):
+    """Get Current Cluster Information
+
+    Args:
+        cluster (cluster dict): Currently selected cluster
+
+    Returns:
+        dict: cluster node info dictionary
+    """
     response = rh.make_api_call(cluster=cluster,
                                 endpoint=rh.ClusterAPIs.CLUSTER)
     return response.json()


 def get_node_config(cluster):
+    """Get current cluster config for each node
+
+    Args:
+        cluster (cluster dict): Currently selected cluster
+
+    Returns:
+        dict: current cluster configuration properties
+    """
     response = rh.make_api_call(cluster=cluster,
                                 endpoint=rh.ClusterAPIs.CONFIG)
     return response.json()


 def set_node_config(cluster, json):
+    """Set cluster config for each node
+
+    Args:
+        cluster (cluster dict): Currently selected cluster
+        json (dict): Dict of all desired settings
+
+    Returns:
+        int: HTTP status code indicating success or failure
+    """
     response = rh.make_api_call(cluster=cluster,
                                 endpoint=rh.ClusterAPIs.CONFIG,
                                 method=rh.HTTP.POST,
diff --git a/dynatrace/cluster/ssl.py b/dynatrace/cluster/ssl_cert.py
similarity index 95%
rename from dynatrace/cluster/ssl.py
rename to dynatrace/cluster/ssl_cert.py
index b09e4e4..7f50088 100644
--- a/dynatrace/cluster/ssl.py
+++ b/dynatrace/cluster/ssl_cert.py
@@ -1,6 +1,6 @@
 #!/bin/python3
 """Cluster SSL Certificate Operations"""
-import dynatrace.requests.request_handler as rh
+import dynatrace.framework.request_handler as rh


 def get_cert_details(cluster, entity_type, entity_id):
diff --git a/dynatrace/cluster/sso.py b/dynatrace/cluster/sso.py
index df73b0a..efd8816 100644
--- a/dynatrace/cluster/sso.py
+++ b/dynatrace/cluster/sso.py
@@ -1,7 +1,7 @@
 """SSO Operations for Dynatrace"""
-import dynatrace.requests.request_handler as rh
+import dynatrace.framework.request_handler as rh

-ENDPOINT = "sso/ssoProvider"
+ENDPOINT = "/api/v1.0/onpremise/sso/ssoProvider"


 def disable_sso(cluster):
@@ -45,6 +45,14 @@ def enable_sso(cluster, disable_local=False, groups_enabled=False, is_openid=Fal


 def get_sso_status(cluster):
+    """Current Settings of SSO
+
+    Args:
+        cluster (cluster dict): Currently selected cluster
+
+    Returns:
+        dict: all SSO settings with their current values
+    """
     response = rh.make_api_call(cluster=cluster,
                                 endpoint=ENDPOINT)
     return response.json()
diff --git a/dynatrace/cluster/user_groups.py b/dynatrace/cluster/user_groups.py
index c073bac..72db000 100644
--- a/dynatrace/cluster/user_groups.py
+++ b/dynatrace/cluster/user_groups.py
@@ -1,8 +1,7 @@
 #!/bin/python3
 """Cluster Group Operations"""
-import user_variables
-from dynatrace.requests import request_handler as rh
-from dynatrace.tenant import management_zones as mzh
+import user_variables  # pylint: disable=import-error
+from dynatrace.framework import request_handler as rh

 MZ_USER_PERMISSONS = {
     "access_env": "VIEWER",
@@ -13,6 +12,18 @@


 def generate_group_name(template, user_type, tenant, app_name):
+    """Generate User Group according to template
+
+    Args:
+        template (str): template with replaceable values for variables
+        user_type (str): user permission type
+        tenant (str): tenant for user_group to match to
+        app_name (str): Application name
+
+    Returns:
+        str: generated user group name
+    """
+    # TODO Refactor for more replacements
     template = template.replace("{USER_TYPE}", user_type)
     template = template.replace("{TENANT}", tenant)
     template = template.replace("{APP_NAME}", app_name)
@@ -59,6 +70,12 @@ def create_app_groups_setwide(app_name):


 def delete_app_groups(cluster, app_name):
+    """Delete User Groups for Application
+
+    Args:
+        cluster (cluster dict): Currently selected cluster
+        app_name (str): Application to remove all groups
+    """
     role_types = user_variables.USER_GROUPS['role_types']
     role_tenants = user_variables.USER_GROUPS['role_tenants']

@@ -79,34 +96,3 @@ def delete_app_groups_setwide(app_name):
     for cluster in user_variables.FULL_SET.values():
         if cluster['is_managed']:
             delete_app_groups(cluster, app_name)
-
-
-def create_app_clusterwide(cluster, app_name, zones=None):
-    """Create App User Groups and Management Zones"""
-    # Create Standard App MZs
-    mz_list = {}
-    for tenant_key in cluster['tenant'].keys():
-        mzh.add_management_zone(
-            cluster,
-            tenant_key,
-            str.upper(app_name)
-        )
-        if tenant_key in zones:
-            mz_list[tenant_key] = []
-            for zone in zones[tenant_key]:
-                mz_id = mzh.add_management_zone(
-                    cluster,
-                    tenant_key,
-                    str.upper(app_name),
-                    zone
-                )
-                if mz_id is not None:
-                    mz_list[tenant_key].append(mz_id)
-
-    # Create User Groups
-    user_groups = create_app_groups(cluster, app_name)
-    print(user_groups)
-
-    # for tenant in user_variables.USER_GROUPS['role_tenants']:
-    #     if "access_env" in user_groups [tenant]:
-    #         add_mz_to_user
diff --git a/dynatrace/cluster/users.py b/dynatrace/cluster/users.py
index 2331503..621fab8 100644
--- a/dynatrace/cluster/users.py
+++ b/dynatrace/cluster/users.py
@@ -1,6 +1,6 @@
 """User Operations in Cluster Management"""
-import dynatrace.requests.request_handler as rh
-from dynatrace.exceptions import ManagedClusterOnlyException
+import dynatrace.framework.request_handler as rh
+from dynatrace.framework.exceptions import ManagedClusterOnlyException

 # TODO add check for is_managed

@@ -51,9 +51,11 @@ def get_user(cluster, user_id, ignore_saas=True):

 def delete_user(cluster, user_id, ignore_saas=True):
     """Delete a Single User"""
     check_is_managed(cluster, ignore_saas)
-    response = rh.cluster_delete(cluster=cluster,
-                                 method=rh.HTTP.DELETE,
-                                 endpoint=f"{rh.ClusterAPIs.USERS}/{user_id}")
+    response = rh.make_api_call(
+        cluster=cluster,
+        endpoint=f"{rh.ClusterAPIs.USERS}/{user_id}",
+        method=rh.HTTP.DELETE,
+    )
     return response.json()
diff --git a/dynatrace/exceptions.py b/dynatrace/framework/exceptions.py
similarity index 68%
rename from dynatrace/exceptions.py
rename to dynatrace/framework/exceptions.py
index 48367a8..2b3d3b7 100644
--- a/dynatrace/exceptions.py
+++ b/dynatrace/framework/exceptions.py
@@ -5,21 +5,30 @@

 class InvalidAPIResponseException (Exception):
+    """The framework did not get an expected result from the Dynatrace API"""
     def __init__(self, message):
+        super().__init__(message)
         print(message, file=stderr)


 class InvalidDateFormatException(ValueError):
+    """The Date provided does not match the format needed"""
     def __init__(self, required_format):
+        super().__init__()
         self.message = f"Incorrect Date for following entry: {required_format}"

+
 class InvalidScopeException(ValueError):
+    """The Scope is incomplete or misconfigured"""
     def __init__(self, required_format):
+        super().__init__()
        self.required_format = required_format
         print("Invalid scope used. Tag required for management zone, matching rule: %s" % required_format, file=stderr)


 class ManagedClusterOnlyException(TypeError):
+    """The operation is only supported on a managed cluster"""
     def __init__(self):
+        super().__init__()
         print("This operation is only supported on Dynatrace Managed!", file=stderr)
diff --git a/dynatrace/framework/log_handler.py b/dynatrace/framework/log_handler.py
new file mode 100644
index 0000000..09f1015
--- /dev/null
+++ b/dynatrace/framework/log_handler.py
@@ -0,0 +1,59 @@
+"""Module for logging throughout the framework."""
+import os
+import logging
+from logging import handlers
+from dynatrace.framework import settings
+
+logging.root.setLevel(logging.NOTSET)
+
+
+def get_logger(name, filename="Framework.log"):
+    """Sets up a logger and returns it for use throughout the framework.
+    Actual configuration parameters are exposed in framework settings.
+    \n
+    @param name (str) - name of the logger. Defaults to __name__\n
+    @param filename (str) - name of the log file (in case of logging to file).
+                            Defaults to Framework.log
+    \n
+    @returns Logger - logger to be used in framework
+    """
+    enabled = settings.get_setting("LOG_ENABLED")
+    output = settings.get_setting("LOG_OUTPUT")
+    folder = settings.get_setting("LOG_DIR")
+    level = settings.get_setting("LOG_LEVEL")
+
+    logger = logging.getLogger(name)
+
+    log_format = logging.Formatter(
+        fmt="[%(asctime)s][%(module)s][%(funcName)s][%(levelname)s] %(message)s",
+        datefmt="%Y-%b-%d %H:%M:%S"
+    )
+
+    if enabled:
+        if "FILE" in output:
+            if folder is None:
+                raise ValueError(
+                    "Could not setup logging - missing folder from settings."
+                )
+            if not os.path.exists(folder):
+                os.makedirs(folder)
+            file_handler = handlers.RotatingFileHandler(
+                filename=f"{folder}/{filename}",
+                delay=True,
+                maxBytes=1000000,
+                backupCount=5
+            )
+            file_handler.setFormatter(log_format)
+            file_handler.setLevel(level)
+            logger.addHandler(file_handler)
+
+        if "CONSOLE" in output:
+            console_handler = logging.StreamHandler()
+            console_handler.setFormatter(log_format)
+            console_handler.setLevel(level)
+            logger.addHandler(console_handler)
+    else:
+        # Essentially disables logging
+        logger.setLevel(logging.CRITICAL+1)
+
+    return logger
diff --git a/dynatrace/framework/request_handler.py b/dynatrace/framework/request_handler.py
new file mode 100644
index 0000000..136f9d1
--- /dev/null
+++ b/dynatrace/framework/request_handler.py
@@ -0,0 +1,358 @@
+"""Make API Request to available Dynatrace API"""
+from enum import Enum, auto
+import time
+import functools
+from copy import deepcopy
+import requests
+from dynatrace.framework import log_handler
+from dynatrace.framework.settings import get_cluster_dict
+from dynatrace.framework.exceptions import InvalidAPIResponseException, ManagedClusterOnlyException
+
+requests.packages.urllib3.disable_warnings()  # pylint: disable=no-member
+logger = log_handler.get_logger(__name__)
+
+HTTPS_STR = "https://"
+
+
+class ClusterAPIs(Enum):
+    """
+    Enum representing Dynatrace Cluster REST API endpoints.\n
+    Use these values when adding the 'endpoint' argument.
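For illustration only, a sketch of how these enum members compose endpoint paths (make_api_call and HTTP are defined later in this module; cluster and user_id are placeholder values):

    # The enum renders to its path ("/api/v1.0/onpremise/users") via __str__
    # when formatted into the URL.
    response = make_api_call(
        cluster=cluster,                            # dict, or its key in FULL_SET
        endpoint=f"{ClusterAPIs.USERS}/{user_id}",  # user_id is hypothetical
        method=HTTP.DELETE,
    )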
+ """ + BASE = "/api/v1.0/onpremise" + CLUSTER = f"{BASE}/cluster" + CONFIG = f"{CLUSTER}/configuration" + CONFIG_STATUS = f"{CONFIG}/status" + SSL = f"{BASE}/sslCertificate" + SSL_STORE = f"{SSL}/store" + SSO = "" # Need to confirm endpoint + GROUPS = f"{BASE}/groups" + USERS = f"{BASE}/users" + + def __str__(self): + return str(self.value) + + +class TenantAPIs(Enum): + """ + Enum representing Dynatrace Tenant REST API endpoints.\n + Use these values when adding the 'endpoint' argument. + """ + TIMESERIES = "/api/v1/timeseries" + PROBLEM_DETAILS = "/api/v1/problem/details" + PROBLEM_FEED = "/api/v1/problem/feed" + PROBLEM_STATUS = "/api/v1/problem/status" + PROBLEMS = "/api/v2/problems" + DEPLOY_ONEAGENT = "/api/v1/deployment/installer/agent" + DEPLOY_ONEAGENT_CONNECTION_INFO = "/api/v1/deployment/installer/agent/connectioninfo" + DEPLOY_ONEAGENT_CONNECTION_ENDPOINTS = \ + "/api/v1/deployment/installer/agent/connectioninfo/endpoints" + DEPLOY_ACTIVEGATE = "/api/v1/deployment/installer/gateway" + DEPLOY_BOSH = "/api/v1/deployment/boshrelease" + EVENTS = "/api/v1/events" + USER_SESSIONS = "/api/v1/userSessionQueryLanguage" + TOKENS = "/api/v1/tokens" + SYNTHETIC_MONITORS = "/api/v1/synthetic/monitors" + SYNTHETIC_LOCATIONS = "/api/v1/synthetic/locations" + SYNTHETIC_NODES = "/api/v1/synthetic/nodes" + ENTITIES = "/api/v2/entities" + METRICS = "/api/v2/metrics" + TAGS = "/api/v2/tags" + NETWORK_ZONES = "/api/v2/networkZones" + MANAGEMENT_ZONES = "/api/config/v1/managementZones" + V1_TOPOLOGY = "/api/v1/entity" + MAINTENANCE_WINDOWS = "/api/config/v1/maintenanceWindows" + ONEAGENTS = "/api/v1/oneagents" + EXTENSIONS = "/api/config/v1/extensions" + REQUEST_ATTRIBUTES = "/api/config/v1/service/requestAttributes/" + REQUEST_NAMING = "/api/config/v1/service/requestNaming" + + def __str__(self): + return str(self.value) + + +class HTTP(Enum): + ''' + Enum representing HTTP request methods.\n + Use these values when adding the 'method' argument. + ''' + GET = auto() + PUT = auto() + POST = auto() + DELETE = auto() + + def __str__(self): + return str(self.name) + + def __repr__(self): + return str(self.name) + + +def slow_down(func): + """ + Decorator for slowing down API requests. In case of SaaS limits are as low + as 50/min. If current call is within the last 25% remaining requests (until + the limit is reached) then a slow down of 1 sec is applied. + *** Should only use to decorate API-calling functions *** + """ + @functools.wraps(func) + def wrapper(*args, **kwargs): + response = func(*args, **kwargs) + + # Get the cluster from wrapped function + if 'cluster' in kwargs: + cluster = kwargs.get('cluster') + else: + cluster = args[0] + + # Only slow-down SaaS + if not cluster.get('is_managed') and 'x-ratelimit-remaining' in response.headers: + # Standard Dynatrace response headers + req_remaining = int(response.headers.get('x-ratelimit-remaining')) + req_limit = int(response.headers.get('x-ratelimit-limit')) + # If 75% requests already made, slow down + if req_remaining/req_limit <= 0.25: + logger.info( + "API rate limit approaching. Introducing 1 sec delay between calls." + ) + time.sleep(1) + + return response + return wrapper + + +@slow_down +def make_api_call(cluster, endpoint, tenant=None, method=HTTP.GET, **kwargs): + """Function makes an API call in a safe way. + It takes into account any API rate limits. This will ensure the API call + will always go through. The program will wait for the limit to reset if + needed. 
+ \n + @param cluster (dict) - Cluster dictionary from variable_set\n + @param endpoint (str) - API endpoint to call.\n + @param tenant (str) - String of tenant name used in cluster dictionary\n + @param method (str) - HTTP method to use in call. Use HTTP enum. + \n + @kwargs params (dict) - query string parameters\n + @kwargs json (dict) - request body to be sent as JSON\n + @kwargs body (str) - request body to be sent as plain text + \n + @returns - response from request\n + """ + cluster_dict = get_cluster_dict(cluster) + # cluster_dict = cluster + # Set the right URL for the operation + url = f"{generate_tenant_url(cluster_dict, tenant)}{endpoint}" \ + if tenant else f"{HTTPS_STR}{cluster_dict['url']}{endpoint}" + logger.debug("URL used for API call: %s", url) + + # Get correct token for the operation + if 'onpremise' in str(endpoint) or 'cluster' in str(endpoint): + check_managed(cluster_dict) + headers = dict(Authorization=f"Api-Token {cluster_dict['cluster_token']}") + else: + headers = dict(Authorization=f"Api-Token {cluster_dict['api_token'][tenant]}") + + logger.debug("API call details:") + call_details = deepcopy(locals()) + del call_details["cluster_dict"]["api_token"] # Remove API Tokens for all tenants + call_details["cluster_dict"]["cluster_token"] = "*" * 12 + call_details["headers"]["Authorization"] = "*" * 12 + logger.debug(call_details) + # Loop to retry in case of rate limits + while True: + response = requests.request( + method=str(method), + url=url, + headers=headers, + verify=cluster_dict.get('verify_ssl'), + **kwargs + ) + if check_response(response): + break + + return response + +def get_results_whole(cluster, tenant, endpoint, api_version, **kwargs): + """Gets a multi-paged result set and returns it whole. + \n + @param cluster (dict) - Dynatrace cluster (as taken from variable set)\n + @param tenant (str) - name of Dynatrace tenant (as taken from variable set)\n + @param endpoint (str) - API endpoint to call. Use the TenantAPIs Enum.\n + @param api_version (int) - different APIs have different pagination behaviour. + this maps the pagination behaviour to v1 or v2. + \n + @kwargs item (str) - the item to be retrieved from results response (e.g. entities)\n + \n + @throws ValueError - when V2 API is used but no item is given + """ + # Ensure it always makes at least 1 call + cursor = 1 + # For V2 APIs must specify the item collected + if api_version == 2: + is_v2 = True + if 'item' not in kwargs: + try: + raise ValueError("For V2 APIs you must provide collected item.") + except ValueError: + logger.exception("Error: item missing from V2 API call.", stack_info=True) + raise + item = kwargs['item'] + results = {} + logger.debug("Using V2 pagination for API to collect %s", item) + else: + is_v2 = False + results = [] + logger.debug("Using V1 pagination for API") + + while cursor: + if cursor != 1: + logger.debug("Getting next page of results. 
Cursor is %s", cursor) + if not is_v2 or endpoint == TenantAPIs.ONEAGENTS: + # V1 and OneAgents require all other query params are preserved + kwargs['nextPageKey'] = cursor + else: + # V2 requires all other query params are removed + kwargs = dict(nextPageKey=cursor) + + response = make_api_call( + cluster=cluster, + tenant=tenant, + endpoint=endpoint, + params=kwargs + ) + + # V2 returns additional data in response that should be preserved + if is_v2: + if cursor == 1: + results = response.json() + else: + results[item].extend(response.json().get(item)) + cursor = response.json().get('nextPageKey') + else: + results.extend(response.json()) + cursor = response.headers.get('next-page-key') + + return results + + +def get_results_by_page(cluster, tenant, endpoint, api_version, **kwargs): + """Gets a multi-paged result set one page at at time. + Useful for parsing very large result sets (e.g. entities) in optimal manner. + \n + @param cluster (dict) - Dynatrace cluster (as taken from variable set)\n + @param tenant (str) - name of Dynatrace tenant (as taken from variable set)\n + @param endpoint (str) - API endpoint to call. Use the TenantAPIs Enum.\n + @param api_version (int) - different APIs have different pagination behaviour. + this maps the pagination behaviour to v1 or v2. + \n + @kwargs item (str) - the item to be retrieved from results response (e.g. entities)\n + \n + @throws ValueError - when V2 API is used but no item is given + """ + # Ensure it always makes at least 1 call + cursor = 1 + # Check whether pagination behaviour is for V1 or V2 APIs + if api_version == 2: + is_v2 = True + if 'item' not in kwargs: + try: + raise ValueError("For is_v2 APIs you must provide collected item.") + except ValueError: + logger.exception("Error: item missing from V2 API call.", stack_info=True) + raise + item = kwargs['item'] + logger.debug("Using V2 pagination for API to collect %s", item) + else: + logger.debug("Using V1 pagination for API") + is_v2 = False + + while cursor: + if cursor != 1: + logger.debug("Getting next page of results. 
Cursor is %s", cursor) + # V1 requires all other query params are preserved + if not is_v2 or endpoint == TenantAPIs.ONEAGENTS: + kwargs['nextPageKey'] = cursor + # V2 requires all other query params are removed + else: + kwargs = dict(nextPageKey=cursor) + + response = make_api_call( + cluster=cluster, + endpoint=endpoint, + tenant=tenant, + params=kwargs + ) + + # OneAgents API pagination behaves like V1 but results returned are like V2 + if is_v2 or endpoint == TenantAPIs.ONEAGENTS: + yield response.json().get(item) + cursor = response.json().get('nextPageKey') + else: + yield response.json() + cursor = response.headers.get('next-page-key') + + +def check_response(response): + ''' + Checks if the Response has a Successful Status Code + + @param response - The response variable returned from a request\n + + ''' + logger.debug("Validating the response for the API call.") + logger.debug("Response: %s", response) + headers = response.headers + + if response.status_code == 429: + logger.warning( + "Endpoint request limit of %s was reached!", headers['x-ratelimit-limit'] + ) + # Wait until the limit resets and try again + time_to_wait = int(headers['x-ratelimit-reset'])/1000000 - time.time() + + # Check that there's actually time to wait + if time_to_wait > 0: + logger.warning("Waiting %s sec until the limit resets.", time_to_wait) + time.sleep(float(time_to_wait)) + return False + + if not 200 <= response.status_code <= 299: + try: + raise InvalidAPIResponseException( + "Response Error:\n" + f"{response.url}\n{response.status_code}\n{response.text}" + ) + except InvalidAPIResponseException: + logger.exception("Error: Invalid API response.", stack_info=True) + raise + + return True + + +def check_managed(cluster): + """Checks if the Cluster Operation is valid (Managed) for the current cluster""" + logger.debug("Checking that the cluster is Managed.") + cluster_dict = get_cluster_dict(cluster) + if not cluster_dict['is_managed']: + try: + raise ManagedClusterOnlyException() + except ManagedClusterOnlyException: + logger.exception( + "Error: Managed operation attempted on SaaS cluster.", stack_info=True + ) + raise + + +def generate_tenant_url(cluster, tenant): + """Generate URL based on SaaS or Managed""" + url = HTTPS_STR + cluster_dict = get_cluster_dict(cluster) + if cluster_dict["is_managed"]: + logger.debug("Generating URL for a Managed cluster.") + url += cluster_dict['url'] + "/e/" + cluster_dict['tenant'][tenant] + else: + logger.debug("Generating URL for a SaaS cluster.") + url += cluster_dict['tenant'][tenant] + "." 
+ cluster_dict['url']
+
+    return url
diff --git a/dynatrace/framework/settings.py b/dynatrace/framework/settings.py
new file mode 100644
index 0000000..870252c
--- /dev/null
+++ b/dynatrace/framework/settings.py
@@ -0,0 +1,154 @@
+"""Module for retrieving settings safely"""
+import json
+import yaml
+
+__IMPORTED_SETTINGS__ = {}
+
+try:
+    import user_variables  # pylint: disable=import-error
+    for attr in dir(user_variables):
+        if not str.startswith(attr, "__"):
+            __IMPORTED_SETTINGS__[attr] = getattr(user_variables, attr)
+except ImportError:
+    pass
+
+DefaultSettings = {
+    "LOG_LEVEL": "ERROR",
+    "LOG_DIR": "logs/",
+    "LOG_OUTPUT": [
+        "CONSOLE",
+    ],
+    "LOG_ENABLED": True,
+
+    # ROLE TYPE KEYS
+    # access_env
+    # change_settings
+    # install_agent
+    # view_logs
+    # view_sensitive
+    # change_sensitive
+
+    'USER_GROUPS': {
+        "role_types": {
+            "access_env": "accessenv",
+            "change_settings": "changesettings",
+            "view_logs": "logviewer",
+            "view_sensitive": "viewsensitive"
+        },
+        "role_tenants": [
+            "nonprod",
+            "prod"
+        ]
+    },
+    'USER_GROUP_TEMPLATE': "prefix_{USER_TYPE}_{TENANT}_{APP_NAME}_suffix",
+    'DEFAULT_TIMEZONE': "UTC",
+}
+
+
+def get_setting(attribute):
+    """Fetch setting from user-defined files or else default values
+
+    Args:
+        attribute (str): attribute/setting to retrieve
+
+    Raises:
+        AttributeError: Setting not defaulted nor user-defined
+
+    Returns:
+        Any: the attribute's value, in its correct variable type, if found
+    """
+    global __IMPORTED_SETTINGS__  # pylint: disable=global-statement
+    if attribute in __IMPORTED_SETTINGS__:
+        return __IMPORTED_SETTINGS__[attribute]
+    if attribute in DefaultSettings:
+        return DefaultSettings[attribute]
+    raise AttributeError(
+        f"{attribute} is not a valid user variable attribute!")
+
+
+def get_cluster_dict(cluster):
+    """Get Cluster Dict\n
+    @param cluster (dict, str) - Cluster dictionary, or name of the cluster to return\n
+    @return - Cluster dictionary
+    """
+    if isinstance(cluster, dict):
+        return cluster
+
+    if 'FULL_SET' in __IMPORTED_SETTINGS__ and cluster in __IMPORTED_SETTINGS__['FULL_SET']:
+        return __IMPORTED_SETTINGS__['FULL_SET'][cluster]
+
+    raise ValueError("Cluster not found")
+
+
+def create_cluster(cluster_name, url, **kwargs):
+    """Allow user to dynamically create cluster
+    \n
+    @param cluster_name (str) - Name of cluster to be added to the Cluster Set\n
+    @param url (str) - URL for cluster\n
+    \n
+    @kwargs tenant_ids (dict) - Dynatrace tenant name or dictionary of tenant ids\n
+    @kwargs tenant_tokens (dict) - Dynatrace tenant tokens\n
+    @kwargs cluster_token - provide cluster_token (Managed only)\n
+    @kwargs verify_ssl - Verify SSL Cert. Either Bool or path to cert\n
+    @kwargs is_managed - Manual flag if cluster is a managed instance\n\n
+    @return - the newly created cluster dictionary
+    """
+
+    verify_ssl = True if 'verify_ssl' not in kwargs else kwargs['verify_ssl']
+    is_managed = True if 'is_managed' not in kwargs else kwargs['is_managed']
+    tenant_ids = None if 'tenant_ids' not in kwargs else kwargs['tenant_ids']
+    tenant_tokens = None if 'tenant_tokens' not in kwargs else kwargs['tenant_tokens']
+
+    cluster = {
+        'url': url,
+        'tenant': {},
+        'api_token': {},
+        'verify_ssl': verify_ssl,
+        'is_managed': is_managed,
+    }
+
+    if 'cluster_token' in kwargs:
+        cluster['cluster_token'] = kwargs['cluster_token']
+
+    if isinstance(tenant_ids, dict) and isinstance(tenant_tokens, dict):
+        for tenant in tenant_ids:
+            cluster['tenant'][tenant] = tenant_ids[tenant]
+            cluster['api_token'][tenant] = tenant_tokens[tenant]
+    elif not (tenant_ids is None and tenant_tokens is None):
+        raise ValueError("Tenant ids and tenant tokens must both be dicts")
+
+    if 'FULL_SET' not in __IMPORTED_SETTINGS__:
+        __IMPORTED_SETTINGS__['FULL_SET'] = {}
+    __IMPORTED_SETTINGS__['FULL_SET'][cluster_name] = cluster
+    return __IMPORTED_SETTINGS__['FULL_SET'][cluster_name]
+
+
+def add_tenant_to_cluster(cluster, tenant_id, tenant_token, tenant_name):
+    """Add tenant to predefined cluster"""
+    if isinstance(cluster, dict):
+        raise NotImplementedError(
+            "Cluster dicts are not supported yet. Please use str for the cluster's key"
+        )
+    if isinstance(tenant_id, str) and isinstance(tenant_token, str):
+        if cluster in __IMPORTED_SETTINGS__['FULL_SET']:
+            __IMPORTED_SETTINGS__['FULL_SET'][cluster]['tenant'][tenant_name] = tenant_id
+            __IMPORTED_SETTINGS__['FULL_SET'][cluster]['api_token'][tenant_name] = tenant_token
+        else:
+            raise KeyError("Cluster not found")
+        return __IMPORTED_SETTINGS__['FULL_SET'][cluster]
+
+    raise ValueError("Tenant id and tenant token must both be str")
+
+
+def load_settings_from_file(settings_file):
+    """Assign setting values as defined in the given settings file"""
+    global __IMPORTED_SETTINGS__  # pylint: disable=global-statement
+
+    if str.endswith(settings_file, ".yaml") or str.endswith(settings_file, ".yml"):
+        with open(settings_file) as file:
+            imported_settings = yaml.load(file, Loader=yaml.FullLoader)
+
+    if str.endswith(settings_file, ".json"):
+        with open(settings_file) as file:
+            imported_settings = json.load(file)
+
+    for setting, value in imported_settings.items():
+        __IMPORTED_SETTINGS__[setting] = value
diff --git a/dynatrace/requests/request_handler.py b/dynatrace/requests/request_handler.py
deleted file mode 100644
index 3baf04d..0000000
--- a/dynatrace/requests/request_handler.py
+++ /dev/null
@@ -1,164 +0,0 @@
-"""Make API Request to available Dynatrace API"""
-import requests
-import time
-from dynatrace.exceptions import InvalidAPIResponseException, ManagedClusterOnlyException
-from enum import Enum, auto
-
-requests.packages.urllib3.disable_warnings()
-
-HTTPS_STR = "https://"
-
-
-class ClusterAPIs(Enum):
-    """
-    Enum representing Dynatrace Cluster REST API endpoints.\n
-    Use these values when adding the 'endpoint' argument.
- """ - BASE = "/api/v1.0/onpremise" - CLUSTER = f"{BASE}/cluster" - CONFIG = f"{CLUSTER}/configuration" - CONFIG_STATUS = f"{CONFIG}/status" - SSL = f"{BASE}/sslCertificate" - SSL_STORE = f"{SSL}/store" - SSO = "" # Need to confirm endpoint - GROUPS = f"{BASE}/groups" - USERS = f"{BASE}/users" - - def __str__(self): - return self.value - - -class TenantAPIs(Enum): - """ - Enum representing Dynatrace Tenant REST API endpoints.\n - Use these values when adding the 'endpoint' argument. - """ - PROBLEM_DETAILS = "/api/v1/problem/details" - PROBLEM_FEED = "/api/v1/problem/feed" - PROBLEM_STATUS = "/api/v1/problem/status" - DEPLOY_ONEAGENT = "/api/v1/deployment/installer/agent" - DEPLOY_ONEAGENT_CONNECTION_INFO = "/api/v1/deployment/installer/agent/connectioninfo" - DEPLOY_ONEAGENT_CONNECTION_ENDPOINTS = "/api/v1/deployment/installer/agent/connectioninfo/endpoints" - DEPLOY_ACTIVEGATE = "/api/v1/deployment/installer/gateway" - DEPLOY_BOSH = "/api/v1/deployment/boshrelease" - EVENTS = "/api/v1/events" - USER_SESSIONS = "/api/v1/userSessionQueryLanguage" - TOKENS = "/api/v1/tokens" - SYNTHETIC_MONITORS = "/api/v1/synthetic/monitors" - SYNTHETIC_LOCATIONS = "/api/v1/synthetic/locations" - SYNTHETIC_NODES = "/api/v1/synthetic/nodes" - ENTITIES = "/api/v2/entities" - METRICS = "/api/v2/metrics" - TAGS = "/api/v2/tags" - NETWORK_ZONES = "/api/v2/networkZones" - MANAGEMENT_ZONES = "/api/config/v1/managementZones" - V1_TOPOLOGY = "/api/v1/entity" - MAINTENANCE_WINDOWS = "/api/config/v1/maintenanceWindows" - ONEAGENTS = "/api/v1/oneagents" - EXTENSIONS = "/api/config/v1/extensions" - REQUEST_ATTRIBUTES = "/api/config/v1/service/requestAttributes/" - REQUEST_NAMING = "/api/config/v1/service/requestNaming" - - def __str__(self): - return self.value - - -class HTTP(Enum): - ''' - Enum representing HTTP request methods.\n - Use these values when adding the 'method' argument. - ''' - GET = auto() - PUT = auto() - POST = auto() - DELETE = auto() - - def __str__(self): - return self.name - - def __repr__(self): - return self.name - - -def make_api_call(cluster, endpoint, tenant=None, params=None, json=None, method=HTTP.GET): - ''' - Function makes an API call in a safe way, taking into account the rate limits. - This will ensure the API call will always go through, with the program waiting for the limit to reset if needed.\n - - @param cluster - Cluster dictionary from variable_set\n - @param endpoint - API endpoint to call.\n - @param tenant - String of tenant name used in cluster dictionary\n - @param json - dictionary to be converted to JSON request\n - @param method - HTTP method to use in call. 
Use HTTP enum.\n - \n - @return - response from request\n - ''' - # Set the right URL for the operation - url = f"{generate_tenant_url(cluster, tenant)}{endpoint}" if tenant else f"{HTTPS_STR}{cluster['url']}" - - if not params: - params = {} - - # Get correct token for the operation - if 'onpremise' in str(endpoint) or 'cluster' in str(endpoint): - check_managed(cluster) - headers = dict(Authorization=f"Api-Token {cluster['cluster_token']}") - else: - headers = dict(Authorization=f"Api-Token {cluster['api_token'][tenant]}") - - # Loop to retry in case of rate limits - while True: - response = requests.request( - method=str(method), - url=url, - params=params, - headers=headers, - json=json, - verify=cluster.get('verify_ssl') - ) - if check_response(response): - break - - return response - - -def check_response(response): - ''' - Checks if the Response has a Successful Status Code - - @param response - The response variable returned from a request\n - - ''' - headers = response.headers - - if response.status_code == 429: - print("Endpoint request limit of " - f"{headers['x-ratelimit-limit']} was reached!") - # Wait until the limit resets and try again - time_to_wait = int(headers['x-ratelimit-reset'])/1000000 - time.time() - - # Check that there's actually time to wait - if time_to_wait > 0: - print(f"Waiting {time_to_wait} sec until the limit resets.") - time.sleep(float(time_to_wait)) - return False - elif not 200 <= response.status_code <= 299: - raise InvalidAPIResponseException(f"Response Error:\n{response.url}\n{response.status_code}\n{response.text}") - - return True - - -def check_managed(cluster): - """Checks if the Cluster Operation is valid (Managed) for the current cluster""" - if not cluster['is_managed']: - raise ManagedClusterOnlyException() - - -def generate_tenant_url(cluster, tenant): - """Generate URL based on SaaS or Managed""" - url = HTTPS_STR - if cluster["is_managed"]: - url += cluster['url'] + "/e/" + cluster['tenant'][tenant] - else: - url += cluster['tenant'][tenant] + "." 
+ cluster['url'] - return url diff --git a/dynatrace/tenant/entities.py b/dynatrace/tenant/entities.py new file mode 100644 index 0000000..9dff064 --- /dev/null +++ b/dynatrace/tenant/entities.py @@ -0,0 +1,493 @@ +"""Module for Entities API operations""" + +from enum import Enum, auto +from dynatrace.framework import log_handler +from dynatrace.framework import request_handler as rh + +logger = log_handler.get_logger(__name__) + + +class EntityTypes(Enum): + """Accepted values for EntityType arguments""" + HTTP_CHECK = auto() + RELATIONAL_DATABASE_SERVICE = auto() + APPLICATION = auto() + KUBERNETES_NODE = auto() + CONTAINER_GROUP_INSTANCE = auto() + OPENSTACK_COMPUTE_NODE = auto() + QUEUE = auto() + EBS_VOLUME = auto() + OPENSTACK_PROJECT = auto() + PROCESS_GROUP = auto() + EC2_INSTANCE = auto() + GEOLOC_SITE = auto() + DEVICE_APPLICATION_METHOD_GROUP = auto() + AWS_AVAILABILITY_ZONE = auto() + SYNTHETIC_TEST_STEP = auto() + AZURE_STORAGE_ACCOUNT = auto() + AZURE_IOT_HUB = auto() + AWS_APPLICATION_LOAD_BALANCER = auto() + CLOUD_APPLICATION_NAMESPACE = auto() + BROWSER = auto() + GEOLOCATION = auto() + HTTP_CHECK_STEP = auto() + HYPERVISOR_DISK = auto() + AZURE_APP_SERVICE_PLAN = auto() + NEUTRON_SUBNET = auto() + S3BUCKET = auto() + NETWORK_INTERFACE = auto() + QUEUE_INSTANCE = auto() + APPLICATION_METHOD_GROUP = auto() + GCP_ZONE = auto() + OPENSTACK_VM = auto() + MOBILE_APPLICATION = auto() + PROCESS_GROUP_INSTANCE = auto() + HOST_GROUP = auto() + SYNTHETIC_LOCATION = auto() + SERVICE_INSTANCE = auto() + GOOGLE_COMPUTE_ENGINE = auto() + AZURE_SERVICE_BUS_TOPIC = auto() + AZURE_TENANT = auto() + CLOUD_APPLICATION = auto() + AZURE_EVENT_HUB = auto() + DEVICE_APPLICATION_METHOD = auto() + AZURE_SERVICE_BUS_NAMESPACE = auto() + VIRTUALMACHINE = auto() + ELASTIC_LOAD_BALANCER = auto() + AZURE_SUBSCRIPTION = auto() + AZURE_REDIS_CACHE = auto() + AWS_NETWORK_LOAD_BALANCER = auto() + BOSH_DEPLOYMENT = auto() + EXTERNAL_SYNTHETIC_TEST_STEP = auto() + DOCKER_CONTAINER_GROUP_INSTANCE = auto() + APPLICATION_METHOD = auto() + AZURE_CREDENTIALS = auto() + AZURE_MGMT_GROUP = auto() + SERVICE_METHOD_GROUP = auto() + AZURE_FUNCTION_APP = auto() + AZURE_SQL_SERVER = auto() + AZURE_SQL_DATABASE = auto() + AZURE_VM = auto() + OPENSTACK_AVAILABILITY_ZONE = auto() + SWIFT_CONTAINER = auto() + CLOUD_APPLICATION_INSTANCE = auto() + SERVICE = auto() + VMWARE_DATACENTER = auto() + AZURE_EVENT_HUB_NAMESPACE = auto() + VCENTER = auto() + AZURE_SERVICE_BUS_QUEUE = auto() + SERVICE_METHOD = auto() + OS = auto() + CONTAINER_GROUP = auto() + AWS_CREDENTIALS = auto() + AZURE_SQL_ELASTIC_POOL = auto() + DATASTORE = auto() + HYPERVISOR_CLUSTER = auto() + SYNTHETIC_TEST = auto() + EXTERNAL_SYNTHETIC_TEST = auto() + AUTO_SCALING_GROUP = auto() + CUSTOM_APPLICATION = auto() + AZURE_API_MANAGEMENT_SERVICE = auto() + DISK = auto() + HYPERVISOR = auto() + CUSTOM_DEVICE = auto() + AZURE_REGION = auto() + CINDER_VOLUME = auto() + DOCKER_CONTAINER_GROUP = auto() + KUBERNETES_CLUSTER = auto() + AZURE_WEB_APP = auto() + HOST = auto() + AZURE_LOAD_BALANCER = auto() + OPENSTACK_REGION = auto() + AWS_LAMBDA_FUNCTION = auto() + AZURE_APPLICATION_GATEWAY = auto() + AZURE_VM_SCALE_SET = auto() + AZURE_COSMOS_DB = auto() + DYNAMO_DB_TABLE = auto() + CUSTOM_DEVICE_GROUP = auto() + + def __str__(self): + return str(self.name) + + def __repr__(self): + return str(self.name) + + +def get_entities_tenantwide(cluster, tenant, entity_type, **kwargs): + """Get all Entities of specified type in the tenant.\n + + @param cluster - Dynatrace Cluster 
(from variable set)\n
+    @param tenant - Dynatrace Tenant (from variable set)\n
+    @param entity_type - use EntityTypes enum\n
+    @kwargs entitySelector - used to filter entities\n
+    @kwargs from - timeframe start\n
+    @kwargs to - timeframe end\n
+    @kwargs fields - entity detail fields\n\n
+    @return - List of all entities matching the selection.
+    """
+    # If entitySelector already present, don't overwrite
+    if 'entitySelector' in kwargs:
+        kwargs['entitySelector'] += f',type({entity_type})'
+    else:
+        kwargs['entitySelector'] = f'type({entity_type})'
+
+    logger.info("Getting whole result set for entities in %s tenant", tenant)
+    response = rh.get_results_whole(
+        cluster=cluster,
+        tenant=tenant,
+        api_version=2,
+        item='entities',
+        endpoint=rh.TenantAPIs.ENTITIES,
+        **kwargs
+    )
+    return response.get('entities')
+
+
+def get_entities_clusterwide(cluster, entity_type, aggregated=True, **kwargs):
+    """Get all Entities of specified type in the cluster.
+    \n
+    @param cluster - Dynatrace Cluster (from variable set)\n
+    @param entity_type - use EntityTypes enum\n
+    @param aggregated - whether results should be split by tenant\n
+    @kwargs entitySelector - used to filter entities\n
+    @kwargs from - timeframe start\n
+    @kwargs to - timeframe end\n
+    @kwargs fields - entity detail fields\n\n
+    @return - List of all entities matching the selection if aggregated.
+              Dictionary with tenants as keys if not aggregated.
+    """
+    split_entities = {}
+    all_entities = []
+
+    logger.info("Getting whole result set for entities in cluster")
+    for tenant in cluster['tenant']:
+        tenant_entities = get_entities_tenantwide(
+            cluster=cluster,
+            tenant=tenant,
+            entity_type=entity_type,
+            **kwargs
+        )
+        all_entities.extend(tenant_entities)
+        split_entities[tenant] = tenant_entities
+
+    return all_entities if aggregated else split_entities
+
+
+def get_entities_setwide(full_set, entity_type, aggregated=True, **kwargs):
+    """Get all Entities of specified type in the full cluster set.
+    \n
+    @param full_set - Variable set (from user variables)\n
+    @param entity_type - use EntityTypes enum\n
+    @param aggregated - whether results should be split by cluster\n
+    @kwargs entitySelector - used to filter entities\n
+    @kwargs from - timeframe start\n
+    @kwargs to - timeframe end\n
+    @kwargs fields - entity detail fields\n\n
+    @return - List of all entities matching the selection if aggregated.
+              Dictionary with clusters as keys if not aggregated.
+    """
+    split_entities = {}
+    all_entities = []
+
+    logger.info("Getting whole result set for entities in all clusters")
+    for cluster in full_set:
+        cluster_entities = get_entities_clusterwide(
+            cluster=full_set[cluster],
+            entity_type=entity_type,
+            **kwargs
+        )
+
+        all_entities.extend(cluster_entities)
+        split_entities[cluster] = cluster_entities
+
+    return all_entities if aggregated else split_entities
+
+
+def get_entities_by_page(cluster, tenant, entity_type, **kwargs):
+    """Get all Entities of specified type, page by page.\n
+
+    @param cluster - Dynatrace Cluster (from variable set)\n
+    @param tenant - Dynatrace Tenant (from variable set)\n
+    @param entity_type - use EntityTypes enum\n
+    @kwargs entitySelector - used to filter entities\n
+    @kwargs from - timeframe start\n
+    @kwargs to - timeframe end\n
+    @kwargs fields - entity detail fields\n
+    @kwargs pageSize - max. number of entities returned per call.\n\n
+    @return - Generator object (page by page) of all entities that match.
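A consuming loop might look like the following sketch (cluster and tenant are placeholders from the variable set; displayName assumes the API's default field set):

    for page in get_entities_by_page(cluster, "tenant1", EntityTypes.HOST,
                                     pageSize=500):
        for entity in page:
            print(entity.get("displayName"))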
+ """ + # If entitySelector already present, don't overwrite + if 'entitySelector' in kwargs: + kwargs['entitySelector'] += f',type({entity_type})' + else: + kwargs['entitySelector'] = f'type({entity_type})' + + logger.info("Getting paged result set for entities in %s tenant", tenant) + response = rh.get_results_by_page( + cluster=cluster, + tenant=tenant, + endpoint=rh.TenantAPIs.ENTITIES, + api_version=2, + item='entities', + **kwargs + ) + + for entities in response: + yield entities + + +def get_entity(cluster, tenant, entity_id, **kwargs): + """Get the details of an entity specified by ID. + You can use more than one ID if they're comma separated (id-1,id-2).\n + + @param cluster - Dynatrace Cluster (from variable set)\n + @param tenant - Dynatrace Tenant (from variable set)\n + @param entity_id - ID of monitored Entity\n + @kwargs entitySelector - used to filter entities\n + @kwargs from - timeframe start\n + @kwargs to - timeframe end\n + @kwargs fields - entity detail fields\n + @return - One entity for one ID. List of entities otherwise. + """ + # If entitySelector already present, don't overwrite + if 'entitySelector' in kwargs: + kwargs['entitySelector'] += f',entityId({entity_id})' + else: + kwargs['entitySelector'] = f'entityId({entity_id})' + + logger.info("Getting entity details for ID(s) %s", entity_id) + response = rh.get_results_whole( + cluster=cluster, + tenant=tenant, + endpoint=rh.TenantAPIs.ENTITIES, + api_version=2, + item='entities', + **kwargs + ) + + if response.get('totalCount') == 1: + return response.get('entities')[0] + + return response.get('entities') + + +def get_entity_count_tenantwide(cluster, tenant, entity_type, **kwargs): + """Get the total number of entities of a given type in the tenant.\n + + @param cluster - Dynatrace Cluster (from variable set)\n + @param tenant - Dynatrace Tenant (from variable set)\n + @param entity_type - use EntityTypes enum for this\n + @kwargs entitySelector - used to filter entities\n + @kwargs from - timeframe start\n + @kwargs to - timeframe end\n\n + @return - number of entities. 
+ """ + if 'from' not in kwargs: + kwargs['from'] = "now-24h" + # pageSize is irrelevant, so make the response size minimal + kwargs['pageSize'] = 1 + # fields are irrelevant, so make the response size minimal + kwargs['fields'] = "" + + # If entitySelector already present, don't overwrite + if 'entitySelector' in kwargs: + kwargs['entitySelector'] += f',type({entity_type})' + else: + kwargs['entitySelector'] = f'type({entity_type})' + + logger.info("Getting entity count from %s tenant", tenant) + response = rh.make_api_call( + cluster=cluster, + tenant=tenant, + endpoint=rh.TenantAPIs.ENTITIES, + params=kwargs + ) + + return response.json().get('totalCount') + + +def get_entity_count_clusterwide(cluster, entity_type, **kwargs): + """Get total number of entitites of a given type in the cluster.\n + + @param cluster - Dynatrace Cluster (from variable set)\n + @param tenant - Dynatrace Tenant (from variable set)\n + @param entity_type - use EntityTypes enum for this\n + @kwargs entitySelector - used to filter entities\n + @kwargs from - timeframe start\n + @kwargs to - timeframe end\n\n + @return - number of entities + """ + count = 0 + logger.info("Getting entity count from cluster") + for tenant in cluster['tenant']: + count += get_entity_count_tenantwide( + cluster=cluster, + tenant=tenant, + entity_type=entity_type, + **kwargs + ) + return count + + +def get_entity_count_setwide(full_set, entity_type, **kwargs): + """Get total number of entities of a give type in the cluster set.\n + + @param cluster - Dynatrace Cluster (from variable set)\n + @param tenant - Dynatrace Tenant (from variable set)\n + @param entity_type - use EntityTypes enum for this\n + @kwargs entitySelector - used to filter entities\n + @kwargs from - timeframe start\n + @kwargs to - timeframe end\n\n + @return - number of entities + """ + count = 0 + logger.info("Getting entity count from all clusters") + for cluster in full_set: + count += get_entity_count_clusterwide( + cluster=full_set[cluster], + entity_type=entity_type, + **kwargs + ) + return count + + +def add_tags(cluster, tenant, tag_list, **kwargs): + """Add tags to entities. + Must specify at least an Entity Type or ID in entitySelector.\n + + @param cluster - Dynatrace Cluster (from variable set)\n + @param tenant - Dynatrace Tenant (from variable set)\n + @param tag_list - list of tags as dictionaries with "key" and + optionally "value" attributes\n + @kwargs entitySelector - must specify at least either type or entityId. 
+ use EntityTypes enum for type.\n\n + @throws TypeError - if tag_list is empty or not a list\n + @throws ValueError - if neither entity_type nor entity_id are specified + """ + # Sanity checking, error handling + if not tag_list: + try: + raise TypeError("No tags provided") + except TypeError: + logger.exception("Error: No tags provided", stack_info=True) + raise + if not isinstance(tag_list, list): + try: + raise TypeError("tags_list is not a list") + except TypeError: + logger.exception("Error: tags_list must be a list", stack_info=True) + raise + if 'type' not in kwargs['entitySelector'] \ + and 'entityId' not in kwargs['entitySelector']: + try: + raise ValueError("entitySelector must have at least type or entityId") + except ValueError: + logger.exception( + "Error: entitySelector missing required values", stack_info=True + ) + raise + + logger.info("Adding tags to entities") + response = rh.make_api_call( + cluster=cluster, + tenant=tenant, + method=rh.HTTP.POST, + endpoint=rh.TenantAPIs.TAGS, + params=kwargs, + json=dict(tags=tag_list) + ) + + return response + + +def delete_tag(cluster, tenant, tag_key, tag_value=None, **kwargs): + """Delete a tag from entities. + Must specify at least an Entity Type or ID in entitySelector.\n + + @param cluster - Dynatrace Cluster (from variable set)\n + @param tenant - Dynatrace Tenant (from variable set)\n + @param tag_key - the key of the tag(s) to be deleted\n + @param tag_value - the value for the tag key to be deleted. + Use "all" to delete all values for the key.\n + @kwargs entitySelector - must specify at least either type or entityId. + use EntityTypes enum for type.\n\n + @throws TypeError - if tag_key is empty or missing\n + @throws ValueError - if neither entity_type nor entity_id are specified + """ + # Sanity checking, error handling + if not tag_key: + try: + raise TypeError("No tag key provided") + except TypeError: + logger.exception("Error: Must provide a tag key", stack_info=True) + raise + if 'type' not in kwargs['entitySelector'] \ + and 'entityId' not in kwargs['entitySelector']: + try: + raise ValueError("entitySelector must have at least type or entityId") + except ValueError: + logger.exception( + "Error: entitySelector missing required values", stack_info=True + ) + raise + + # Set params for tag key & value + kwargs['key'] = tag_key + if tag_value == "all": + kwargs['deleteAllWithKey'] = True + logger.info("Deleting all %s tags from entities", tag_key) + elif tag_value: + kwargs['value'] = tag_value + logger.info("Deleting %s:%s tags from entities", tag_key, tag_value) + else: + logger.info("Deleting %s tag from entities.", tag_key) + + response = rh.make_api_call( + cluster=cluster, + tenant=tenant, + method=rh.HTTP.DELETE, + endpoint=rh.TenantAPIs.TAGS, + params=kwargs + ) + return response + + +def custom_device(cluster, tenant, json_data): + """Creates or updates a custom device based on given JSON data.\n + + @param cluster - Dynatrace Cluster (from variable set)\n + @param tenant - Dynatrace Tenant (from variable set)\n + @param json_data - device properties in JSON format. 
Valid properties are:\n + ---------- str: customDeviceId (mandatory)\n + ---------- str: displayName (mandatory)\n + ---------- str: group\n + ---------- list(str): ipAddress\n + ---------- list(int): listenPorts\n + ---------- str: type (mandatory)\n + ---------- str: faviconUrl\n + ---------- str: configUrl\n + ---------- dict(str: str): properties\n + ---------- list(str): dnsNames\n + @throws ValueError - if mandatory properties missing from JSON data + """ + # Sanity checking, error handling + try: + if not json_data.get('customDeviceId') or not json_data.get('displayName'): + raise ValueError("JSON data is missing Device ID and/or Name.") + # json_data.type can be NoneType when device already exists + if not get_entity(cluster, tenant, json_data.get('customDeviceId')) \ + and not json_data.get('type'): + raise ValueError("type must be in JSON data when creating a device") + except ValueError: + logger.exception("Error: Missing mandatory details.", stack_info=True) + raise + + logger.info("Creating/updating custom device.") + response = rh.make_api_call( + cluster=cluster, + tenant=tenant, + method=rh.HTTP.POST, + json=json_data, + endpoint=f'{rh.TenantAPIs.ENTITIES}/custom' + ) + return response diff --git a/dynatrace/tenant/extensions.py b/dynatrace/tenant/extensions.py index a5967e8..6cb7186 100644 --- a/dynatrace/tenant/extensions.py +++ b/dynatrace/tenant/extensions.py @@ -1,34 +1,308 @@ -from dynatrace.requests import request_handler as rh +"""Operations Interacting with Dynatrace Extensions API""" +from dynatrace.framework import request_handler as rh, log_handler +from dynatrace.tenant import metrics ENDPOINT = rh.TenantAPIs.EXTENSIONS +logger = log_handler.get_logger(__name__) -def get_all_extensions(cluster, tenant, params=None): - """ Gets the list of all extensions available""" - # TODO: Add pagination +def get_all_extensions(cluster, tenant, page_size=200): + """Gets a list of all extensions available on the tenant. + List is returned whole regardless of page size; page size can be used to control the + number of API calls. + \n + @param cluster (dict) - Cluster dictionary (as taken from variable set)\n + @param tenant (str) - Tenant name (as taken from variable set)\n + @param page_size (int) - page size between 1 and 500 (default 200) + \n + @returns list - list of extensions + """ + logger.info("Getting all extensions in %s tenant", tenant) + extension_list = rh.get_results_whole( + cluster=cluster, + tenant=tenant, + endpoint=ENDPOINT, + api_version=2, + pageSize=page_size, + item="extensions" + ).get('extensions') - response = rh.make_api_call(cluster=cluster, - tenant=tenant, - endpoint=ENDPOINT, - params=params) - return response.json().get('extensions') + return extension_list def get_extension_details(cluster, tenant, extension_id): - """ Get the details of a specific extension""" + """Get the details of a specific extension. 
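For example, a sketch of a lookup (the extension ID shown is hypothetical, and the name/version fields are assumptions about the response payload):

    details = get_extension_details(cluster, "tenant1",
                                    "custom.remote.python.example")
    print(details.get("name"), details.get("version"))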
+ \n + @param cluster (dict) - Cluster dictionary (as taken from variable set)\n + @param tenant (str) - Tenant name (as taken from variable set)\n + @param extension_id (str) - ID of extension to get the details for + \n + @returns (dict) - JSON response containing extension details + """ + logger.info("Getting extension details for %s", extension_id) + details = rh.make_api_call( + cluster=cluster, + endpoint=f"{ENDPOINT}/{extension_id}", + tenant=tenant + ).json() - response = rh.make_api_call(cluster=cluster, - endpoint=f"{ENDPOINT}/{extension_id}", - tenant=tenant) - return response.json() + return details -def get_extension_states(cluster, tenant, extension_id, params=None): - """ Gets all the deployment states of a specific extension""" - # TODO: Add pagination - response = rh.make_api_call(cluster=cluster, - tenant=tenant, - endpoint=f"{ENDPOINT}/{extension_id}/states", - params=params) +def get_extension_metrics(cluster, tenant, extension_id): + """Gets a list of metric IDs that are collected by the extension. + \n + @param cluster (dict) - Cluster dictionary (as taken from variable set)\n + @param tenant (str) - Tenant name (as taken from variable set)\n + @param extension_id (str) - ID of extension + \n + @returns list - list of metric IDs + """ + logger.info("Getting metrics collected by extension %s", extension_id) + metric_group = get_extension_details(cluster, tenant, extension_id).get('metricGroup') + ext_metrics = metrics.get_metric_descriptor( + cluster=cluster, + tenant=tenant, + metricSelector=f"ext:{metric_group}.*" + ) - return response.json().get('states') + return list(m.get('metricId') for m in ext_metrics) + + +def get_extension_global_config(cluster, tenant, extension_id): + """Gets the global configuration for a given extension. + Does not apply to ActiveGate extensions. + \n + @param cluster (dict) - Cluster dictionary (as taken from variable set)\n + @param tenant (str) - Tenant name (as taken from variable set)\n + @param extension_id (str) - ID of extension to get the config for + \n + @returns dict - global configuration + """ + logger.info("Getting global configuration for extension %s", extension_id) + config = rh.make_api_call( + cluster=cluster, + tenant=tenant, + endpoint=f"{ENDPOINT}/{extension_id}/global" + ).json() + + return config + + +def get_extension_instance_config(cluster, tenant, extension_id, instance_id): + """Gets the configuration for an instance of an extension. + For remote extensions this is an endpoint config, otherwise a host config. + \n + @param cluster (dict) - Cluster dictionary (as taken from variable set)\n + @param tenant (str) - Tenant name (as taken from variable set)\n + @param extension_id (str) - ID of extension to get the config for\n + @param instance_id (str) - ID of extension instance to get config for + \n + @returns dict - instance configuration + """ + logger.info( + "Getting configuration for instance %s on extension %s", instance_id, extension_id + ) + config = rh.make_api_call( + cluster=cluster, + tenant=tenant, + endpoint=f"{ENDPOINT}/{extension_id}/instances/{instance_id}" + ).json() + + return config + + +def get_extension_states(cluster, tenant, extension_id): + """Gets all the deployment states (instances) of a specific extension. + For remote extensions these are endpoints, for other extensions these are + processes/hosts. States are independent of global/instance configuration. 
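A sketch of typical use (the extension ID is hypothetical, and the "OK" state value is an assumption about the API's payload):

    states = get_extension_states(cluster, "tenant1",
                                  "custom.remote.python.example")
    unhealthy = [s for s in states if s.get("state") != "OK"]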
+ \n + @param cluster (dict) - Cluster dictionary (as taken from variable set)\n + @param tenant (str) - Tenant name (as taken from variable set)\n + @param extension_id (str) - ID of extension to get the states for + \n + @returns list - states/instances of this extension + """ + logger.info("Getting states for extension %s", extension_id) + states = rh.get_results_whole( + cluster=cluster, + tenant=tenant, + endpoint=f"{ENDPOINT}/{extension_id}/states", + api_version=2, + item="states" + ).get('states') + + return states + + +def get_extension_instances(cluster, tenant, extension_id): + """Gets all the configuration instances of a specific extension. + An instance is an endpoint for a remote extension, otherwise a host. + \n + @param cluster (dict) - Cluster dictionary (as taken from variable set)\n + @param tenant (str) - Tenant name (as taken from variable set)\n + @param extension_id (str) - ID of extension to get the states for + \n + @returns list - configuration instances for this extension + """ + logger.info("Getting instances for extension %s", extension_id) + instances = rh.get_results_whole( + cluster=cluster, + tenant=tenant, + endpoint=f"{ENDPOINT}/{extension_id}/instances", + api_version=2, + item="configurationsList" + ).get('configurationsList') + + return instances + + +def enable_global_config(cluster, tenant, extension_id): + """Enables the global configuration for an extension. + Not applicable to remote extensions. + \n + @param cluster (dict) - Cluster dictionary (as taken from variable set)\n + @param tenant (str) - Tenant name (as taken from variable set)\n + @param extension_id (str) - ID of extension to enable + \n + @returns dict - HTTP response for the request + """ + config = get_extension_global_config(cluster, tenant, extension_id) + + config['enabled'] = True + logger.info("Enabling global config for extension %s", extension_id) + response = update_global_config(cluster, tenant, extension_id, config) + + return response + + +def disable_global_config(cluster, tenant, extension_id): + """Disables the global configuration for an extension. + Not applicable to remote extensions. + \n + @param cluster (dict) - Cluster dictionary (as taken from variable set)\n + @param tenant (str) - Tenant name (as taken from variable set)\n + @param extension_id (str) - ID of extension to disable + \n + @returns dict - HTTP response for the request + """ + config = get_extension_global_config(cluster, tenant, extension_id) + + config['enabled'] = False + logger.info("Disabling global config for extension %s", extension_id) + response = update_global_config(cluster, tenant, extension_id, config) + + return response + + +def update_global_config(cluster, tenant, extension_id, config): + """Updates the global configuration for an extension. + Not applicable to remote extensions. + \n + @param cluster (dict) - Cluster dictionary (as taken from variable set)\n + @param tenant (str) - Tenant name (as taken from variable set)\n + @param extension_id (str) - ID of extension to update\n + @param config (dict) - new configuration as JSON dictionary + \n + @returns dict - HTTP response to request + """ + logger.info("Updating global config for extension %s", extension_id) + response = rh.make_api_call( + cluster=cluster, + tenant=tenant, + endpoint=f"{ENDPOINT}/{extension_id}/global", + method=rh.HTTP.PUT, + json=config + ) + + return response + + +def enable_instance_config(cluster, tenant, extension_id, instance_id): + """Enables the configuration for an instance of an extension. 
+ Instance is endpoint for a remote extension, otherwise a host. + \n + @param cluster (dict) - Cluster dictionary (as taken from variable set)\n + @param tenant (str) - Tenant name (as taken from variable set)\n + @param extension_id (str) - ID of extension to enable\n + @param instance_id (str) - ID of extension instance to enable + \n + @returns dict - HTTP response to request + """ + config = get_extension_instance_config( + cluster, tenant, extension_id, instance_id + ) + + config['enabled'] = True + + # API BUG: For remote extensions useGlobal is null, but API call doesn't support it + if config.get('useGlobal') is None: + config['useGlobal'] = False + # API BUG: For remote extensions the extension ID in the config is the instance id + # this needs to be set back to the extension ID otherwise fails. + if 'activeGate' in config: + config['extensionId'] = extension_id + + logger.info("Enabling config for instance %s of %s", instance_id, extension_id) + response = update_instance_config( + cluster, tenant, extension_id, instance_id, config + ) + + return response + + +def disable_instance_config(cluster, tenant, extension_id, instance_id): + """Disables the configuration for an instance of an extension. + Instance is endpoint for a remote extension, otherwise a host. + \n + @param cluster (dict) - Cluster dictionary (as taken from variable set)\n + @param tenant (str) - Tenant name (as taken from variable set)\n + @param extension_id (str) - ID of extension to enable\n + @param instance_id (str) - ID of extension instance to disable + \n + @returns dict - HTTP response to request + """ + config = get_extension_instance_config( + cluster, tenant, extension_id, instance_id + ) + + config['enabled'] = False + + # API BUG: For remote extensions useGlobal is null, but API call doesn't support it + if config.get('useGlobal') is None: + config['useGlobal'] = False + # API BUG: For remote extensions the extension ID in the config is the instance id + # this needs to be set back to the extension ID otherwise fails. + if 'activeGate' in config: + config['extensionId'] = extension_id + + logger.info("Disabling config for instance %s of %s", instance_id, extension_id) + response = update_instance_config( + cluster, tenant, extension_id, instance_id, config + ) + + return response + + +def update_instance_config(cluster, tenant, extension_id, instance_id, config): + """Updates the configuration for an instance of an extension. + Instance is endpoint for a remote extension, otherwise a host. + \n + @param cluster (dict) - Cluster dictionary (as taken from variable set)\n + @param tenant (str) - Tenant name (as taken from variable set)\n + @param extension_id (str) - ID of extension to update\n + @param instance_id (str) - ID of extension instance to update + @param config (dict) - new configuration as JSON dictionary + \n + @returns dict - HTTP response to request + """ + logger.info("Updating config for instance %s of %s", instance_id, extension_id) + response = rh.make_api_call( + cluster=cluster, + tenant=tenant, + endpoint=f"{ENDPOINT}/{extension_id}/instances/{instance_id}", + method=rh.HTTP.PUT, + json=config + ) + + return response diff --git a/dynatrace/tenant/host_groups.py b/dynatrace/tenant/host_groups.py deleted file mode 100644 index 495b012..0000000 --- a/dynatrace/tenant/host_groups.py +++ /dev/null @@ -1,44 +0,0 @@ -"""Host Group Information for Tenant""" -from dynatrace.tenant.topology import hosts as topology_hosts - -# TODO redo export function (break out to export function?) 
-# def export_host_groups_setwide(full_set): - -# get_host_groups_setwide(full_set) -# with open('txt/HostGroups - ' + envName + '.txt', 'w') as outFile: -# for groupName in hostGroups.values(): -# outFile.write(groupName+"\n") -# print(envName + " writing to 'HostGroups - " + envName + ".txt'") - - -def get_host_groups_tenantwide(cluster, tenant): - params = { - 'relativeTime': 'day', - 'includeDetails': 'true' - } - response = topology_hosts.get_hosts_tenantwide(cluster, - tenant, - params=params) - host_groups = {} - for host in response: - if host.get('hostGroup'): - host_groups[host['hostGroup']['meId']] = host['hostGroup']['name'] - return host_groups - - -def get_host_groups_clusterwide(cluster): - # TODO add split_by_tenant optional variable - host_groups_custerwide = {} - for tenant in cluster['tenant']: - host_groups_custerwide.update( - get_host_groups_tenantwide(cluster, tenant) - ) - return host_groups_custerwide - - -def get_host_groups_setwide(full_set): - # TODO add split_by_tenant optional variable - host_groups_setwide = {} - for cluster in full_set.values(): - host_groups_setwide.update(get_host_groups_clusterwide(cluster)) - return host_groups_setwide diff --git a/dynatrace/tenant/maintenance.py b/dynatrace/tenant/maintenance.py index c1aef4e..7e92a1a 100644 --- a/dynatrace/tenant/maintenance.py +++ b/dynatrace/tenant/maintenance.py @@ -1,10 +1,10 @@ """Maintenance Window Operations""" import datetime import re -import dynatrace.requests.request_handler as rh -import user_variables as uv -from dynatrace.exceptions import InvalidDateFormatException from enum import Enum, auto +import dynatrace.framework.request_handler as rh +from dynatrace.framework.settings import get_setting +from dynatrace.framework.exceptions import InvalidDateFormatException MZ_ENDPOINT = rh.TenantAPIs.MAINTENANCE_WINDOWS @@ -15,8 +15,8 @@ class Suppression(Enum): Types of suppression for create Maintenance Window JSON. Suppression is required Args: - Enum (DETECT_PROBLEMS_AND_ALERT): Full Alerting. 
-        Enum (DETECT_PROBLEMS_AND_ALERT): Full Alerting. Entites in scope will have notes that a Maintenance Window was active
-        Enum (DETECT_PROBLEMS_DONT_ALERT): Problems detected but alerting profiles in that scope are not triggered
+        Enum (DETECT_PROBLEMS_AND_ALERT): Full Detection and Alerting during Maintenance Window
+        Enum (DETECT_PROBLEMS_DONT_ALERT): Problems detected but alerts in scope are not triggered
         Enum (DONT_DETECT_PROBLEMS): Problem detection completely off for the scope
     """
     DETECT_PROBLEMS_AND_ALERT = auto()
@@ -24,10 +24,10 @@ class Suppression(Enum):
     DONT_DETECT_PROBLEMS = auto()
 
     def __str__(self):
-        return self.name
+        return str(self.name)
 
     def __repr__(self):
-        return self.name
+        return str(self.name)
 
 
 class DayOfWeek(Enum):
@@ -53,10 +53,10 @@ class DayOfWeek(Enum):
     SUNDAY = auto()
 
     def __str__(self):
-        return self.name
+        return str(self.name)
 
     def __repr__(self):
-        return self.name
+        return str(self.name)
 
 
 class Context(Enum):
@@ -71,10 +71,10 @@ class Context(Enum):
     KUBERNETES = auto()
 
     def __str__(self):
-        return self.name
+        return str(self.name)
 
     def __repr__(self):
-        return self.name
+        return str(self.name)
 
 
 class RecurrenceType(Enum):
@@ -85,10 +85,10 @@ class RecurrenceType(Enum):
     WEEKLY = auto()
 
     def __str__(self):
-        return self.name
+        return str(self.name)
 
    def __repr__(self):
-        return self.name
+        return str(self.name)
 
 
 class FilterType(Enum):
@@ -187,27 +187,49 @@ class FilterType(Enum):
     VMWARE_DATACENTER = auto()
 
     def __str__(self):
-        return self.name
+        return str(self.name)
 
     def __repr__(self):
-        return self.name
+        return str(self.name)
 
 
 def validate_datetime(datetime_text, required_format):
+    """Validate input against expected DateTime format
+
+    Args:
+        datetime_text (str): Input date/time string
+        required_format (str): Expected format to validate against
+
+    Raises:
+        InvalidDateFormatException: Raised when an incorrect format is provided
+    """
     try:
         datetime.datetime.strptime(datetime_text, required_format)
-    except ValueError:
-        raise InvalidDateFormatException(required_format)
+    except ValueError as err:
+        raise InvalidDateFormatException(required_format) from err
 
 
 def generate_tag_scope(tag, filter_type=None, management_zone_id=None):
+    """Generates the tag portion of the scope payload
+
+    Args:
+        tag (list, dict, str): single or collection of tags
+        filter_type (str, optional): Type of entity to match against. Defaults to None.
+        management_zone_id (str, optional): Management Zone to match against. Defaults to None.
+
+    Raises:
+        ValueError: Filter Type is not in acceptable values
+
+    Returns:
+        dict: tag payload to be used as part of the main scope payload
+    """
     tag_payload = {}
     if management_zone_id:
         tag_payload['mzId'] = str(management_zone_id)
     if filter_type:
-        if filter_type in FilterType._member_names_:
+        if filter_type in FilterType._member_names_:  # pylint: disable=no-member,protected-access
            tag_payload['type'] = filter_type
        else:
            raise ValueError(
@@ -225,7 +247,25 @@ def generate_tag_scope(tag, filter_type=None, management_zone_id=None):
     return tag_payload
 
 
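Taken together, the scope and schedule helpers in this module (generate_scope and generate_schedule, defined just below) compose into a window payload. A minimal sketch, assuming the user_variables/FULL_SET convention used elsewhere in this repo; the entity ID is a placeholder, the timestamp formats are assumed to match what validate_datetime expects, and pushing the payload to the API is left to whatever create/update helper the module exposes:

    from user_variables import FULL_SET
    from dynatrace.tenant import maintenance

    cluster = FULL_SET["mockserver1"]

    # Scope the window to one host; omit zone_id so DEFAULT_TIMEZONE is applied.
    scope = maintenance.generate_scope(entities=["HOST-ABC123DEF456"])
    schedule = maintenance.generate_schedule(
        maintenance.RecurrenceType.DAILY, "22:00", 120,
        "2021-01-01 00:00", "2021-12-31 23:59"
    )
    window = maintenance.generate_window_json(
        "Nightly deploy", "Suppress alerts during nightly deployments",
        str(maintenance.Suppression.DETECT_PROBLEMS_DONT_ALERT), schedule, scope=scope
    )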
-def generate_scope(entities=None, tags=None, filter_type=None, management_zone_id=None, match_any_tag=True):
+def generate_scope(
+    entities=None,
+    tags=None,
+    filter_type=None,
+    management_zone_id=None,
+    match_any_tag=True
+):
+    """Generate the total scope for maintenance window payload
+
+    Args:
+        entities (list, optional): List of specific entities. Defaults to None.
+        tags (List,Dict,str, optional): List/Set/Individual Tags. Defaults to None.
+        filter_type (str, optional): Specific Entity Type for tag. Defaults to None.
+        management_zone_id (str, optional): Specific MZ for tag. Defaults to None.
+        match_any_tag (bool, optional): Match any tag vs. all tags. Defaults to True.
+
+    Returns:
+        dict: sub payload for maintenance window payload containing scope
+    """
     if entities is None:
         entities = []
     matches = []
@@ -269,13 +309,21 @@ def generate_window_json(name, description, suppression, schedule, scope=None, i
     return window_json
 
 
-def generate_schedule(recurrence_type, start_time, duration, range_start, range_end, day=None, zoneId=None,):
+def generate_schedule(
+    recurrence_type,
+    start_time,
+    duration,
+    range_start,
+    range_end,
+    day=None,
+    zone_id=None,
+):
     """Create schedule structure for maintenance window"""
     # This structure requires a lot of input validation
     recurrence_type = str(recurrence_type).upper()
 
     # Check Recurrence
-    if recurrence_type not in RecurrenceType._member_names_:
+    if recurrence_type not in RecurrenceType._member_names_:  # pylint: disable=no-member,protected-access
         raise ValueError(
             "Invalid Recurrence Type! Allowed values are: ONCE, DAILY, WEEKLY, MONTHLY")
 
@@ -289,8 +337,8 @@ def generate_schedule(recurrence_type, start_time, duration, range_start, range_
         "end": range_end
     }
 
-    if zoneId is None:
-        schedule['zoneId'] = uv.DEFAULT_TIMEZONE
+    if zone_id is None:
+        schedule['zoneId'] = get_setting('DEFAULT_TIMEZONE')
 
     if recurrence_type != "ONCE":
         # Check Start Time
@@ -300,7 +348,7 @@ def generate_schedule(recurrence_type, start_time, duration, range_start, range_
         try:
             int(duration)
         except ValueError:
-            ("Duration time must be an integer! Duration is length of Maintainence Window in minutes")
+            print("Duration time must be an integer! Duration is the length of the Maintenance Window in minutes")
 
         schedule['recurrence'] = {
             "startTime": start_time,
@@ -310,7 +358,7 @@ def generate_schedule(recurrence_type, start_time, duration, range_start, range_
         # Check Weekly Day
         if recurrence_type == "WEEKLY":
             day = str(day).upper()
-            if day in DayOfWeek._member_names_:
+            if day in DayOfWeek._member_names_:  # pylint: disable=no-member,protected-access
                 schedule['recurrence']['dayOfWeek'] = day
             else:
                 raise ValueError("Invalid Weekly Day! Allowed values are "
@@ -320,7 +368,7 @@ def generate_schedule(recurrence_type, start_time, duration, range_start, range_
         if recurrence_type == "MONTHLY":
             if not isinstance(day, int):
                 raise TypeError("Invalid type for Day of Month! Int between 1-31 required")
-            if (1 <= int(day) <= 31):
+            if 1 <= int(day) <= 31:
                 schedule['recurrence']['dayOfMonth'] = day
             else:
                 raise ValueError("Invalid Monthly Day! 
Allowed values are 1-31") @@ -376,19 +424,19 @@ def get_window(cluster, tenant, window_id): def parse_tag(tag_string): # Need a way to process literal colon inside a key "Parsing Tag to to Context, Key and Value" - m = re.match( + tag_match = re.match( r"(?:\[(\w+)\])?([\w\-\/`\+\.\!\@\#\$\%\^\&\*\(\)\?\[\]\{\}\,\<\>\ \:\;]+)(?:\:(\w*))?", tag_string ) tag_dictionary = {} - if m.group(1): - tag_dictionary['context'] = m.group(1) + if tag_match.group(1): + tag_dictionary['context'] = tag_match.group(1) else: tag_dictionary['context'] = "CONTEXTLESS" - tag_dictionary['key'] = m.group(2) # Key is always required + tag_dictionary['key'] = tag_match.group(2) # Key is always required - if m.group(3): - tag_dictionary['value'] = m.group(3) + if tag_match.group(3): + tag_dictionary['value'] = tag_match.group(3) return tag_dictionary diff --git a/dynatrace/tenant/management_zones.py b/dynatrace/tenant/management_zones.py index 192c587..1f99cab 100644 --- a/dynatrace/tenant/management_zones.py +++ b/dynatrace/tenant/management_zones.py @@ -1,123 +1,278 @@ -# This script's function is too add Management Zones -# for an application based on application and environment if provided -"""Management Zone Operations for Environment""" -import copy +"""Functions for Management Zone Operations via Configuration API""" + import json -from dynatrace.requests import request_handler as rh - -ENDPOINT = rh.TenantAPIs.MANAGEMENT_ZONES - - -def generate_mz_payload(application, env_zone=None): - """Create Payload for Management Zone based on Application and Environment""" - with open('../templates/mz_template.json', 'r') as mz_template: - mz_payload = json.load(mz_template) - - mz_payload['name'] = str(application) - # The Template will have - # Service Rules(0), Process Group Rules(1), Application Rules(2), - # Browser Monitors(3), HTTP Monitor(4), External Monitors(5), Manually Tagged Services (6), - # Manually Tagged Process Groups (7), Mobile Application (8), Custom Device Groups (9), - # Service and Process Groups are different because they allow Key/Value Pairs - - # TODO Consolidate by checking if Key/Value Pair exists - mz_payload['rules'][0]['conditions'][0]['comparisonInfo']['value']['value'] = str( - application) - mz_payload['rules'][1]['conditions'][0]['comparisonInfo']['value']['value'] = str( - application) - - for rule_num in range(2, 10): - mz_payload['rules'][rule_num]['conditions'][0]['comparisonInfo']['value']['key'] = "APP: " + \ - str(application) - - if env_zone: - # If environment exists, rename MZ and add environment conditions - mz_payload['name'] = str(application) + " - " + str(env_zone) - - # Service and Process Groups are different because they allow Key/Value Pairs - condition_payload = copy.deepcopy( - mz_payload['rules'][0]['conditions'][0]) - condition_payload['comparisonInfo']['value']['key'] = "ENV" - condition_payload['comparisonInfo']['value']['value'] = str(env_zone) - mz_payload['rules'][0]['conditions'].append(condition_payload) - - del condition_payload - condition_payload = copy.deepcopy( - mz_payload['rules'][1]['conditions'][0]) - condition_payload['comparisonInfo']['value']['key'] = "ENV" - condition_payload['comparisonInfo']['value']['value'] = str(env_zone) - mz_payload['rules'][1]['conditions'].append(condition_payload) - # Application, Browser Monitors, HTTP Monitor, External Monitors (in that order) - - for rule_num in range(2, 10): - del condition_payload - condition_payload = copy.deepcopy( - mz_payload['rules'][rule_num]['conditions'][0]) - 
condition_payload['comparisonInfo']['value']['key'] = "ENV: " + \ - str(env_zone) - mz_payload['rules'][rule_num]['conditions'].append( - condition_payload) - - return mz_payload - - -def add_management_zone(cluster, tenant, application, env_zone=None): - """Add Management Zone based on Application and Environment""" - mz_payload = generate_mz_payload(application, env_zone) - - response = rh.make_api_call(cluster=cluster, - tenant=tenant, - method=rh.HTTP.POST, - endpoint=ENDPOINT, - json=mz_payload) +from enum import Enum, auto +from dynatrace.framework import request_handler as rh, log_handler + +ENDPOINT = str(rh.TenantAPIs.MANAGEMENT_ZONES) +logger = log_handler.get_logger(__name__) + + +class RuleTypes(Enum): + """Accepted values for Management Zone rule types.""" + APPMON_SERVER = auto() + APPMON_SYSTEM_PROFILE = auto() + AWS_ACCOUNT = auto() + AWS_APPLICATION_LOAD_BALANCER = auto() + AWS_AUTO_SCALING_GROUP = auto() + AWS_CLASSIC_LOAD_BALANCER = auto() + AWS_NETWORK_LOAD_BALANCER = auto() + AWS_RELATIONAL_DATABASE_SERVICE = auto() + AZURE = auto() + BROWSER_MONITOR = auto() + CLOUD_APPLICATION = auto() + CLOUD_APPLICATION_NAMESPACE = auto() + CLOUD_FOUNDRY_FOUNDATION = auto() + CUSTOM_APPLICATION = auto() + CUSTOM_DEVICE = auto() + CUSTOM_DEVICE_GROUP = auto() + DATA_CENTER_SERVICE = auto() + ENTERPRISE_APPLICATION = auto() + ESXI_HOST = auto() + EXTERNAL_MONITOR = auto() + HOST = auto() + HOST_GROUP = auto() + HTTP_MONITOR = auto() + KUBERNETES_CLUSTER = auto() + MOBILE_APPLICATION = auto() + OPENSTACK_ACCOUNT = auto() + PROCESS_GROUP = auto() + SERVICE = auto() + WEB_APPLICATION = auto() + + def __str__(self): + """Overriding default __str__ to return the name.""" + return str(self.name) + + +def generate_mz_template(name, tags): + """Generates a standard Management Zone with custom name and rules matching tags. + The rules include Hosts, Services, Process Groups, Web & Mobile Applications, + Browser, HTTP, and External Synthetic Tests, and Custom Device Groups. + Tags must be given as a tuple in this order: context, key, value (optional). + \n + @param name (str) - The name of the Management Zone to be created\n + @param tags (list(tuple)) - [0] is tag context, [1] is tag key, [2] is the tag value + \n + @returns dict - Management Zone + """ + try: + if not isinstance(tags, list): + raise ValueError( + f"Tags must be given as a list of tuples. Found {type(tags)} instead." + ) + if not all(isinstance(tag, tuple) for tag in tags): + raise ValueError( + "All provided tags must be tuples. Found a mix of types instead." 
+ ) + except ValueError: + logger.exception("Error: invalid format for tags object.", stack_info=True) + raise + logger.info("Building standard Management Zone from template") + logger.debug("Name: %s; Tags: %s", name, tags) + me_types = [ + RuleTypes.HOST, RuleTypes.SERVICE, RuleTypes.PROCESS_GROUP, + RuleTypes.WEB_APPLICATION, RuleTypes.BROWSER_MONITOR, RuleTypes.HTTP_MONITOR, + RuleTypes.MOBILE_APPLICATION, RuleTypes.CUSTOM_DEVICE_GROUP, + RuleTypes.EXTERNAL_MONITOR + ] + mz_rules = [ + dict( + type=str(me_type), + enabled=True, + propagationTypes=[], + conditions=[ + dict( + key=dict(attribute=f"{me_type}_TAGS"), + comparisonInfo=dict( + type="TAG", + operator="EQUALS" if len(tag) > 2 else "TAG_KEY_EQUALS", + value=dict( + context=tag[0], + key=tag[1], + value=tag[2] if len(tag) > 2 else None + ), + negate=False + ) + ) for tag in tags + ] + ) for me_type in me_types + ] + + mz_json = dict(name=name, rules=mz_rules) + + return mz_json + + +def add_management_zone(cluster, tenant, mz_json): + """Adds a new management zone to the tenant. + \n + @param cluster (dict) - Dynatrace Cluster dictionary, as taken from variable set\n + @param tenant (str) - Dynatrace Tenant name, as taken from variable set\n + @param mz_json (dict) - Management Zone definition, to be sent as JSON payload + \n + @returns str - ID of the newly created Management Zone, if successful + """ + logger.info("Adding a new Management Zone in tenant %s", tenant) + response = rh.make_api_call( + cluster=cluster, + tenant=tenant, + method=rh.HTTP.POST, + endpoint=ENDPOINT, + json=mz_json + ) + if "id" in response.json(): - return (response.json())['id'] - else: - return (response.text) + return response.json()['id'] + + return response.text -def change_management_zone(cluster, tenant, mz_id, application, env_zone=None): - """Add Management Zone based on Application and Environment""" - mz_payload = generate_mz_payload(application, env_zone) +def update_management_zone(cluster, tenant, mz_id, mz_json): + """Updates an existing Management Zone with given definition. + \n + @param cluster (dict) - Dynatrace Cluster dictionary, as taken from variable set\n + @param tenant (str) - Dynatrace Tenant name, as taken from variable set\n + @param mz_json (dict) - Management Zone definition, to be sent as JSON payload\n + @param mz_id (str) - ID of the Management Zone to update + \n + @returns Response - HTTP Response to the request + """ + logger.info("Updating Management Zone with ID %s in tenant %s", mz_id, tenant) - response = rh.make_api_call(cluster=cluster, - tenant=tenant, - method=rh.HTTP.PUT, - endpoint=f"{ENDPOINT}/{mz_id}", - json=mz_payload) - print(response.status_code) + response = rh.make_api_call( + cluster=cluster, + tenant=tenant, + method=rh.HTTP.PUT, + endpoint=f"{ENDPOINT}/{mz_id}", + json=mz_json + ) + + return response def delete_management_zone_by_id(cluster, tenant, mz_id): - """Delete Management Zone by Management Zone ID""" - response = rh.make_api_call(cluster=cluster, - tenant=tenant, - method=rh.HTTP.DELETE, - endpoint=f"{ENDPOINT}/{mz_id}") - print(response.status_code) + """Deletes an existing Management Zone, referenced by ID. 
+    \n
+    @param cluster (dict) - Dynatrace Cluster dictionary, as taken from variable set\n
+    @param tenant (str) - Dynatrace Tenant name, as taken from variable set\n
+    @param mz_id (str) - ID of the Management Zone to delete
+    \n
+    @returns Response - HTTP Response to the request
+    """
+    logger.info("Deleting Management Zone with ID %s from tenant %s", mz_id, tenant)
+
+    response = rh.make_api_call(
+        cluster=cluster,
+        tenant=tenant,
+        method=rh.HTTP.DELETE,
+        endpoint=f"{ENDPOINT}/{mz_id}"
+    )
+
+    return response
 
 
 def delete_management_zone_by_name(cluster, tenant, mz_name):
-    """Delete Management Zone by Management Zone Name"""
-    # TODO This function
-    return "TODO " + cluster + tenant + mz_name
+    """Deletes an existing Management Zone, referenced by name.
+    \n
+    @param cluster (dict) - Dynatrace Cluster dictionary, as taken from variable set\n
+    @param tenant (str) - Dynatrace Tenant name, as taken from variable set\n
+    @param mz_name (str) - name of the Management Zone to delete
+    \n
+    @returns Response - HTTP Response to the request
+    \n
+    @throws RuntimeError - when Management Zone was not found in tenant
+    """
+    mz_id = get_management_zone_id(cluster, tenant, mz_name)
+
+    if not mz_id:
+        try:
+            raise RuntimeError(
+                f"Error: No Management Zone found with name {mz_name} in tenant {tenant}"
+            )
+        except RuntimeError:
+            logger.exception("Error: Management Zone not found.", stack_info=True)
+            raise
+
+    logger.info("Deleting the Management Zone from tenant")
+    response = rh.make_api_call(
+        cluster=cluster,
+        tenant=tenant,
+        method=rh.HTTP.DELETE,
+        endpoint=f"{ENDPOINT}/{mz_id}"
+    )
+
+    return response
+
+
+def get_all_management_zones(cluster, tenant):
+    """Gets all Management Zones within a Tenant.
+    \n
+    @param cluster (dict) - Dynatrace Cluster dictionary, as taken from variable set\n
+    @param tenant (str) - Dynatrace Tenant name, as taken from variable set
+    \n
+    @returns list - list of Management Zones
+    """
+    logger.info("Getting all Management Zones from tenant %s", tenant)
+    management_zones = rh.make_api_call(
+        cluster=cluster,
+        tenant=tenant,
+        endpoint=ENDPOINT
+    ).json().get("values")
 
-def get_management_zone_list(cluster, tenant):
-    """Get all Management Zones in Environment"""
-    # TODO Cache Management Zone list for Env, and add a cleanup script to remove after run.
-    response = rh.make_api_call(cluster=cluster,
-                                tenant=tenant,
-                                endpoint=ENDPOINT)
-    mz_list_raw = response.json()
-    return mz_list_raw['values']
+    return management_zones
+
+
+def get_management_zone_details(cluster, tenant, mz_id):
+    """Gets the full details of a Management Zone referenced by ID.
+    \n
+    @param cluster (dict) - Dynatrace Cluster dictionary, as taken from variable set\n
+    @param tenant (str) - Dynatrace Tenant name, as taken from variable set\n
+    @param mz_id (str) - ID of the Management Zone to fetch
+    \n
+    @returns dict - Management Zone details
+    """
+    logger.info(
+        "Getting details for Management Zone with id %s in tenant %s", mz_id, tenant
+    )
+    mz_details = rh.make_api_call(
+        cluster=cluster,
+        tenant=tenant,
+        endpoint=f"{ENDPOINT}/{mz_id}"
+    ).json()
+
+    return mz_details
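To show how the template generator and CRUD helpers fit together, a short sketch (module path, tenant name, tag tuples, and zone name are placeholder assumptions; FULL_SET follows the user_variables convention used across this repo):

    from user_variables import FULL_SET
    from dynatrace.tenant import management_zones as mz

    cluster = FULL_SET["mockserver1"]

    # Build a standard zone matching two tags, create it, then push a tweak.
    mz_json = mz.generate_mz_template(
        name="easytravel - prod",
        tags=[("CONTEXTLESS", "APP", "easytravel"), ("CONTEXTLESS", "ENV", "prod")]
    )
    mz_id = mz.add_management_zone(cluster, "tenant1", mz_json)
    mz_json["rules"][0]["enabled"] = False
    mz.update_management_zone(cluster, "tenant1", mz_id, mz_json)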
 
 
 def get_management_zone_id(cluster, tenant, mz_name):
-    """Get Management Zone ID of Management Zone Name"""
-    mz_list = get_management_zone_list(cluster, tenant)
+    """Gets the ID of a Management Zone referenced by name.
+    \n
+    @param cluster (dict) - Dynatrace Cluster dictionary, as taken from variable set\n
+    @param tenant (str) - Dynatrace Tenant name, as taken from variable set\n
+    @param mz_name (str) - name of the Management Zone to find
+    \n
+    @returns str - ID of the Management Zone if found. None otherwise.
+    """
+    logger.info(
+        "Finding ID for Management Zone with name %s in tenant %s", mz_name, tenant
+    )
+    mz_list = get_all_management_zones(cluster, tenant)
     for m_zone in mz_list:
         if m_zone['name'] == mz_name:
             return m_zone['id']
     return None
+
+
+def import_mz_from_file(file):
+    """Reads a Management Zone definition from a (JSON) file.
+    \n
+    @param file (str) - the file to read (must be valid JSON)
+    \n
+    @returns dict - dictionary created from reading the file
+    """
+    logger.info("Reading Management Zone from file.")
+    with open(file=file, mode="r") as json_file:
+        mz_details = json.load(json_file)
+
+    return mz_details
diff --git a/dynatrace/tenant/metrics.py b/dynatrace/tenant/metrics.py
index ed78db9..d46302a 100644
--- a/dynatrace/tenant/metrics.py
+++ b/dynatrace/tenant/metrics.py
@@ -1,24 +1,167 @@
-from dynatrace.requests import request_handler as rh
+"""Module for interacting with the Metrics API"""
+from dynatrace.framework import request_handler as rh, log_handler
+from dynatrace.framework.exceptions import InvalidAPIResponseException
 
-ENDPOINT = rh.TenantAPIs.METRICS
+ENDPOINT = str(rh.TenantAPIs.METRICS)
+logger = log_handler.get_logger(__name__)
 
 
-def get_metrics(cluster, tenant, params=None):
-    """Gets the list of metrics and their details"""
+def get_metric_descriptor(cluster, tenant, **kwargs):
+    """Get a list of metric descriptors and their details.
+    Valid metricSelector must be provided in kwargs. List contains all default
+    details or anything specified through 'fields' kwarg.
+    \n
+    @param cluster (dict) - Dynatrace cluster (as taken from variable set)
+    @param tenant (str) - name of Dynatrace tenant (as taken from variable set)
+    \n
+    @returns list - list of metric descriptors matching the metricSelector
+    """
+    logger.info("Getting metric descriptors")
+    descriptors = rh.get_results_whole(
+        cluster=cluster,
+        tenant=tenant,
+        endpoint=ENDPOINT,
+        api_version=2,
+        item='metrics',
+        **kwargs
+    ).get('metrics')
+
+    return descriptors
+
+
+def get_metric_count(cluster, tenant, **kwargs):
+    """Get the number of metrics matching the metricSelector
+    \n
+    @param cluster (dict) - Dynatrace cluster (as taken from variable set)
+    @param tenant (str) - name of Dynatrace tenant (as taken from variable set)
+    \n
+    @returns int - Number of metrics matching the metricSelector
+    """
+    logger.info("Getting the total metric count for the query.")
+    count = rh.make_api_call(
+        cluster=cluster,
+        tenant=tenant,
+        endpoint=ENDPOINT,
+        params=kwargs
+    ).json().get('totalCount')
+
+    return count
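A short sketch of how these two query helpers are typically driven (module path and the metric selector are assumptions; any valid v2 metricSelector works):

    from user_variables import FULL_SET
    from dynatrace.tenant import metrics

    cluster = FULL_SET["mockserver1"]
    selector = "builtin:host.cpu.usage"

    # How many metrics match the selector?
    print(metrics.get_metric_count(cluster, "tenant1", metricSelector=selector))

    # Descriptors can be narrowed with the 'fields' kwarg, e.g. dimension definitions.
    descriptors = metrics.get_metric_descriptor(
        cluster, "tenant1", metricSelector=selector, fields="dimensionDefinitions"
    )
    print(len(descriptors[0].get("dimensionDefinitions", [])))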
+
+
+def get_metric_data(cluster, tenant, **kwargs):
+    """Gets data points for given metrics.
+    One or more metrics and aggregations can be specified using a metricSelector.
+    The function grabs the datapoints for all entities matching entitySelector if
+    this was specified. Results are indexed in a dictionary with the metric_id as
+    key and the data as a list.
+    \n
+    @param cluster (dict) - Dynatrace cluster (as taken from variable set)
+    @param tenant (str) - name of Dynatrace tenant (as taken from variable set)
+    \n
+    @kwargs metricSelector (str) - mandatory. used to pass in ID of queried metric(s)
+    \n
+    @returns dict - metric data as dictionary with metric id as key
+    \n
+    @throws Exception - exception as thrown from downstream
+    """
     next_page_key = 1
-    metrics = []
+    results = {}
+    logger.info("Getting metric datapoints")
 
     while next_page_key:
         # Upon subsequent calls, clear all other params
         if next_page_key != 1:
-            params = dict(nextPageKey=next_page_key)
+            kwargs = dict(nextPageKey=next_page_key)
 
-        response = rh.make_api_call(cluster=cluster,
-                                    tenant=tenant,
-                                    endpoint=ENDPOINT,
-                                    params=params)
+        try:
+            response = rh.make_api_call(cluster=cluster,
+                                        tenant=tenant,
+                                        endpoint=f"{ENDPOINT}/query",
+                                        params=kwargs)
+        except InvalidAPIResponseException as err:
+            if 'metric key that could not be resolved in the metric registry' in str(err):
+                logger.warning("Invalid metric ID encountered. Returning results so far.")
+                break
+            logger.exception("Error: Invalid API response", stack_info=True)
+            raise
+
+        for result in response.json().get('result'):
+            metric = result.get('metricId')
+            if results.get(metric):
+                results[metric].extend(result.get('data'))
+            else:
+                results[metric] = result.get('data')
 
-        metrics.extend(response.json().get('metrics'))
         next_page_key = response.json().get('nextPageKey')
 
-    return metrics
+    return results
+
+
+def get_metric_dimension_count(cluster, tenant, metric_selector):
+    """Function returns the sum total of dimensions defined for one or more metrics.
+    Useful in DDU calculations for estimating the max number of DDUs that will be
+    consumed.
+
+    \n
+    @param cluster (dict) - Dynatrace cluster (as taken from variable set)
+    @param tenant (str) - name of Dynatrace tenant (as taken from variable set)
+    @param metric_selector (str) - mandatory. used to pass in ID of queried metric(s)
+    \n
+    @returns int - the sum total of dimensions across all matched metrics
+    """
+    logger.info("Getting dimension count for metric(s)")
+    details = get_metric_descriptor(
+        cluster=cluster,
+        tenant=tenant,
+        metricSelector=metric_selector,
+        fields='dimensionDefinitions',
+        pageSize=5000
+    )
+
+    dimensions = sum(
+        len(detail.get('dimensionDefinitions'))
+        for detail in details
+    ) if details else 0
+
+    return dimensions
+
+
+def get_metric_estimated_ddus(cluster, tenant, metric_selector):
+    """Function returns the total maximum yearly DDUs that the metrics are allowed
+    to consume. This is calculated by multiplying the total number of dimensions
+    by 525.6 (yearly DDUs for 1 metric). This assumes the metric is collected every
+    minute. Useful for understanding DDU budget requirements.
+    \n
+    @param cluster (dict) - Dynatrace cluster (as taken from variable set)
+    @param tenant (str) - name of Dynatrace tenant (as taken from variable set)
+    @param metric_selector (str) - mandatory. used to pass in ID of queried metric(s)
+    \n
+    @returns (float) - total number of yearly DDUs
+    """
+    logger.info("Getting DDUs for metric(s)")
+    return get_metric_dimension_count(
+        cluster=cluster,
+        tenant=tenant,
+        metric_selector=metric_selector
+    ) * 525.6
+
+
+def ingest_metrics(cluster, tenant, payload):
+    """Ingests metrics based on given payload.
+    Payload must be formatted according to Dynatrace line-protocol for metric ingest.
+    \n
+    @param cluster (dict) - Dynatrace cluster (as taken from variable set)\n
+    @param tenant (str) - name of Dynatrace tenant (as taken from variable set)\n
+    @param payload (str) - payload for metric ingestion. must be formatted according to
+    Dynatrace line protocol. 
+ \n + @returns (dict) - response to HTTP request + """ + logger.info("Sending metrics to Dynatrace") + return rh.make_api_call( + cluster=cluster, + tenant=tenant, + endpoint=f"{ENDPOINT}/ingest", + data=payload, + method=rh.HTTP.POST + ) diff --git a/dynatrace/tenant/oneagents.py b/dynatrace/tenant/oneagents.py new file mode 100644 index 0000000..98ab683 --- /dev/null +++ b/dynatrace/tenant/oneagents.py @@ -0,0 +1,100 @@ +"""Module for OneAgent operations.""" + +from dynatrace.framework import request_handler as rh, log_handler + +logger = log_handler.get_logger(__name__) + + +def get_host_units_tenantwide(cluster, tenant, **kwargs): + """Get Host Units used by the tenant + \n + @param cluster - Dynatrace Cluster (from variable set)\n + @param tenant - Dynatrace Tenant (from variable set)\n + @kwargs - dictionary of query parameters valid with the API.\n + @returns - total number of host units consumed + """ + host_units = 0 + + logger.info("Getting hosts from tenant %s", tenant) + host_list = rh.get_results_whole( + cluster=cluster, + tenant=tenant, + endpoint=f'{rh.TenantAPIs.V1_TOPOLOGY}/infrastructure/hosts', + api_version=1, + **kwargs + ) + + logger.info("Adding up host units") + for host in host_list: + host_units += round(host['consumedHostUnits'], ndigits=3) + + return round(host_units, ndigits=3) + + +def get_host_units_clusterwide(cluster, aggregated=True, **kwargs): + """Get Host Units used by the cluster. + \n + @param cluster - Dynatrace Cluster (from variable set)\n + @param aggregated - return results aggregated or split by tenant\n + @kwargs - dictionary of query parameters valid with the API.\n + @returns - total number of host units consumed or dict object + with tenants as keys if not aggregated. + """ + total_host_units = 0 + host_units = {} + + logger.info("Getting host units for the whole cluster") + for tenant in cluster['tenant']: + tenant_host_units = get_host_units_tenantwide( + cluster=cluster, + tenant=tenant, + **kwargs + ) + total_host_units += tenant_host_units + host_units[tenant] = tenant_host_units + + return total_host_units if aggregated else host_units + + +def get_host_units_setwide(full_set, aggregated=True, **kwargs): + """Get Host Units used by the full set of clusters. + \n + @param full_set - Variable Set\n + @param aggregated - return results aggregated or split by cluster\n + @kwargs - dictionary of query parameters valid with the API.\n + @returns - total number of host units consumed or dict object + with clusters as keys if not aggregated. + """ + total_host_units = 0 + host_units = {} + + logger.info("Getting host units for the whole set") + for cluster in full_set: + cluster_host_units = get_host_units_clusterwide( + cluster=full_set[cluster], + **kwargs + ) + total_host_units += cluster_host_units + host_units[cluster] = cluster_host_units + + return total_host_units if aggregated else host_units + + +def get_oneagents_tenantwide(cluster, tenant, **kwargs): + """Get OneAgent details for all hosts in the tenant. 
+    \n
+    @param cluster - Dynatrace Cluster (from variable set)\n
+    @param tenant - Dynatrace Tenant (from variable set)\n
+    @kwargs - dictionary of query parameters valid with the API\n
+
+    @returns - list of OneAgents
+    """
+    logger.info("Getting OneAgents from tenant %s", tenant)
+    return rh.get_results_whole(
+        cluster=cluster,
+        tenant=tenant,
+        endpoint=rh.TenantAPIs.ONEAGENTS,
+        api_version=2,
+        item='hosts',
+        **kwargs
+    ).get('hosts')
diff --git a/dynatrace/tenant/problems.py b/dynatrace/tenant/problems.py
new file mode 100644
index 0000000..3192115
--- /dev/null
+++ b/dynatrace/tenant/problems.py
@@ -0,0 +1,216 @@
+"""Module for interactions with the Problems (V2) API"""
+from dynatrace.framework import request_handler as rh, log_handler
+
+ENDPOINT = str(rh.TenantAPIs.PROBLEMS)
+logger = log_handler.get_logger(__name__)
+
+
+def get_all_problems(cluster, tenant, **kwargs):
+    """Gets the list of all problems matching the query parameters.
+    \n
+    @param cluster (dict) - Dynatrace Cluster (as taken from variable set)\n
+    @param tenant (str) - name of Dynatrace Tenant (as taken from variable set)
+    \n
+    @returns list - list of problems
+    """
+    logger.info("Getting problems from tenant %s", tenant)
+    problems_list = rh.get_results_whole(
+        cluster=cluster,
+        tenant=tenant,
+        endpoint=ENDPOINT,
+        api_version=2,
+        item="problems",
+        **kwargs
+    ).get('problems')
+
+    return problems_list
+
+
+def get_problem_count(cluster, tenant, **kwargs):
+    """Gets the total number of problems matching query parameters.
+    \n
+    @param cluster (dict) - Dynatrace Cluster (as taken from variable set)\n
+    @param tenant (str) - name of Dynatrace Tenant (as taken from variable set)
+    \n
+    @returns int - number of problems
+    """
+    logger.info("Getting the total problem count in tenant %s", tenant)
+    problems_list = rh.make_api_call(
+        cluster=cluster,
+        tenant=tenant,
+        endpoint=ENDPOINT,
+        params=kwargs
+    ).json()
+    count = problems_list.get('totalCount')
+
+    return count
+
+
+def get_problem_details(cluster, tenant, problem_id, **kwargs):
+    """Retrieves the details of a specific problem.
+    \n
+    @param cluster (dict) - Dynatrace Cluster (as taken from variable set)\n
+    @param tenant (str) - name of Dynatrace Tenant (as taken from variable set)\n
+    @param problem_id (str) - ID of the problem to retrieve
+    \n
+    @kwargs fields (str) - comma separated list of fields to include in details.
+    (evidenceDetails, impactAnalysis, recentComments)
+    \n
+    @returns (dict) - problem details
+    """
+    logger.info("Getting problem details for problem %s", problem_id)
+    details = rh.make_api_call(
+        cluster=cluster,
+        tenant=tenant,
+        endpoint=f"{ENDPOINT}/{problem_id}",
+        params=kwargs
+    ).json()
+
+    return details
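A quick triage sketch over the problem-listing helpers above (module path and selector values are assumptions; note that 'from' is a Python keyword, which is why time-frame parameters are passed via dict unpacking):

    from user_variables import FULL_SET
    from dynatrace.tenant import problems

    cluster = FULL_SET["mockserver1"]

    # List open problems from the last 2 hours and inspect the first one.
    open_problems = problems.get_all_problems(
        cluster, "tenant1", **{"from": "now-2h", "problemSelector": 'status("open")'}
    )
    if open_problems:
        details = problems.get_problem_details(
            cluster, "tenant1", open_problems[0]["problemId"], fields="recentComments"
        )
        print(details.get("title"))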
+
+
+def close_problem(cluster, tenant, problem_id, comment=""):
+    """Manually closes an open problem, leaving a comment.
+    \n
+    @param cluster (dict) - Dynatrace Cluster (as taken from variable set)\n
+    @param tenant (str) - name of Dynatrace Tenant (as taken from variable set)\n
+    @param problem_id (str) - ID of the problem to close\n
+    @param comment (str) - closing comment
+    \n
+    @returns Response - HTTP response for the request
+    """
+    logger.info("Closing problem %s", problem_id)
+    logger.info("Closing comment: %s", comment)
+    response = rh.make_api_call(
+        cluster=cluster,
+        tenant=tenant,
+        endpoint=f"{ENDPOINT}/{problem_id}/close",
+        method=rh.HTTP.POST,
+        json=dict(message=comment)
+    )
+
+    return response
+
+
+def get_all_comments(cluster, tenant, problem_id, **kwargs):
+    """Gets a list of all comments of a problem.
+    \n
+    @param cluster (dict) - Dynatrace Cluster (as taken from variable set)\n
+    @param tenant (str) - name of Dynatrace Tenant (as taken from variable set)\n
+    @param problem_id (str) - ID of the problem to retrieve comments from
+    \n
+    @kwargs pageSize (int) - affects number of API calls
+    \n
+    @returns list - list of comments
+    """
+    logger.info("Getting comments from problem %s", problem_id)
+    comments = rh.get_results_whole(
+        cluster=cluster,
+        tenant=tenant,
+        endpoint=f"{ENDPOINT}/{problem_id}/comments",
+        api_version=2,
+        item="comments",
+        **kwargs
+    ).get("comments")
+
+    return comments
+
+
+def get_comment(cluster, tenant, problem_id, comment_id):
+    """Gets a comment from a Problem.
+    \n
+    @param cluster (dict) - Dynatrace Cluster (as taken from variable set)\n
+    @param tenant (str) - name of Dynatrace Tenant (as taken from variable set)\n
+    @param problem_id (str) - ID of the problem containing the comment\n
+    @param comment_id (str) - ID of the comment to retrieve
+    \n
+    @returns dict - comment details
+    """
+    logger.info("Getting details for comment %s from problem %s", comment_id, problem_id)
+    comment = rh.make_api_call(
+        cluster=cluster,
+        tenant=tenant,
+        endpoint=f"{ENDPOINT}/{problem_id}/comments/{comment_id}"
+    ).json()
+
+    return comment
+
+
+def add_comment(cluster, tenant, problem_id, **kwargs):
+    """Adds a comment to a problem.
+    \n
+    @param cluster (dict) - Dynatrace Cluster (as taken from variable set)\n
+    @param tenant (str) - name of Dynatrace Tenant (as taken from variable set)\n
+    @param problem_id (str) - ID of the problem to add the comment to
+    \n
+    @kwargs comment (str) - comment content\n
+    @kwargs context (str) - comment context. added under "via ..."
+    \n
+    @returns Response - HTTP response for the request
+    """
+    logger.info("Adding comment to problem %s", problem_id)
+    comment = kwargs.get("comment") if "comment" in kwargs else ""
+    context = kwargs.get("context") if "context" in kwargs else ""
+
+    response = rh.make_api_call(
+        cluster=cluster,
+        tenant=tenant,
+        endpoint=f"{ENDPOINT}/{problem_id}/comments",
+        method=rh.HTTP.POST,
+        json=dict(comment=comment, context=context)
+    )
+
+    return response
+
+
+def update_comment(cluster, tenant, problem_id, comment_id, **kwargs):
+    """Updates an existing comment of a problem.
+    \n
+    @param cluster (dict) - Dynatrace Cluster (as taken from variable set)\n
+    @param tenant (str) - name of Dynatrace Tenant (as taken from variable set)\n
+    @param problem_id (str) - ID of the problem containing the comment\n
+    @param comment_id (str) - ID of the comment to update
+    \n
+    @kwargs comment (str) - comment content\n
+    @kwargs context (str) - comment context. added under "via ..." 
+ \n + @returns Response - HTTP response for the request + """ + logger.info("Updating comment %s from problem %s", comment_id, problem_id) + comment = get_comment(cluster, tenant, problem_id, comment_id) + + if "comment" in kwargs: + comment["comment"] = kwargs.get("comment") + if "context" in kwargs: + comment["context"] = kwargs.get("context") + + response = rh.make_api_call( + cluster=cluster, + tenant=tenant, + endpoint=f"{ENDPOINT}/{problem_id}/comments/{comment_id}", + method=rh.HTTP.PUT, + json=comment + ) + + return response + + +def delete_comment(cluster, tenant, problem_id, comment_id): + """Deletes a comment from a problem. + \n + @param cluster (dict) - Dynatrace Cluster (as taken from variable set)\n + @param tenant (str) - name of Dynatrace Tenant (as taken from variable set)\n + @param problem_id (str) - ID of the problem containing the comment\n + @param comment_id (str) - ID of the comment to delete + \n + @returns Response - HTTP response for the request + """ + logger.info("Deleting comment %s from problem %s", comment_id, problem_id) + response = rh.make_api_call( + cluster=cluster, + tenant=tenant, + endpoint=f"{ENDPOINT}/{problem_id}/comments/{comment_id}", + method=rh.HTTP.DELETE + ) + + return response diff --git a/dynatrace/tenant/request_attributes.py b/dynatrace/tenant/request_attributes.py index d1340ac..1b53444 100644 --- a/dynatrace/tenant/request_attributes.py +++ b/dynatrace/tenant/request_attributes.py @@ -1,75 +1,210 @@ -#!/bin/python3 -"""Request Attributes Operations""" +"""Module for Request Attributes Operations via Configuration API""" + +import os import json -from dynatrace.requests import request_handler as rh - -ENDPOINT = rh.TenantAPIs.REQUEST_ATTRIBUTES - - -def pull_to_files(cluster, tenant, ignore_disabled=True): - """Pull files from an environment to local""" - # API Calls needed: Pull RA, take the ID and pull the details of each RA - all_ra_call = rh.make_api_call(cluster=cluster, - tenant=tenant, - endpoint=ENDPOINT) - all_ra_json = all_ra_call.json() - all_ra_json = all_ra_json['values'] - # print (json.dumps(all_ra_json, indent=2)) - ra_file_list = [] - for request_attribute in all_ra_json: - single_ra_call = rh.make_api_call(cluster=cluster, - tenant=tenant, - endpoint=f"{ENDPOINT}/{request_attribute['id']}") - if single_ra_call.status_code == 200: - single_ra_json = single_ra_call.json() - if single_ra_json['enabled'] and ignore_disabled: - single_ra_json.pop("metadata") - single_ra_json.pop("id") - ra_file_name = "jsons/request_attributes/" + \ - str(single_ra_json['name']) + ".json" - with open(ra_file_name, 'w') as current_file: - json.dump(single_ra_json, current_file, indent=2) - ra_file_list.append(ra_file_name) - else: - print(single_ra_call.status_code) - return ra_file_list - - -def push_from_files(file_list, cluster, tenant): - """Push Request Attributes in JSONs to a tenant""" - - # Checks for Existing RAs to update them put request rather than a post that would fail - existing_ra_get = rh.make_api_call(cluster=cluster, tenant=tenant, endpoint=ENDPOINT) - existing_ra_json = existing_ra_get.json() - existing_ra_json = existing_ra_json['values'] - existing_ra_list = {} - for existing_ra in existing_ra_json: - existing_ra_list["jsons/request_attributes/" + - str(existing_ra['name']) + ".json"] = existing_ra['id'] - - for file in file_list: - with open(file, 'r') as ra_file: - ra_json = json.load(ra_file) - if file in existing_ra_list: - single_ra_post = rh.make_api_call( - cluster=cluster, - tenant=tenant, - 
method=rh.HTTP.PUT,
-                endpoint=f"{ENDPOINT}/{existing_ra_list[file]}",
-                json=ra_json
-            )
-        else:
-            single_ra_post = rh.make_api_call(
-                cluster=cluster,
-                tenant=tenant,
-                method=rh.HTTP.POST,
-                endpoint=ENDPOINT,
-                json=ra_json
-            )
-        if single_ra_post.status_code >= 400:
-            # NOTE: what about the check response in req handler!?
-            # That will throw an exception first, which this should except
-            print("Error with " + file + ". Status Code: " +
-                  str(single_ra_post.status_code))
-        else:
-            print("Success " + file + " " + single_ra_post.text)
+from dynatrace.framework import request_handler as rh, log_handler
+
+ENDPOINT = str(rh.TenantAPIs.REQUEST_ATTRIBUTES)
+logger = log_handler.get_logger(__name__)
+
+
+def get_all_request_attributes(cluster, tenant):
+    """Get all request attributes within a tenant.
+    \n
+    @param cluster (dict) - Dynatrace Cluster dictionary, as taken from variable set\n
+    @param tenant (str) - Dynatrace Tenant name, as taken from variable set
+    \n
+    @returns list - list of Request Attributes from tenant
+    """
+    logger.info("Getting all request attributes in tenant %s", tenant)
+    req_attrs = rh.make_api_call(
+        cluster=cluster,
+        tenant=tenant,
+        endpoint=ENDPOINT
+    ).json().get("values")
+
+    return req_attrs
+
+
+def get_request_attribute_details(cluster, tenant, ra_id):
+    """Get the full details of a request attribute in the tenant.
+    \n
+    @param cluster (dict) - Dynatrace Cluster dictionary, as taken from variable set\n
+    @param tenant (str) - Dynatrace Tenant name, as taken from variable set\n
+    @param ra_id (str) - ID of the request attribute to fetch
+    \n
+    @returns dict - Request Attribute details
+    """
+    logger.info(
+        "Getting details for request attribute with id %s in tenant %s", ra_id, tenant
+    )
+    details = rh.make_api_call(
+        cluster=cluster,
+        tenant=tenant,
+        endpoint=f"{ENDPOINT}/{ra_id}"
+    ).json()
+
+    return details
+
+
+def create_request_attribute(cluster, tenant, ra_json):
+    """Creates a new request attribute from given JSON and adds it to the tenant.
+    \n
+    @param cluster (dict) - Dynatrace Cluster dictionary, as taken from variable set\n
+    @param tenant (str) - Dynatrace Tenant name, as taken from variable set\n
+    @param ra_json (dict) - details of the request attribute to be sent as JSON payload
+    \n
+    @returns Response - HTTP Response for the request
+    """
+    logger.info(
+        "Adding a request attribute called %s in tenant %s", ra_json.get("name"), tenant
+    )
+    response = rh.make_api_call(
+        cluster=cluster,
+        tenant=tenant,
+        endpoint=ENDPOINT,
+        method=rh.HTTP.POST,
+        json=ra_json
+    )
+
+    return response
+
+
+def update_request_attribute(cluster, tenant, ra_id, ra_json):
+    """Updates an existing request attribute in the tenant.
+    \n
+    @param cluster (dict) - Dynatrace Cluster dictionary, as taken from variable set\n
+    @param tenant (str) - Dynatrace Tenant name, as taken from variable set\n
+    @param ra_id (str) - ID of the request attribute to update\n
+    @param ra_json (dict) - details of the request attribute to be sent as JSON payload
+    \n
+    @returns Response - HTTP Response for the request
+    """
+    logger.info(
+        "Updating request attribute with ID %s in tenant %s", ra_id, tenant
+    )
+    response = rh.make_api_call(
+        cluster=cluster,
+        tenant=tenant,
+        endpoint=f"{ENDPOINT}/{ra_id}",
+        method=rh.HTTP.PUT,
+        json=ra_json
+    )
+
+    return response
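A sketch of a simple clone workflow built on the helpers above (module path and the target tenant name are assumptions; stripping "metadata" and "id" before POSTing mirrors what the removed pull_to_files code did):

    from user_variables import FULL_SET
    from dynatrace.tenant import request_attributes as ra

    cluster = FULL_SET["mockserver1"]

    # Copy every request attribute definition from tenant1 to tenant2.
    for attr in ra.get_all_request_attributes(cluster, "tenant1"):
        details = ra.get_request_attribute_details(cluster, "tenant1", attr["id"])
        details.pop("metadata", None)  # drop read-only fields before POSTing
        details.pop("id", None)
        ra.create_request_attribute(cluster, "tenant2", details)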
+
+
+def delete_request_attribute_by_id(cluster, tenant, ra_id):
+    """Deletes an existing request attribute, referenced by ID.
+    \n
+    @param cluster (dict) - Dynatrace Cluster dictionary, as taken from variable set\n
+    @param tenant (str) - Dynatrace Tenant name, as taken from variable set\n
+    @param ra_id (str) - ID of the request attribute to delete
+    \n
+    @returns Response - HTTP Response
+    """
+    logger.info(
+        "Deleting request attribute with ID %s from tenant %s", ra_id, tenant
+    )
+    response = rh.make_api_call(
+        cluster=cluster,
+        tenant=tenant,
+        endpoint=f"{ENDPOINT}/{ra_id}",
+        method=rh.HTTP.DELETE
+    )
+
+    return response
+
+
+def delete_request_attribute_by_name(cluster, tenant, ra_name):
+    """Deletes an existing request attribute, referenced by name.
+    \n
+    @param cluster (dict) - Dynatrace Cluster dictionary, as taken from variable set\n
+    @param tenant (str) - Dynatrace Tenant name, as taken from variable set\n
+    @param ra_name (str) - name of the request attribute to delete\n
+    \n
+    @returns Response - HTTP Response
+    \n
+    @throws RuntimeError - when no ID was found for the request attribute
+    """
+    ra_id = get_request_attribute_id(cluster, tenant, ra_name)
+
+    if not ra_id:
+        try:
+            raise RuntimeError(
+                f"Error: request attribute with name {ra_name} was not found"
+            )
+        except RuntimeError:
+            logger.exception("Error: request attribute not found.", stack_info=True)
+            raise
+
+    return delete_request_attribute_by_id(cluster, tenant, ra_id)
+
+
+def create_or_update_request_attribute(cluster, tenant, ra_json):
+    """Either creates a new request attribute from the provided JSON or updates it if it
+    already exists in the tenant. Either way, the request attribute will be in the
+    tenant. It will check if there is an ID present in the JSON for the update.
+    \n
+    @param cluster (dict) - Dynatrace Cluster dictionary, as taken from variable set\n
+    @param tenant (str) - Dynatrace Tenant name, as taken from variable set\n
+    @param ra_json (dict) - details of the request attribute to be sent as JSON payload
+    \n
+    @returns Response - HTTP Response to the request
+    """
+    ra_id = ra_json.get("id")
+
+    if ra_id:
+        return update_request_attribute(cluster, tenant, ra_id, ra_json)
+
+    return create_request_attribute(cluster, tenant, ra_json)
+
+
+def get_request_attribute_id(cluster, tenant, ra_name):
+    """Gets the ID for a request attribute referenced by name.
+    \n
+    @param cluster (dict) - Dynatrace Cluster dictionary, as taken from variable set\n
+    @param tenant (str) - Dynatrace Tenant name, as taken from variable set\n
+    @param ra_name (str) - name of the Request Attribute
+    \n
+    @returns str - ID of the request attribute if found. None otherwise.
+    """
+    logger.info("Finding the ID for request attribute with name %s", ra_name)
+    req_attrs = get_all_request_attributes(cluster, tenant)
+
+    for req_attr in req_attrs:
+        if req_attr.get("name") == ra_name:
+            return req_attr.get("id")
+    return None
+
+
+def export_to_files(cluster, tenant, folder):
+    """Export all the request attributes in the tenant to files.
+    Each request attribute is written to a separate file in the folder provided.
+    The folder must exist already. 
+ \n + @param cluster (dict) - Dynatrace Cluster dictionary, as taken from variable set\n + @param tenant (str) - Dynatrace Tenant name, as taken from variable set\n + @param folder (str) - path to folder where to write the export files + \n + @throws RuntimeError - when the specified folder is not found + """ + if not os.path.exists(folder): + try: + raise RuntimeError("Error: the given folder path does not exist") + except RuntimeError: + logger.exception("Error: folder path not found.", stack_info=True) + raise + + if "/" in folder and not folder.endswith("/"): + folder = f"{folder}/" + if "\\" in folder and not folder.endswith("\\"): + folder = f"{folder}\\" + + req_attrs = get_all_request_attributes(cluster, tenant) + + logger.info("Exporting request attributes. Writing files inside %s", folder) + for req_attr in req_attrs: + logger.debug("Exporting request attribute called %s", req_attr.get("name")) + ra_data = get_request_attribute_details(cluster, tenant, req_attr.get("id")) + with open(file=f"{folder}{req_attr.get('name')}.json", mode="w") as ra_file: + json.dump(ra_data, ra_file, indent=4) diff --git a/dynatrace/tenant/request_naming.py b/dynatrace/tenant/request_naming.py index e284524..23b1ab1 100644 --- a/dynatrace/tenant/request_naming.py +++ b/dynatrace/tenant/request_naming.py @@ -1,52 +1,133 @@ -#!/bin/python3 -"""Global Service Request Naming Class""" +"""Service Request Naming Rule Operations via the Configuration API""" import os import json -from dynatrace.requests import request_handler as rh - -ENDPOINT = rh.TenantAPIs.REQUEST_NAMING - - -def pull_to_files(cluster, tenant, ignore_disabled=True): - """Pull Service Naming Rules to Files""" - all_rules_call = rh.make_api_call(cluster=cluster, - tenant=tenant, - endpoint=ENDPOINT) - all_rules_list = all_rules_call.json() - all_rules_list = all_rules_list['values'] - # print (json.dumps(all_rules_list, indent=2)) - - rules_file_list = [] - rule_num = 0 - for naming_rule in all_rules_list: - rule_call = rh.make_api_call(cluster=cluster, - tenant=tenant, - endpoint=f"{ENDPOINT}/{naming_rule['id']}") - if rule_call.status_code == 200: - rule_json = rule_call.json() - if rule_json['enabled'] and ignore_disabled: - rule_json.pop('metadata') - rule_json.pop('id') - rule_file_name = f"jsons/request_naming/{rule_num}.json" - with open(rule_file_name, 'w') as current_file: - json.dump(rule_json, current_file, indent=2) - rules_file_list.append(rule_file_name) - else: - print(rule_call.status_code) - rule_num = rule_num + 1 - return rules_file_list - - -def push_from_files(file_list, cluster, tenant): - """Push Service Naming Rules from Files""" - # TODO add safeties - for file_name in file_list: - print(file_name) - - -def generate_file_list(): - file_list = os.listdir("./jsons/request_naming/") - for f in file_list: - print(str.isdigit(f)) - # print(file_list.sort(key=lambda f: filter(str.isdigit, f))) +from dynatrace.framework import request_handler as rh + +ENDPOINT = str(rh.TenantAPIs.REQUEST_NAMING) + + +def delete_naming_rule(cluster, tenant, rule_id): + """Deletes an already existing request naming rule, referenced by its ID. 
+    \n
+    @param cluster (dict) - Dynatrace Cluster dictionary, as taken from variable set\n
+    @param tenant (str) - Dynatrace Tenant name, as taken from variable set\n
+    @param rule_id (str) - ID of the request naming rule to delete
+    \n
+    @returns Response - HTTP Response to the request
+    """
+    response = rh.make_api_call(
+        cluster=cluster,
+        tenant=tenant,
+        endpoint=f"{ENDPOINT}/{rule_id}",
+        method=rh.HTTP.DELETE
+    )
+
+    return response
+
+
+def update_naming_rule(cluster, tenant, rule_id, rule_json):
+    """Updates an already existing request naming rule, referenced by its ID.
+    \n
+    @param cluster (dict) - Dynatrace Cluster dictionary, as taken from variable set\n
+    @param tenant (str) - Dynatrace Tenant name, as taken from variable set\n
+    @param rule_id (str) - ID of the request naming rule to update\n
+    @param rule_json (dict) - new rule definition, to be sent as JSON payload
+    \n
+    @returns Response - HTTP Response to the request
+    """
+    response = rh.make_api_call(
+        cluster=cluster,
+        tenant=tenant,
+        endpoint=f"{ENDPOINT}/{rule_id}",
+        method=rh.HTTP.PUT,
+        json=rule_json
+    )
+
+    return response
+
+
+def create_naming_rule(cluster, tenant, rule_json):
+    """Creates a new request naming rule.
+    \n
+    @param cluster (dict) - Dynatrace Cluster dictionary, as taken from variable set\n
+    @param tenant (str) - Dynatrace Tenant name, as taken from variable set\n
+    @param rule_json (dict) - new rule definition, to be sent as JSON payload
+    \n
+    @returns Response - HTTP Response to the request
+    """
+    response = rh.make_api_call(
+        cluster=cluster,
+        tenant=tenant,
+        endpoint=ENDPOINT,
+        method=rh.HTTP.POST,
+        json=rule_json
+    )
+
+    return response
+
+
+def get_rule_details(cluster, tenant, rule_id):
+    """Gets the definition of an already existing request naming rule, referenced
+    by its ID.
+    \n
+    @param cluster (dict) - Dynatrace Cluster dictionary, as taken from variable set\n
+    @param tenant (str) - Dynatrace Tenant name, as taken from variable set\n
+    @param rule_id (str) - ID of the request naming rule to fetch
+    \n
+    @returns dict - the rule definition (details)
+    """
+    details = rh.make_api_call(
+        cluster=cluster,
+        tenant=tenant,
+        endpoint=f"{ENDPOINT}/{rule_id}",
+        method=rh.HTTP.GET
+    ).json()
+
+    return details
+
+
+def get_all_rules(cluster, tenant):
+    """Gets a list of all request naming rules in the tenant.
+    \n
+    @param cluster (dict) - Dynatrace Cluster dictionary, as taken from variable set\n
+    @param tenant (str) - Dynatrace Tenant name, as taken from variable set
+    \n
+    @returns list - list of request naming rules
+    """
+    rules = rh.make_api_call(
+        cluster=cluster,
+        tenant=tenant,
+        endpoint=ENDPOINT,
+        method=rh.HTTP.GET
+    ).json().get("values")
+
+    return rules
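A brief sketch tying the CRUD helpers above together (module path and the rule payload shape are assumptions; a real payload must follow the Dynatrace request-naming schema):

    from user_variables import FULL_SET
    from dynatrace.tenant import request_naming

    cluster = FULL_SET["mockserver1"]

    # Create a rule, then adjust its naming pattern via update.
    rule = {
        "enabled": True,
        "namingPattern": "{HTTP-Method} /api",
        "conditions": []
    }
    created = request_naming.create_naming_rule(cluster, "tenant1", rule)
    rule_id = created.json().get("id")

    rule["namingPattern"] = "{HTTP-Method} /api/v2"
    request_naming.update_naming_rule(cluster, "tenant1", rule_id, rule)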
+ """ + if not os.path.exists(folder): + raise RuntimeError("Error: export folder path does not exist") + + if "/" in folder and not folder.endswith("/"): + folder += "/" + if "\\" in folder and not folder.endswith("\\"): + folder += "\\" + + rules = get_all_rules(cluster, tenant) + + for rule in rules: + rule_details = get_rule_details(cluster, tenant, rule.get("id")) + rule_name = rule.get("name") + with open(file=f"{folder}{rule_name}.json", mode="w") as rule_file: + json.dump(rule_details, rule_file, indent=4) diff --git a/dynatrace/tenant/timeseries.py b/dynatrace/tenant/timeseries.py index be9e8ce..3c39127 100644 --- a/dynatrace/tenant/timeseries.py +++ b/dynatrace/tenant/timeseries.py @@ -1,34 +1,33 @@ -from dynatrace.requests import request_handler as rh - -ENDPOINT = "timeseries/" - - -def get_timeseries_list(cluster, tenant, params=None): - """Get List of Timeseries Metics""" - response = rh.make_api_call(cluster, tenant, ENDPOINT, params=params) - return response.json() - - -def get_timeseries_metric(cluster, tenant, metric, params=None): - """Get Timeseries Metric""" - # Chose to do GET, but could also be done as POST. Don't think there are any advantages to post - response = rh.make_api_call(cluster, tenant, ENDPOINT + metric, params=params) - return response.json() - - -def create_custom_metric(cluster, tenant, metric, json, params=None): - response = rh.make_api_call(cluster=cluster, - tenant=tenant, - endpoint=f"{ENDPOINT}{metric}", - params=params, - method=rh.HTTP.PUT, - json=json) - return response.status_code - - -def delete_custom_metic(cluster, tenant, metric): - response = rh.make_api_call(cluster=cluster, - tenant=tenant, - method=rh.HTTP.DELETE, - endpoint=f"{ENDPOINT}{metric}") - return response.status_code +"""Timerseries Operations from Environment V1 API +Note: module contains only use cases not currently fulfilled via Metrics (V2) API +""" +from dynatrace.framework import request_handler as rh + +ENDPOINT = rh.TenantAPIs.TIMESERIES + + +def get_metric_data_with_prediction(cluster, tenant, timeseries_id, **kwargs): + """Get datapoints for a metric, including prediction. + This returns a dictionary, where the timeseries ID is a key and the value is a list + of datapoints (timestamp + data). Cannot use timeframe larger than 30 min. 
+ \n + @param cluster (dict) - Dynatrace Cluster dictionary as taken from variable set\n + @param tenant (str) - Dynatrace Tenant name, as taken from variable set \n + @param timeseries_id (str) - ID of the Timeseries to extract + \n + @returns dict - predicted datapoints of the timeseries + """ + kwargs["includeData"] = True + kwargs["predict"] = True + if not (("startTimestamp" in kwargs and "endTimestamp" in kwargs) + or "relativeTime" in kwargs): + kwargs["relativeTime"] = "30mins" + + response = rh.make_api_call( + cluster=cluster, + tenant=tenant, + endpoint=f"{ENDPOINT}/{timeseries_id}", + params=kwargs + ).json() + + return response.get("dataResult").get("dataPoints") diff --git a/dynatrace/tenant/topology/__init__.py b/dynatrace/tenant/topology/__init__.py deleted file mode 100644 index 17f1f19..0000000 --- a/dynatrace/tenant/topology/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -from dynatrace.tenant.topology.applications import * -from dynatrace.tenant.topology.custom import * -from dynatrace.tenant.topology.hosts import * -from dynatrace.tenant.topology.process import * -from dynatrace.tenant.topology.process_groups import * -from dynatrace.tenant.topology.services import * -from dynatrace.tenant.topology.shared import * diff --git a/dynatrace/tenant/topology/applications.py b/dynatrace/tenant/topology/applications.py deleted file mode 100644 index 083ee34..0000000 --- a/dynatrace/tenant/topology/applications.py +++ /dev/null @@ -1,82 +0,0 @@ -"""Application operations from the Dynatrace API""" -# Applications needs a seperate definition since the url is not the same (not /infrastructre/) -from dynatrace.requests import request_handler as rh - -ENDPOINT = f"{rh.TenantAPIs.V1_TOPOLOGY}/applications" - - -def get_applications_tenantwide(cluster, tenant): - """Get Information for all applications in a tenant""" - response = rh.make_api_call(cluster=cluster, - tenant=tenant, - endpoint=ENDPOINT) - return response.json() - - -def get_application(cluster, tenant, entity): - """Get Information on one application for in a tenant""" - response = rh.make_api_call(cluster=cluster, - tenant=tenant, - endpoint=f"{ENDPOINT}/{entity}") - return response.json() - - -def set_application_properties(cluster, tenant, entity, prop_json): - """Update properties of application entity""" - response = rh.make_api_call(cluster=cluster, - tenant=tenant, - endpoint=f"{ENDPOINT}/{entity}", - method=rh.HTTP.POST, - json=prop_json) - return response.json() - - -def get_application_count_tenantwide(cluster, tenant): - """Get total count for all applications in a tenant""" - params = { - "relativeTime": "day", - "includeDetails": "false" - } - - response = rh.make_api_call(cluster=cluster, - tenant=tenant, - endpoint=ENDPOINT, - params=params) - env_app_count = len(response.json()) - return env_app_count - - -def get_application_count_clusterwide(cluster): - """Get total count for all applications in cluster""" - cluster_app_count = 0 - for env_key in cluster['tenant']: - cluster_app_count = cluster_app_count \ - + get_application_count_tenantwide(cluster, - env_key) - return cluster_app_count - - -def get_application_count_setwide(full_set): - full_set_app_count = 0 - for cluster_items in full_set.values(): - full_set_app_count = full_set_app_count \ - + get_application_count_clusterwide(cluster_items) - return full_set_app_count - - -def add_application_tags(cluster, tenant, entity, tag_list): - """Add tags to application""" - if tag_list is None: - raise TypeError("tag_list cannot be None type") - tag_json = { 
- 'tags': tag_list - } - return set_application_properties(cluster, tenant, entity, tag_json) - - -def get_application_baseline(cluster, tenant, entity): - """Get baselines on one application for in a tenant""" - response = rh.make_api_call(cluster=cluster, - tenant=tenant, - endpoint=f"{ENDPOINT}/{entity}/baseline") - return response.json() diff --git a/dynatrace/tenant/topology/custom.py b/dynatrace/tenant/topology/custom.py deleted file mode 100644 index 658f62f..0000000 --- a/dynatrace/tenant/topology/custom.py +++ /dev/null @@ -1,6 +0,0 @@ -import dynatrace.tenant.topology.shared as topology_shared - - -def set_custom_properties(cluster, tenant, entity, prop_json): - """Update properties of process_group entity""" - return topology_shared.set_env_layer_properties(cluster, tenant, 'custom', entity, prop_json) diff --git a/dynatrace/tenant/topology/hosts.py b/dynatrace/tenant/topology/hosts.py deleted file mode 100644 index 822725c..0000000 --- a/dynatrace/tenant/topology/hosts.py +++ /dev/null @@ -1,75 +0,0 @@ -"""Host operations from the Dynatrace API""" -import dynatrace.tenant.topology.shared as topology_shared -from dynatrace.requests import request_handler as rh - - -def get_hosts_tenantwide(cluster, tenant, params=None): - """Get Information for all hosts in a tenant""" - return topology_shared.get_env_layer_entities(cluster, tenant, 'hosts', params=params) - - -def get_host(cluster, tenant, entity, params=None): - """Get Information on one host for in a tenant""" - return topology_shared.get_env_layer_entity(cluster, tenant, 'hosts', entity, params=params) - - -def set_host_properties(cluster, tenant, entity, prop_json): - """Update properties of host entity""" - return topology_shared.set_env_layer_properties(cluster, tenant, 'hosts', entity, prop_json) - - -def get_host_count_tenantwide(cluster, tenant, params=None): - """Get total count for all hosts in a tenant""" - return topology_shared.get_env_layer_count(cluster, tenant, 'hosts', params=params) - - -def get_host_count_clusterwide(cluster, params=None): - """Get total count for all hosts in cluster""" - return topology_shared.get_cluster_layer_count(cluster, 'hosts', params=params) - - -def get_host_count_setwide(full_set, params=None): - """Get total count of hosts for all clusters definied in variable file""" - return topology_shared.get_set_layer_count(full_set, 'hosts', params=params) - - -def add_host_tags(cluster, tenant, entity, tag_list): - """Add tags to host""" - return topology_shared.add_env_layer_tags(cluster, tenant, 'hosts', entity, tag_list) - - -def delete_host_tag(cluster, tenant, entity, tag): - """Remove single tag from host""" - if tag is None: - raise TypeError("Tag cannot be None!") - return rh.make_api_call(cluster=cluster, - tenant=tenant, - method=rh.HTTP.DELETE, - endpoint=f"{rh.TenantAPIs.V1_TOPOLOGY}/infrastructure/hosts/{entity}/tags/{tag}") - - -def get_host_units_tenantwide(cluster, tenant, params=None): - consumed_host_units = 0 - host_list = get_hosts_tenantwide(cluster, tenant, params=params) - for host in host_list: - consumed_host_units = consumed_host_units + host['consumedHostUnits'] - return consumed_host_units - - -def get_oneagents_tenantwide(cluster, tenant, params=None): - oneagents = [] - next_page_key = 1 - - while next_page_key: - if next_page_key != 1: - params['nextPageKey'] = next_page_key - - response = rh.make_api_call(cluster=cluster, - endpoint=rh.TenantAPIs.ONEAGENTS, - tenant=tenant, - params=params) - - oneagents.extend(response.json().get('hosts')) - next_page_key 
= response.json().get('nextPageKey') - - return oneagents diff --git a/dynatrace/tenant/topology/process.py b/dynatrace/tenant/topology/process.py deleted file mode 100644 index fb6c523..0000000 --- a/dynatrace/tenant/topology/process.py +++ /dev/null @@ -1,12 +0,0 @@ -"""Process operations from the Dynatrace API""" -import dynatrace.tenant.topology.shared as topology_shared - - -def get_processes_tenantwide(cluster, tenant, params=None): - """Get Information for all processes in a tenant""" - return topology_shared.get_env_layer_entities(cluster, tenant, 'processes', params=params) - - -def get_process(cluster, tenant, entity, params=None): - """Get Information on one process for in a tenant""" - return topology_shared.get_env_layer_entity(cluster, tenant, 'processes', entity, params=params) diff --git a/dynatrace/tenant/topology/process_groups.py b/dynatrace/tenant/topology/process_groups.py deleted file mode 100644 index e45d912..0000000 --- a/dynatrace/tenant/topology/process_groups.py +++ /dev/null @@ -1,37 +0,0 @@ -"""Process Group operations from the Dynatrace API""" -import dynatrace.tenant.topology.shared as topology_shared - - -def get_process_groups_tenantwide(cluster, tenant): - """Get Information for all process-groups in a tenant""" - return topology_shared.get_env_layer_entities(cluster, tenant, 'process-groups') - - -def get_process_group(cluster, tenant, entity): - """Get Information on one process-group for in a tenant""" - return topology_shared.get_env_layer_entity(cluster, tenant, 'process-groups', entity) - - -def set_process_group_properties(cluster, tenant, entity, prop_json): - """Update properties of process-group entity""" - return topology_shared.set_env_layer_properties(cluster, tenant, 'process-groups', entity, prop_json) - - -def get_process_group_count_tenantwide(cluster, tenant, params=None): - """Get total count for all process-groups in a tenant""" - return topology_shared.get_env_layer_count(cluster, tenant, 'process-groups', params=params) - - -def get_process_group_count_clusterwide(cluster, params=None): - """Get total count for all process-groups in cluster""" - return topology_shared.get_cluster_layer_count(cluster, 'process-groups', params=params) - - -def get_process_group_count_setwide(full_set, params=None): - """Get total count of process-groups for all clusters defined in variable file""" - return topology_shared.get_set_layer_count(full_set, 'process-groups', params=params) - - -def add_process_group_tags(cluster, tenant, entity, tag_list): - """Add tags to a process group""" - return topology_shared.add_env_layer_tags(cluster, tenant, 'process-groups', entity, tag_list) diff --git a/dynatrace/tenant/topology/services.py b/dynatrace/tenant/topology/services.py deleted file mode 100644 index 6b4fe58..0000000 --- a/dynatrace/tenant/topology/services.py +++ /dev/null @@ -1,37 +0,0 @@ -"""Service operations from the Dynatrace API""" -import dynatrace.tenant.topology.shared as topology_shared - - -def get_services_tenantwide(cluster, tenant): - """Get Information for all services in a tenant""" - return topology_shared.get_env_layer_entities(cluster, tenant, 'services') - - -def get_service(cluster, tenant, entity): - """Get Information on one service for in a tenant""" - return topology_shared.get_env_layer_entity(cluster, tenant, 'services', entity) - - -def set_service_properties(cluster, tenant, entity, prop_json): - """Update properties of service entity""" - return topology_shared.set_env_layer_properties(cluster, tenant, 'services', entity, 
prop_json) - - -def get_service_count_tenantwide(cluster, tenant, params=None): - """Get total count for all services in a tenant""" - return topology_shared.get_env_layer_count(cluster, tenant, 'services', params=params) - - -def get_service_count_clusterwide(cluster, params=None): - """Get total count for all services in cluster""" - return topology_shared.get_cluster_layer_count(cluster, 'services', params=params) - - -def get_service_count_setwide(full_set, params=None): - """Get total count of services for all clusters definied in variable file""" - return topology_shared.get_set_layer_count(full_set, 'services', params=params) - - -def add_service_tags(cluster, tenant, entity, tag_list): - """Add tags to a service""" - return topology_shared.add_env_layer_tags(cluster, tenant, 'services', entity, tag_list) diff --git a/dynatrace/tenant/topology/shared.py b/dynatrace/tenant/topology/shared.py deleted file mode 100644 index 32f35ef..0000000 --- a/dynatrace/tenant/topology/shared.py +++ /dev/null @@ -1,138 +0,0 @@ -"""Shared topology operations for multiple layers from the Dynatrace API""" -from dynatrace.requests import request_handler as rh -# Layer Compatibility -# 1. Get all entities - application, host, process, process group, service -# 1a. Count all entities -# 2. Get specific entity - application, host process, process group, service -# 3. Update properties of entity - application, custom, host, process group, service - -ENDPOINT_SUFFIX = { - 'applications': 'applications', - 'custom': "infrastructure/custom", - 'hosts': "infrastructure/hosts", - 'processes': "infrastructure/processes", - 'process-groups': "infrastructure/process-groups", - 'services': "infrastructure/services" -} - - -def check_valid_layer(layer, layer_list): - """Check if the operation is valid for the layer""" - if layer is None or layer_list is None: - raise TypeError('Provide layer and layer_list!') - if layer not in layer_list: - raise ValueError( - layer + " layer does not exist or is invalid for this use!") - - -def get_env_layer_entities(cluster, tenant, layer, params=None): - """Get all Entities of Specified Layer""" - layer_list = ['applications', 'hosts', - 'processes', 'process-groups', 'services'] - check_valid_layer(layer, layer_list) - - if not params: - params = {} - - response = rh.make_api_call( - cluster=cluster, - tenant=tenant, - endpoint=f"{rh.TenantAPIs.V1_TOPOLOGY}/{ENDPOINT_SUFFIX[layer]}", - params=params - ) - return response.json() - - -def get_env_layer_entity(cluster, tenant, layer, entity, params=None): - """Get Entity Information for Specified Layer""" - layer_list = ['applications', 'hosts', - 'processes', 'process-groups', 'services'] - check_valid_layer(layer, layer_list) - - if not params: - params = {} - - response = rh.make_api_call( - cluster=cluster, - tenant=tenant, - endpoint=f"{rh.TenantAPIs.V1_TOPOLOGY}/{ENDPOINT_SUFFIX[layer]}/{entity}", - params=params - ) - return response.json() - - -def set_env_layer_properties(cluster, tenant, layer, entity, prop_json): - """Update Properties of Entity""" - layer_list = ['applications', 'custom', - 'hosts', 'process-groups', 'services'] - check_valid_layer(layer, layer_list) - response = rh.make_api_call( - cluster=cluster, - tenant=tenant, - method=rh.HTTP.POST, - endpoint=f"{rh.TenantAPIs.V1_TOPOLOGY}/{ENDPOINT_SUFFIX[layer]}/{entity}", - json=prop_json - ) - return response.status_code - - -def get_env_layer_count(cluster, tenant, layer, params=None): - """Get total hosts in an environment""" - if not params: - params = {} - 
- layer_list = ['applications', 'hosts', - 'processes', 'process-groups', 'services'] - - if 'relativeTime' not in params.keys(): - params['relativeTime'] = "day" - if 'includeDetails' not in params.keys(): - params['includeDetails'] = False - - check_valid_layer(layer, layer_list) - response = rh.make_api_call(cluster=cluster, - tenant=tenant, - endpoint=f"{rh.TenantAPIs.V1_TOPOLOGY}/{ENDPOINT_SUFFIX[layer]}", - params=params) - env_layer_count = len(response.json()) - return env_layer_count - - -def get_cluster_layer_count(cluster, layer, params=None): - """Get total count for all environments in cluster""" - - if not params: - params = {} - - cluster_layer_count = 0 - for env_key in cluster['tenant']: - cluster_layer_count += get_env_layer_count(cluster=cluster, - tenant=env_key, - layer=layer, - params=params) - return cluster_layer_count - - -def get_set_layer_count(full_set, layer, params=None): - """Get total count for all clusters definied in variable file""" - if not params: - params = {} - - full_set_layer_count = 0 - for cluster in full_set.values(): - full_set_layer_count += get_cluster_layer_count(cluster, - layer, - params) - return full_set_layer_count - - -def add_env_layer_tags(cluster, tenant, layer, entity, tag_list): - layer_list = ['applications', 'hosts', - 'custom', 'process-groups', 'services'] - check_valid_layer(layer, layer_list) - if not tag_list: - raise TypeError("tag_list cannot be None type") - tag_json = { - 'tags': tag_list - } - return set_env_layer_properties(cluster, tenant, layer, entity, tag_json) \ No newline at end of file diff --git a/scripts/template.py b/scripts/template.py index 168724c..f108203 100644 --- a/scripts/template.py +++ b/scripts/template.py @@ -1,4 +1,5 @@ +"""Template to make your own scripts""" # IMPORTS GO HERE -import change_pythonpath # Must be first import +import change_pythonpath # Must be first import pylint: disable=unused-import -# YOUR SCRIPT GOES HERE \ No newline at end of file +# YOUR SCRIPT GOES HERE diff --git a/tests/mockserver_payloads/requests/hosts/tags.json b/tests/mockserver_payloads/requests/entities/tags.json similarity index 100% rename from tests/mockserver_payloads/requests/hosts/tags.json rename to tests/mockserver_payloads/requests/entities/tags.json diff --git a/tests/mockserver_payloads/requests/extensions/config.json b/tests/mockserver_payloads/requests/extensions/config.json new file mode 100644 index 0000000..6088e73 --- /dev/null +++ b/tests/mockserver_payloads/requests/extensions/config.json @@ -0,0 +1,6 @@ +{ + "extensionId": "custom.jmx.radujmx123456789", + "enabled": true, + "infraOnlyEnabled": false, + "properties": {} +} \ No newline at end of file diff --git a/tests/mockserver_payloads/requests/management_zones/mz.json b/tests/mockserver_payloads/requests/management_zones/mz.json new file mode 100644 index 0000000..729e259 --- /dev/null +++ b/tests/mockserver_payloads/requests/management_zones/mz.json @@ -0,0 +1,203 @@ +{ + "name": "Mock_MZ", + "rules": [ + { + "type": "HOST", + "enabled": true, + "propagationTypes": [], + "conditions": [ + { + "key": { + "attribute": "HOST_TAGS" + }, + "comparisonInfo": { + "type": "TAG", + "operator": "EQUALS", + "value": { + "context": "CONTEXTLESS", + "key": "Application", + "value": "DemoApp" + }, + "negate": false + } + } + ] + }, + { + "type": "SERVICE", + "enabled": true, + "propagationTypes": [], + "conditions": [ + { + "key": { + "attribute": "SERVICE_TAGS" + }, + "comparisonInfo": { + "type": "TAG", + "operator": "EQUALS", + "value": { + 
"context": "CONTEXTLESS", + "key": "Application", + "value": "DemoApp" + }, + "negate": false + } + } + ] + }, + { + "type": "PROCESS_GROUP", + "enabled": true, + "propagationTypes": [], + "conditions": [ + { + "key": { + "attribute": "PROCESS_GROUP_TAGS" + }, + "comparisonInfo": { + "type": "TAG", + "operator": "EQUALS", + "value": { + "context": "CONTEXTLESS", + "key": "Application", + "value": "DemoApp" + }, + "negate": false + } + } + ] + }, + { + "type": "WEB_APPLICATION", + "enabled": true, + "propagationTypes": [], + "conditions": [ + { + "key": { + "attribute": "WEB_APPLICATION_TAGS" + }, + "comparisonInfo": { + "type": "TAG", + "operator": "EQUALS", + "value": { + "context": "CONTEXTLESS", + "key": "Application", + "value": "DemoApp" + }, + "negate": false + } + } + ] + }, + { + "type": "BROWSER_MONITOR", + "enabled": true, + "propagationTypes": [], + "conditions": [ + { + "key": { + "attribute": "BROWSER_MONITOR_TAGS" + }, + "comparisonInfo": { + "type": "TAG", + "operator": "EQUALS", + "value": { + "context": "CONTEXTLESS", + "key": "Application", + "value": "DemoApp" + }, + "negate": false + } + } + ] + }, + { + "type": "HTTP_MONITOR", + "enabled": true, + "propagationTypes": [], + "conditions": [ + { + "key": { + "attribute": "HTTP_MONITOR_TAGS" + }, + "comparisonInfo": { + "type": "TAG", + "operator": "EQUALS", + "value": { + "context": "CONTEXTLESS", + "key": "Application", + "value": "DemoApp" + }, + "negate": false + } + } + ] + }, + { + "type": "MOBILE_APPLICATION", + "enabled": true, + "propagationTypes": [], + "conditions": [ + { + "key": { + "attribute": "MOBILE_APPLICATION_TAGS" + }, + "comparisonInfo": { + "type": "TAG", + "operator": "EQUALS", + "value": { + "context": "CONTEXTLESS", + "key": "Application", + "value": "DemoApp" + }, + "negate": false + } + } + ] + }, + { + "type": "CUSTOM_DEVICE_GROUP", + "enabled": true, + "propagationTypes": [], + "conditions": [ + { + "key": { + "attribute": "CUSTOM_DEVICE_GROUP_TAGS" + }, + "comparisonInfo": { + "type": "TAG", + "operator": "EQUALS", + "value": { + "context": "CONTEXTLESS", + "key": "Application", + "value": "DemoApp" + }, + "negate": false + } + } + ] + }, + { + "type": "EXTERNAL_MONITOR", + "enabled": true, + "propagationTypes": [], + "conditions": [ + { + "key": { + "attribute": "EXTERNAL_MONITOR_TAGS" + }, + "comparisonInfo": { + "type": "TAG", + "operator": "EQUALS", + "value": { + "context": "CONTEXTLESS", + "key": "Application", + "value": "DemoApp" + }, + "negate": false + } + } + ] + } + ] +} \ No newline at end of file diff --git a/tests/mockserver_payloads/requests/metrics/payload.txt b/tests/mockserver_payloads/requests/metrics/payload.txt new file mode 100644 index 0000000..b1c8306 --- /dev/null +++ b/tests/mockserver_payloads/requests/metrics/payload.txt @@ -0,0 +1,3 @@ +cpu.temperature,cpu=Intel,core=1 gauge,min=20,max=30,sum=50,count=2 +cpu.temperature,cpu=Intel,core=2 gauge,min=10,max=30,sum=40,count=2 +cpu.temperature,cpu=Intel,core=3 gauge,min=30,max=40,sum=70,count=2 \ No newline at end of file diff --git a/tests/mockserver_payloads/requests/problems/comment.json b/tests/mockserver_payloads/requests/problems/comment.json new file mode 100644 index 0000000..c36b9ff --- /dev/null +++ b/tests/mockserver_payloads/requests/problems/comment.json @@ -0,0 +1,4 @@ +{ + "comment": "Test comment", + "context": "Test" +} \ No newline at end of file diff --git a/tests/mockserver_payloads/requests/processes/tags.json b/tests/mockserver_payloads/requests/processes/tags.json deleted file mode 100644 index 
dc89ff6..0000000 --- a/tests/mockserver_payloads/requests/processes/tags.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "tags": [ - "demo", - "example" - ] -} \ No newline at end of file diff --git a/tests/mockserver_payloads/requests/request_attributes/definition.json b/tests/mockserver_payloads/requests/request_attributes/definition.json new file mode 100644 index 0000000..46ae206 --- /dev/null +++ b/tests/mockserver_payloads/requests/request_attributes/definition.json @@ -0,0 +1,21 @@ +{ + "name": "Mock_ReqAttr_4", + "enabled": true, + "dataType": "STRING", + "dataSources": [ + { + "enabled": true, + "source": "REQUEST_HEADER", + "valueProcessing": { + "splitAt": "", + "trim": false + }, + "parameterName": "x-mock-header", + "capturingAndStorageLocation": "CAPTURE_AND_STORE_ON_SERVER" + } + ], + "normalization": "ORIGINAL", + "aggregation": "FIRST", + "confidential": false, + "skipPersonalDataMasking": false +} \ No newline at end of file diff --git a/tests/mockserver_payloads/requests/request_attributes/updated.json b/tests/mockserver_payloads/requests/request_attributes/updated.json new file mode 100644 index 0000000..1fd0f56 --- /dev/null +++ b/tests/mockserver_payloads/requests/request_attributes/updated.json @@ -0,0 +1,22 @@ +{ + "id": "123abc456-a123-1234-4321-def123ghi45", + "name": "Mock_ReqAttr_1_update", + "enabled": true, + "dataType": "STRING", + "dataSources": [ + { + "enabled": true, + "source": "REQUEST_HEADER", + "valueProcessing": { + "splitAt": "", + "trim": false + }, + "parameterName": "x-mock-header", + "capturingAndStorageLocation": "CAPTURE_AND_STORE_ON_SERVER" + } + ], + "normalization": "ORIGINAL", + "aggregation": "FIRST", + "confidential": false, + "skipPersonalDataMasking": false +} \ No newline at end of file diff --git a/tests/mockserver_payloads/requests/request_naming/definition.json b/tests/mockserver_payloads/requests/request_naming/definition.json new file mode 100644 index 0000000..bd04263 --- /dev/null +++ b/tests/mockserver_payloads/requests/request_naming/definition.json @@ -0,0 +1,34 @@ +{ + "enabled": true, + "namingPattern": "{mock} request name", + "managementZones": [], + "conditions": [ + { + "attribute": "SERVICE_REQUEST_ATTRIBUTE", + "comparisonInfo": { + "type": "STRING_REQUEST_ATTRIBUTE", + "comparison": "EXISTS", + "value": null, + "negate": false, + "requestAttribute": "Mock Request Attribute", + "caseSensitive": false, + "matchOnChildCalls": false, + "source": null + } + } + ], + "placeholders": [ + { + "name": "mock", + "attribute": "SERVICE_REQUEST_ATTRIBUTE", + "kind": "REGEX_EXTRACTION", + "delimiterOrRegex": "(^.*?$)", + "endDelimiter": null, + "requestAttribute": "Mock Request Attribute", + "normalization": "ORIGINAL", + "useFromChildCalls": null, + "aggregation": null, + "source": null + } + ] +} \ No newline at end of file diff --git a/tests/mockserver_payloads/requests/request_naming/updated.json b/tests/mockserver_payloads/requests/request_naming/updated.json new file mode 100644 index 0000000..b074a51 --- /dev/null +++ b/tests/mockserver_payloads/requests/request_naming/updated.json @@ -0,0 +1,34 @@ +{ + "enabled": true, + "namingPattern": "{mock} request name 123", + "managementZones": [], + "conditions": [ + { + "attribute": "SERVICE_REQUEST_ATTRIBUTE", + "comparisonInfo": { + "type": "STRING_REQUEST_ATTRIBUTE", + "comparison": "EXISTS", + "value": null, + "negate": false, + "requestAttribute": "Mock Request Attribute", + "caseSensitive": false, + "matchOnChildCalls": false, + "source": null + } + } + ], + "placeholders": [ + { + 
"name": "mock", + "attribute": "SERVICE_REQUEST_ATTRIBUTE", + "kind": "REGEX_EXTRACTION", + "delimiterOrRegex": "(^.*?$)", + "endDelimiter": null, + "requestAttribute": "Mock Request Attribute", + "normalization": "ORIGINAL", + "useFromChildCalls": null, + "aggregation": null, + "source": null + } + ] +} \ No newline at end of file diff --git a/tests/mockserver_payloads/requests/services/tags.json b/tests/mockserver_payloads/requests/services/tags.json deleted file mode 100644 index dc89ff6..0000000 --- a/tests/mockserver_payloads/requests/services/tags.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "tags": [ - "demo", - "example" - ] -} \ No newline at end of file diff --git a/tests/mockserver_payloads/responses/entities/get_all.json b/tests/mockserver_payloads/responses/entities/get_all.json new file mode 100644 index 0000000..eb816cf --- /dev/null +++ b/tests/mockserver_payloads/responses/entities/get_all.json @@ -0,0 +1,18 @@ +{ + "totalCount": 3, + "pageSize": 50, + "entities": [ + { + "entityId": "HOST-ABC123DEF456GHIJ", + "consumedHostUnits": 0.25 + }, + { + "entityId": "HOST-5B9CE4E4E14185FA", + "consumedHostUnits": 0.25 + }, + { + "entityId": "HOST-421D60DB4A2EA929", + "consumedHostUnits": 3.5 + } + ] +} \ No newline at end of file diff --git a/tests/mockserver_payloads/responses/entities/get_single.json b/tests/mockserver_payloads/responses/entities/get_single.json new file mode 100644 index 0000000..92d8082 --- /dev/null +++ b/tests/mockserver_payloads/responses/entities/get_single.json @@ -0,0 +1,10 @@ +{ + "totalCount": 1, + "pageSize": 50, + "entities": [ + { + "entityId": "HOST-ABC123DEF456GHIJ", + "consumedHostUnits": 0.25 + } + ] +} \ No newline at end of file diff --git a/tests/mockserver_payloads/responses/extensions/config.json b/tests/mockserver_payloads/responses/extensions/config.json new file mode 100644 index 0000000..6088e73 --- /dev/null +++ b/tests/mockserver_payloads/responses/extensions/config.json @@ -0,0 +1,6 @@ +{ + "extensionId": "custom.jmx.radujmx123456789", + "enabled": true, + "infraOnlyEnabled": false, + "properties": {} +} \ No newline at end of file diff --git a/tests/mockserver_payloads/responses/extensions/details.json b/tests/mockserver_payloads/responses/extensions/details.json new file mode 100644 index 0000000..ca07917 --- /dev/null +++ b/tests/mockserver_payloads/responses/extensions/details.json @@ -0,0 +1,14 @@ +{ + "id": "custom.jmx.radujmx123456789", + "name": "radujmx", + "version": "1.123456789", + "type": "JMX", + "metricGroup": "custom.jmx.radujmx", + "metadata": { + "configurationVersions": [ + 0 + ], + "clusterVersion": "1.207" + }, + "properties": [] +} \ No newline at end of file diff --git a/tests/mockserver_payloads/responses/extensions/get_all.json b/tests/mockserver_payloads/responses/extensions/get_all.json new file mode 100644 index 0000000..ac86295 --- /dev/null +++ b/tests/mockserver_payloads/responses/extensions/get_all.json @@ -0,0 +1,20 @@ +{ + "extensions": [ + { + "id": "custom.jmx.radujmx123456789", + "name": "radujmx", + "type": "JMX" + }, + { + "id": "custom.python.oneagent_plugin", + "name": "OneAgent Plugin", + "type": "ONEAGENT" + }, + { + "id": "custom.remote.python.activegate_plugin", + "name": "ActiveGate Plugin", + "type": "ACTIVEGATE" + } + ], + "totalResults": 3 +} \ No newline at end of file diff --git a/tests/mockserver_payloads/responses/extensions/instances.json b/tests/mockserver_payloads/responses/extensions/instances.json new file mode 100644 index 0000000..05f2510 --- /dev/null +++ 
b/tests/mockserver_payloads/responses/extensions/instances.json @@ -0,0 +1,8 @@ +{ + "configurationsList": [ + { + "id": "HOST-123ABC456GHI789J" + } + ], + "totalResults": 1 +} \ No newline at end of file diff --git a/tests/mockserver_payloads/responses/extensions/metrics.json b/tests/mockserver_payloads/responses/extensions/metrics.json new file mode 100644 index 0000000..a3d4ba4 --- /dev/null +++ b/tests/mockserver_payloads/responses/extensions/metrics.json @@ -0,0 +1,14 @@ +{ + "nextPageKey": null, + "metrics": [ + { + "metricId": "ext:custom.jmx.radujmx.metric_test-count" + }, + { + "metricId": "ext:custom.jmx.radujmx.metric_test-avg" + }, + { + "metricId": "ext:custom.jmx.radujmx.metric_test-sum" + } + ] +} \ No newline at end of file diff --git a/tests/mockserver_payloads/responses/extensions/states.json b/tests/mockserver_payloads/responses/extensions/states.json new file mode 100644 index 0000000..4ed3395 --- /dev/null +++ b/tests/mockserver_payloads/responses/extensions/states.json @@ -0,0 +1,35 @@ +{ + "states": [ + { + "extensionId": "custom.jmx.radujmx123456789", + "version": null, + "endpointId": null, + "state": "OK", + "stateDescription": "", + "timestamp": 1605975229340, + "hostId": "HOST-1111111111111", + "processId": "PROCESS_GROUP_INSTANCE-1111111111111" + }, + { + "extensionId": "custom.jmx.radujmx123456789", + "version": null, + "endpointId": null, + "state": "OK", + "stateDescription": "", + "timestamp": 1605975229340, + "hostId": "HOST-222222222222222", + "processId": "PROCESS_GROUP_INSTANCE-222222222222222" + }, + { + "extensionId": "custom.jmx.radujmx123456789", + "version": null, + "endpointId": null, + "state": "OK", + "stateDescription": "", + "timestamp": 1605975229340, + "hostId": "HOST-333333333333", + "processId": "PROCESS_GROUP_INSTANCE-3333333333333" + } + ], + "totalResults": 3 +} \ No newline at end of file diff --git a/tests/mockserver_payloads/responses/get_all.json b/tests/mockserver_payloads/responses/get_all.json deleted file mode 100644 index e69de29..0000000 diff --git a/tests/mockserver_payloads/responses/host_groups/mock_get_general_1.json b/tests/mockserver_payloads/responses/host_groups/mock_get_general_1.json deleted file mode 100644 index 2b383c5..0000000 --- a/tests/mockserver_payloads/responses/host_groups/mock_get_general_1.json +++ /dev/null @@ -1,34 +0,0 @@ -[{ - "entityId": "HOST-238441A17F95B305", - "displayName": "testserver", - "discoveredName": "testserver", - "firstSeenTimestamp": 1592513300463, - "lastSeenTimestamp": 1592980597441, - "tags": [], - "fromRelationships": {}, - "toRelationships": { - "isProcessOf": [], - "runsOn": [] - }, - "osType": "LINUX", - "osArchitecture": "X86", - "osVersion": "Debian GNU/Linux 10 (buster) (kernel 4.19.0-9-amd64)", - "bitness": "64bit", - "cpuCores": 1, - "logicalCpuCores": 2, - "monitoringMode": "FULL_STACK", - "networkZoneId": "default", - "agentVersion": { - "major": 1, - "minor": 195, - "revision": 54, - "timestamp": "20200529-113801", - "sourceRevision": "" - }, - "consumedHostUnits": 8.0, - "userLevel": "SUPERUSER", - "hostGroup": { - "meId": "HOST_GROUP-ABCDEFGH12345678", - "name": "HOST_GROUP_1" - } -}] \ No newline at end of file diff --git a/tests/mockserver_payloads/responses/hosts/get_all.json b/tests/mockserver_payloads/responses/hosts/get_all.json deleted file mode 100644 index bae3691..0000000 --- a/tests/mockserver_payloads/responses/hosts/get_all.json +++ /dev/null @@ -1,14 +0,0 @@ -[ - { - "entityId": "HOST-ABC123DEF456GHIJ", - "consumedHostUnits": 0.25 - }, - { - "entityId": 
"HOST-5B9CE4E4E14185FA", - "consumedHostUnits": 0.25 - }, - { - "entityId": "HOST-421D60DB4A2EA929", - "consumedHostUnits": 3.5 - } -] \ No newline at end of file diff --git a/tests/mockserver_payloads/responses/hosts/get_single.json b/tests/mockserver_payloads/responses/hosts/get_single.json deleted file mode 100644 index b27e2e7..0000000 --- a/tests/mockserver_payloads/responses/hosts/get_single.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "entityId": "HOST-ABC123DEF456GHIJ", - "consumedHostUnits": 0.25 -} \ No newline at end of file diff --git a/tests/mockserver_payloads/responses/management_zones/created.json b/tests/mockserver_payloads/responses/management_zones/created.json new file mode 100644 index 0000000..ae58097 --- /dev/null +++ b/tests/mockserver_payloads/responses/management_zones/created.json @@ -0,0 +1,3 @@ +{ + "id": "1234567890" +} \ No newline at end of file diff --git a/tests/mockserver_payloads/responses/management_zones/get_all.json b/tests/mockserver_payloads/responses/management_zones/get_all.json new file mode 100644 index 0000000..b54f797 --- /dev/null +++ b/tests/mockserver_payloads/responses/management_zones/get_all.json @@ -0,0 +1,16 @@ +{ + "values": [ + { + "id": "1234566779789879", + "name": "Mock_MZ_1" + }, + { + "id": "1234567890", + "name": "Mock_MZ" + }, + { + "id": "9987342936473", + "name": "Mock_MZ_2" + } + ] +} \ No newline at end of file diff --git a/tests/mockserver_payloads/responses/management_zones/get_mz.json b/tests/mockserver_payloads/responses/management_zones/get_mz.json new file mode 100644 index 0000000..729e259 --- /dev/null +++ b/tests/mockserver_payloads/responses/management_zones/get_mz.json @@ -0,0 +1,203 @@ +{ + "name": "Mock_MZ", + "rules": [ + { + "type": "HOST", + "enabled": true, + "propagationTypes": [], + "conditions": [ + { + "key": { + "attribute": "HOST_TAGS" + }, + "comparisonInfo": { + "type": "TAG", + "operator": "EQUALS", + "value": { + "context": "CONTEXTLESS", + "key": "Application", + "value": "DemoApp" + }, + "negate": false + } + } + ] + }, + { + "type": "SERVICE", + "enabled": true, + "propagationTypes": [], + "conditions": [ + { + "key": { + "attribute": "SERVICE_TAGS" + }, + "comparisonInfo": { + "type": "TAG", + "operator": "EQUALS", + "value": { + "context": "CONTEXTLESS", + "key": "Application", + "value": "DemoApp" + }, + "negate": false + } + } + ] + }, + { + "type": "PROCESS_GROUP", + "enabled": true, + "propagationTypes": [], + "conditions": [ + { + "key": { + "attribute": "PROCESS_GROUP_TAGS" + }, + "comparisonInfo": { + "type": "TAG", + "operator": "EQUALS", + "value": { + "context": "CONTEXTLESS", + "key": "Application", + "value": "DemoApp" + }, + "negate": false + } + } + ] + }, + { + "type": "WEB_APPLICATION", + "enabled": true, + "propagationTypes": [], + "conditions": [ + { + "key": { + "attribute": "WEB_APPLICATION_TAGS" + }, + "comparisonInfo": { + "type": "TAG", + "operator": "EQUALS", + "value": { + "context": "CONTEXTLESS", + "key": "Application", + "value": "DemoApp" + }, + "negate": false + } + } + ] + }, + { + "type": "BROWSER_MONITOR", + "enabled": true, + "propagationTypes": [], + "conditions": [ + { + "key": { + "attribute": "BROWSER_MONITOR_TAGS" + }, + "comparisonInfo": { + "type": "TAG", + "operator": "EQUALS", + "value": { + "context": "CONTEXTLESS", + "key": "Application", + "value": "DemoApp" + }, + "negate": false + } + } + ] + }, + { + "type": "HTTP_MONITOR", + "enabled": true, + "propagationTypes": [], + "conditions": [ + { + "key": { + "attribute": "HTTP_MONITOR_TAGS" + }, + 
"comparisonInfo": { + "type": "TAG", + "operator": "EQUALS", + "value": { + "context": "CONTEXTLESS", + "key": "Application", + "value": "DemoApp" + }, + "negate": false + } + } + ] + }, + { + "type": "MOBILE_APPLICATION", + "enabled": true, + "propagationTypes": [], + "conditions": [ + { + "key": { + "attribute": "MOBILE_APPLICATION_TAGS" + }, + "comparisonInfo": { + "type": "TAG", + "operator": "EQUALS", + "value": { + "context": "CONTEXTLESS", + "key": "Application", + "value": "DemoApp" + }, + "negate": false + } + } + ] + }, + { + "type": "CUSTOM_DEVICE_GROUP", + "enabled": true, + "propagationTypes": [], + "conditions": [ + { + "key": { + "attribute": "CUSTOM_DEVICE_GROUP_TAGS" + }, + "comparisonInfo": { + "type": "TAG", + "operator": "EQUALS", + "value": { + "context": "CONTEXTLESS", + "key": "Application", + "value": "DemoApp" + }, + "negate": false + } + } + ] + }, + { + "type": "EXTERNAL_MONITOR", + "enabled": true, + "propagationTypes": [], + "conditions": [ + { + "key": { + "attribute": "EXTERNAL_MONITOR_TAGS" + }, + "comparisonInfo": { + "type": "TAG", + "operator": "EQUALS", + "value": { + "context": "CONTEXTLESS", + "key": "Application", + "value": "DemoApp" + }, + "negate": false + } + } + ] + } + ] +} \ No newline at end of file diff --git a/tests/mockserver_payloads/responses/metrics/datapoints.json b/tests/mockserver_payloads/responses/metrics/datapoints.json new file mode 100644 index 0000000..17106ba --- /dev/null +++ b/tests/mockserver_payloads/responses/metrics/datapoints.json @@ -0,0 +1,22 @@ +{ + "totalCount": 1, + "nextPageKey": null, + "result": [ + { + "metricId": "builtin:host.mem.avail.pct", + "data": [ + { + "dimensions": [ + "HOST-ABC123DEF456GHI7" + ], + "timestamps": [ + 1605684060000 + ], + "values": [ + 29.36756863040855 + ] + } + ] + } + ] +} \ No newline at end of file diff --git a/tests/mockserver_payloads/responses/metrics/descriptors.json b/tests/mockserver_payloads/responses/metrics/descriptors.json new file mode 100644 index 0000000..8de223f --- /dev/null +++ b/tests/mockserver_payloads/responses/metrics/descriptors.json @@ -0,0 +1,42 @@ +{ + "totalCount": 3, + "nextPageKey": null, + "metrics": [ + { + "metricId": "builtin:host.mem.avail.bytes", + "dimensionDefinitions": [ + { + "key": "dt.entity.host", + "name": "Host", + "displayName": "Host", + "index": 0, + "type": "ENTITY" + } + ] + }, + { + "metricId": "builtin:host.mem.avail.pct", + "dimensionDefinitions": [ + { + "key": "dt.entity.host", + "name": "Host", + "displayName": "Host", + "index": 0, + "type": "ENTITY" + } + ] + }, + { + "metricId": "builtin:host.mem.avail.pfps", + "dimensionDefinitions": [ + { + "key": "dt.entity.host", + "name": "Host", + "displayName": "Host", + "index": 0, + "type": "ENTITY" + } + ] + } + ] +} \ No newline at end of file diff --git a/tests/mockserver_payloads/responses/oneagents/get_oneagents.json b/tests/mockserver_payloads/responses/oneagents/get_oneagents.json new file mode 100644 index 0000000..67f05db --- /dev/null +++ b/tests/mockserver_payloads/responses/oneagents/get_oneagents.json @@ -0,0 +1,22 @@ +{ + "hosts": [ + { + "hostInfo": { + "entityId": "HOST-ABC123DEF456GHIJ", + "consumedHostUnits": 0.25 + } + }, + { + "hostInfo": { + "entityId": "HOST-5B9CE4E4E14185FA", + "consumedHostUnits": 0.25 + } + }, + { + "hostInfo": { + "entityId": "HOST-421D60DB4A2EA929", + "consumedHostUnits": 3.5 + } + } + ] +} \ No newline at end of file diff --git a/tests/mockserver_payloads/responses/oneagents/v1_get_all_hosts.json 
b/tests/mockserver_payloads/responses/oneagents/v1_get_all_hosts.json new file mode 100644 index 0000000..8130b93 --- /dev/null +++ b/tests/mockserver_payloads/responses/oneagents/v1_get_all_hosts.json @@ -0,0 +1,14 @@ +[ + { + "entityId": "HOST-ABC123DEF456GHIJ", + "consumedHostUnits": 0.25 + }, + { + "entityId": "HOST-5B9CE4E4E14185FA", + "consumedHostUnits": 0.25 + }, + { + "entityId": "HOST-421D60DB4A2EA929", + "consumedHostUnits": 3.5 + } +] \ No newline at end of file diff --git a/tests/mockserver_payloads/responses/problems/get_all.json b/tests/mockserver_payloads/responses/problems/get_all.json new file mode 100644 index 0000000..6091a8e --- /dev/null +++ b/tests/mockserver_payloads/responses/problems/get_all.json @@ -0,0 +1,70 @@ +{ + "nextPageKey": null, + "totalCount": 2, + "problems": [ + { + "problemId": "-123456789_987654321V2", + "displayId": "P-123456", + "title": "Problem1", + "impactLevel": "INFRASTRUCTURE", + "severityLevel": "AVAILABILITY", + "status": "OPEN", + "affectedEntities": [ + { + "entityId": { + "id": "HOST-123456789", + "type": "HOST" + }, + "name": "DemoHost" + } + ], + "impactedEntities": [ + { + "entityId": { + "id": "HOST-123456789", + "type": "HOST" + }, + "name": "DemoHost" + } + ], + "rootCauseEntity": null, + "managementZones": [], + "entityTags": [], + "problemFilters": [], + "startTime": 1605168880219, + "endTime": -1 + }, + { + "problemId": "987654321_123456789V2", + "displayId": "P-98765", + "title": "Problem2", + "impactLevel": "APPLICATION", + "severityLevel": "AVAILABILITY", + "status": "OPEN", + "affectedEntities": [ + { + "entityId": { + "id": "HTTP_CHECK-123456789", + "type": "HTTP_CHECK" + }, + "name": "Demo Test" + } + ], + "impactedEntities": [ + { + "entityId": { + "id": "HTTP_CHECK-123456789", + "type": "HTTP_CHECK" + }, + "name": "Demo Test" + } + ], + "rootCauseEntity": null, + "managementZones": [], + "entityTags": [], + "problemFilters": [], + "startTime": 1605169224250, + "endTime": -1 + } + ] +} \ No newline at end of file diff --git a/tests/mockserver_payloads/responses/problems/get_comment.json b/tests/mockserver_payloads/responses/problems/get_comment.json new file mode 100644 index 0000000..5f74e8e --- /dev/null +++ b/tests/mockserver_payloads/responses/problems/get_comment.json @@ -0,0 +1,7 @@ +{ + "id": "123456789", + "createdAtTimestamp": 1606412761265, + "content": "test comment 1", + "authorName": "radu.stefan@mocktest.com", + "context": "Python API" +} \ No newline at end of file diff --git a/tests/mockserver_payloads/responses/problems/get_comments.json b/tests/mockserver_payloads/responses/problems/get_comments.json new file mode 100644 index 0000000..54b8f19 --- /dev/null +++ b/tests/mockserver_payloads/responses/problems/get_comments.json @@ -0,0 +1,20 @@ +{ + "totalCount": 2, + "pageSize": 10, + "comments": [ + { + "id": "123456789", + "createdAtTimestamp": 1606412761265, + "content": "test comment 1", + "authorName": "radu.stefan@mocktest.com", + "context": "Python API" + }, + { + "id": "987654321", + "createdAtTimestamp": 1606411416557, + "content": "", + "authorName": "radu.stefan@mocktest.com", + "context": "dynatrace-problem-close" + } + ] +} \ No newline at end of file diff --git a/tests/mockserver_payloads/responses/problems/get_one.json b/tests/mockserver_payloads/responses/problems/get_one.json new file mode 100644 index 0000000..3ba9349 --- /dev/null +++ b/tests/mockserver_payloads/responses/problems/get_one.json @@ -0,0 +1,32 @@ +{ + "problemId": "-123456789_987654321V2", + "displayId": "P-123456", + 
"title": "Problem1", + "impactLevel": "INFRASTRUCTURE", + "severityLevel": "AVAILABILITY", + "status": "OPEN", + "affectedEntities": [ + { + "entityId": { + "id": "HOST-123456789", + "type": "HOST" + }, + "name": "DemoHost" + } + ], + "impactedEntities": [ + { + "entityId": { + "id": "HOST-123456789", + "type": "HOST" + }, + "name": "DemoHost" + } + ], + "rootCauseEntity": null, + "managementZones": [], + "entityTags": [], + "problemFilters": [], + "startTime": 1605168880219, + "endTime": -1 +} \ No newline at end of file diff --git a/tests/mockserver_payloads/responses/processes/get_all_pgis.json b/tests/mockserver_payloads/responses/processes/get_all_pgis.json deleted file mode 100644 index 121da81..0000000 --- a/tests/mockserver_payloads/responses/processes/get_all_pgis.json +++ /dev/null @@ -1,11 +0,0 @@ -[ - { - "entityId": "PROCESS_GROUP_INSTANCE-ABC123DEF456GHI7" - }, - { - "entityId": "PROCESS_GROUP_INSTANCE-A6AAFEA17E6F60FD" - }, - { - "entityId": "PROCESS_GROUP_INSTANCE-F0967E6BFEE20424" - } -] \ No newline at end of file diff --git a/tests/mockserver_payloads/responses/processes/get_all_pgs.json b/tests/mockserver_payloads/responses/processes/get_all_pgs.json deleted file mode 100644 index 73e086a..0000000 --- a/tests/mockserver_payloads/responses/processes/get_all_pgs.json +++ /dev/null @@ -1,11 +0,0 @@ -[ - { - "entityId": "PROCESS_GROUP-ABC123DEF456GHI7" - }, - { - "entityId": "PROCESS_GROUP-19DACA5E22637C33" - }, - { - "entityId": "PROCESS_GROUP-859E1549052CD876" - } -] \ No newline at end of file diff --git a/tests/mockserver_payloads/responses/processes/get_one_pg.json b/tests/mockserver_payloads/responses/processes/get_one_pg.json deleted file mode 100644 index 14223a1..0000000 --- a/tests/mockserver_payloads/responses/processes/get_one_pg.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "entityId": "PROCESS_GROUP-ABC123DEF456GHI7" -} \ No newline at end of file diff --git a/tests/mockserver_payloads/responses/processes/get_one_pgi.json b/tests/mockserver_payloads/responses/processes/get_one_pgi.json deleted file mode 100644 index 0898df1..0000000 --- a/tests/mockserver_payloads/responses/processes/get_one_pgi.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "entityId": "PROCESS_GROUP_INSTANCE-ABC123DEF456GHI7" -} \ No newline at end of file diff --git a/tests/mockserver_payloads/responses/request_attributes/Mock_ReqAttr_1.json b/tests/mockserver_payloads/responses/request_attributes/Mock_ReqAttr_1.json new file mode 100644 index 0000000..c1b0880 --- /dev/null +++ b/tests/mockserver_payloads/responses/request_attributes/Mock_ReqAttr_1.json @@ -0,0 +1,28 @@ +{ + "metadata": { + "configurationVersions": [ + 1 + ], + "clusterVersion": "1.208.55.20201217-163132" + }, + "id": "123abc456-a123-1234-4321-def123ghi45", + "name": "Mock_ReqAttr_1", + "enabled": true, + "dataType": "STRING", + "dataSources": [ + { + "enabled": true, + "source": "REQUEST_HEADER", + "valueProcessing": { + "splitAt": "", + "trim": false + }, + "parameterName": "x-mock-header", + "capturingAndStorageLocation": "CAPTURE_AND_STORE_ON_SERVER" + } + ], + "normalization": "ORIGINAL", + "aggregation": "FIRST", + "confidential": false, + "skipPersonalDataMasking": false +} \ No newline at end of file diff --git a/tests/mockserver_payloads/responses/request_attributes/get_all.json b/tests/mockserver_payloads/responses/request_attributes/get_all.json new file mode 100644 index 0000000..1a277c9 --- /dev/null +++ b/tests/mockserver_payloads/responses/request_attributes/get_all.json @@ -0,0 +1,16 @@ +{ + "values": [ + { + "id": 
"123abc456-a123-1234-4321-def123ghi45", + "name": "Mock_ReqAttr_1" + }, + { + "id": "456jkl789-b321-3210-0123-abc456def32", + "name": "Mock_ReqAttr_2" + }, + { + "id": "789mno012-c456-6789-9876-mno123abc12", + "name": "Mock_ReqAttr_3" + } + ] +} \ No newline at end of file diff --git a/tests/mockserver_payloads/responses/request_attributes/get_all_one.json b/tests/mockserver_payloads/responses/request_attributes/get_all_one.json new file mode 100644 index 0000000..05b032e --- /dev/null +++ b/tests/mockserver_payloads/responses/request_attributes/get_all_one.json @@ -0,0 +1,8 @@ +{ + "values": [ + { + "id": "123abc456-a123-1234-4321-def123ghi45", + "name": "Mock_ReqAttr_1" + } + ] +} \ No newline at end of file diff --git a/tests/mockserver_payloads/responses/request_attributes/get_one.json b/tests/mockserver_payloads/responses/request_attributes/get_one.json new file mode 100644 index 0000000..c1b0880 --- /dev/null +++ b/tests/mockserver_payloads/responses/request_attributes/get_one.json @@ -0,0 +1,28 @@ +{ + "metadata": { + "configurationVersions": [ + 1 + ], + "clusterVersion": "1.208.55.20201217-163132" + }, + "id": "123abc456-a123-1234-4321-def123ghi45", + "name": "Mock_ReqAttr_1", + "enabled": true, + "dataType": "STRING", + "dataSources": [ + { + "enabled": true, + "source": "REQUEST_HEADER", + "valueProcessing": { + "splitAt": "", + "trim": false + }, + "parameterName": "x-mock-header", + "capturingAndStorageLocation": "CAPTURE_AND_STORE_ON_SERVER" + } + ], + "normalization": "ORIGINAL", + "aggregation": "FIRST", + "confidential": false, + "skipPersonalDataMasking": false +} \ No newline at end of file diff --git a/tests/mockserver_payloads/responses/request_naming/get_all.json b/tests/mockserver_payloads/responses/request_naming/get_all.json new file mode 100644 index 0000000..de0c154 --- /dev/null +++ b/tests/mockserver_payloads/responses/request_naming/get_all.json @@ -0,0 +1,8 @@ +{ + "values": [ + { + "id": "abc1234def-1233-3321-ab123-abc123defghi", + "name": "{mock} request name" + } + ] +} \ No newline at end of file diff --git a/tests/mockserver_payloads/responses/request_naming/get_one.json b/tests/mockserver_payloads/responses/request_naming/get_one.json new file mode 100644 index 0000000..0b187c3 --- /dev/null +++ b/tests/mockserver_payloads/responses/request_naming/get_one.json @@ -0,0 +1,42 @@ + +{ + "metadata": { + "configurationVersions": [ + 0 + ], + "clusterVersion": "1.208.63.20201218-162412" + }, + "id": "abc1234def-1233-3321-ab123-abc123defghi", + "enabled": true, + "namingPattern": "{mock} request name", + "managementZones": [], + "conditions": [ + { + "attribute": "SERVICE_REQUEST_ATTRIBUTE", + "comparisonInfo": { + "type": "STRING_REQUEST_ATTRIBUTE", + "comparison": "EXISTS", + "value": null, + "negate": false, + "requestAttribute": "Mock Request Attribute", + "caseSensitive": false, + "matchOnChildCalls": false, + "source": null + } + } + ], + "placeholders": [ + { + "name": "mock", + "attribute": "SERVICE_REQUEST_ATTRIBUTE", + "kind": "REGEX_EXTRACTION", + "delimiterOrRegex": "(^.*?$)", + "endDelimiter": null, + "requestAttribute": "Mock Request Attribute", + "normalization": "ORIGINAL", + "useFromChildCalls": null, + "aggregation": null, + "source": null + } + ] +} \ No newline at end of file diff --git a/tests/mockserver_payloads/responses/request_naming/{mock} request name.json b/tests/mockserver_payloads/responses/request_naming/{mock} request name.json new file mode 100644 index 0000000..0d7e779 --- /dev/null +++ 
b/tests/mockserver_payloads/responses/request_naming/{mock} request name.json @@ -0,0 +1,41 @@ +{ + "metadata": { + "configurationVersions": [ + 0 + ], + "clusterVersion": "1.208.63.20201218-162412" + }, + "id": "abc1234def-1233-3321-ab123-abc123defghi", + "enabled": true, + "namingPattern": "{mock} request name", + "managementZones": [], + "conditions": [ + { + "attribute": "SERVICE_REQUEST_ATTRIBUTE", + "comparisonInfo": { + "type": "STRING_REQUEST_ATTRIBUTE", + "comparison": "EXISTS", + "value": null, + "negate": false, + "requestAttribute": "Mock Request Attribute", + "caseSensitive": false, + "matchOnChildCalls": false, + "source": null + } + } + ], + "placeholders": [ + { + "name": "mock", + "attribute": "SERVICE_REQUEST_ATTRIBUTE", + "kind": "REGEX_EXTRACTION", + "delimiterOrRegex": "(^.*?$)", + "endDelimiter": null, + "requestAttribute": "Mock Request Attribute", + "normalization": "ORIGINAL", + "useFromChildCalls": null, + "aggregation": null, + "source": null + } + ] +} \ No newline at end of file diff --git a/tests/mockserver_payloads/responses/services/get_all.json b/tests/mockserver_payloads/responses/services/get_all.json deleted file mode 100644 index bdcce80..0000000 --- a/tests/mockserver_payloads/responses/services/get_all.json +++ /dev/null @@ -1,11 +0,0 @@ -[ - { - "entityId": "SERVICE-ABC123DEF456GHI7" - }, - { - "entityId": "SERVICE-C096CE0BA471AEFD" - }, - { - "entityId": "SERVICE-B71ADA892013D156" - } -] \ No newline at end of file diff --git a/tests/mockserver_payloads/responses/services/get_one.json b/tests/mockserver_payloads/responses/services/get_one.json deleted file mode 100644 index f5c0619..0000000 --- a/tests/mockserver_payloads/responses/services/get_one.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "entityId": "SERVICE-ABC123DEF456GHI7" -} \ No newline at end of file diff --git a/tests/mockserver_payloads/responses/timeseries/get_predict.json b/tests/mockserver_payloads/responses/timeseries/get_predict.json new file mode 100644 index 0000000..79373ae --- /dev/null +++ b/tests/mockserver_payloads/responses/timeseries/get_predict.json @@ -0,0 +1,63 @@ +{ + "timeseriesId": "custom.test.timeseries", + "displayName": "Custom Mock Metric", + "dimensions": [ + "SERVICE" + ], + "aggregationTypes": [ + "COUNT" + ], + "unit": "Count (count)", + "filter": "BUILTIN", + "detailedSource": "Services", + "types": [], + "dataResult": { + "dataPoints": { + "SERVICE-ABC123DEF456GHI789J": [ + [ + 1607939400000, + 523.666655307729, + 410.15412668486834, + 635.8478203297005 + ], + [ + 1607939700000, + 603.0451778473598, + 472.15260627789496, + 732.1528174479977 + ], + [ + 1607940000000, + 459.95450718505396, + 335.8356721235074, + 587.664001040329 + ], + [ + 1607940300000, + 441.4722415900446, + 322.67147758176833, + 567.8571115102844 + ], + [ + 1607940600000, + 422.50195767708374, + 284.5712246898428, + 567.8243284420648 + ], + [ + 1607940900000, + 546.0887158520202, + 400.57725605634965, + 697.3510794865584 + ] + ] + }, + "timeseriesId": "custom.test.timeseries", + "unit": "Count (count)", + "resolutionInMillisUTC": 300000, + "aggregationType": "COUNT", + "entities": { + "SERVICE-ABC123DEF456GHI789J": "/MockService" + } + } +} \ No newline at end of file diff --git a/tests/special_test_settings_no_variables.py b/tests/special_test_settings_no_variables.py new file mode 100644 index 0000000..0a6bdff --- /dev/null +++ b/tests/special_test_settings_no_variables.py @@ -0,0 +1,29 @@ +"""Special Cases for settings which requires user_variables.py to NOT exist""" +import unittest +from 
os import rename + +USER_VARIABLES_CURRENT = "user_variables.py" +USER_VARIABLES_MOVED = "user_variables.py.tmp" +class TestSettingsWithoutVarFile(unittest.TestCase): + """Special test cases when user_variables is absent""" + def test_settings_without_var_file(self): + """Test should return default value when user_variables missing""" + rename(USER_VARIABLES_CURRENT, USER_VARIABLES_MOVED) + from dynatrace.framework import settings # pylint: disable=import-outside-toplevel + timezone = settings.get_setting("DEFAULT_TIMEZONE") + self.assertEqual(timezone, "UTC") + rename(USER_VARIABLES_MOVED, USER_VARIABLES_CURRENT) + def test_cluster_creation_from_nothing(self): + """Create Cluster without a preexisting user_variables""" + rename(USER_VARIABLES_CURRENT, USER_VARIABLES_MOVED) + from dynatrace.framework import settings # pylint: disable=import-outside-toplevel + new_cluster = settings.create_cluster("cluster", "test.site") + expected_cluster = { + "url": "test.site", + 'tenant': {}, + 'api_token': {}, + 'verify_ssl': True, + 'is_managed': True, + } + self.assertEqual(new_cluster, expected_cluster) + rename(USER_VARIABLES_MOVED, USER_VARIABLES_CURRENT) diff --git a/tests/test_entities.py b/tests/test_entities.py new file mode 100644 index 0000000..af4a10a --- /dev/null +++ b/tests/test_entities.py @@ -0,0 +1,238 @@ +""" +Test Suite for Entities API +""" +import unittest +from user_variables import FULL_SET # pylint: disable=import-error +from tests import tooling_for_test as testtools +from dynatrace.tenant.entities import EntityTypes +from dynatrace.framework.request_handler import TenantAPIs +from dynatrace.tenant import entities + +CLUSTER = FULL_SET["mockserver1"] +TENANT = "tenant1" +URL_PATH = str(TenantAPIs.ENTITIES) +TAG_URL_PATH = str(TenantAPIs.TAGS) +REQUEST_DIR = "tests/mockserver_payloads/requests/entities" +RESPONSE_DIR = "tests/mockserver_payloads/responses/entities" + + +class TestGetEntities(unittest.TestCase): + """Test cases for fetching entities.""" + + def test_get_entities(self): + """Test fetching all entities of given type tenant-wide""" + + response_file = f"{RESPONSE_DIR}/get_all.json" + + testtools.create_mockserver_expectation( + cluster=CLUSTER, + tenant=TENANT, + url_path=URL_PATH, + request_type="GET", + parameters={ + 'entitySelector': 'type(HOST)' + }, + response_file=response_file + ) + + result = entities.get_entities_tenantwide(CLUSTER, TENANT, EntityTypes.HOST) + expected_result = testtools.expected_payload(response_file).get('entities') + self.assertEqual(result, expected_result) + + def test_get_entities_clusterwide(self): + """Test fetching all entities of given type cluster-wide""" + + response_file = f"{RESPONSE_DIR}/get_all.json" + + testtools.create_mockserver_expectation( + cluster=CLUSTER, + tenant=TENANT, + url_path=URL_PATH, + request_type="GET", + parameters={ + 'entitySelector': 'type(HOST)' + }, + response_file=response_file + ) + + result = entities.get_entities_clusterwide(CLUSTER, EntityTypes.HOST) + expected_result = testtools.expected_payload(response_file).get('entities') + self.assertEqual(result, expected_result) + + def test_get_entities_setwide(self): + """Test fetching all entities of given type set-wide""" + + response_file = f"{RESPONSE_DIR}/get_all.json" + + testtools.create_mockserver_expectation( + cluster=CLUSTER, + tenant=TENANT, + url_path=URL_PATH, + request_type="GET", + parameters={ + 'entitySelector': 'type(HOST)' + }, + response_file=response_file + ) + + result = entities.get_entities_setwide(FULL_SET, EntityTypes.HOST) + 
expected_result = testtools.expected_payload(response_file).get('entities') + self.assertEqual(result, expected_result) + + def test_get_entity(self): + """Test fetching a single entity.""" + + host_id = "HOST-ABC123DEF456GHIJ" + response_file = f"{RESPONSE_DIR}/get_single.json" + + testtools.create_mockserver_expectation( + cluster=CLUSTER, + tenant=TENANT, + url_path=URL_PATH, + request_type="GET", + parameters={ + 'entitySelector': f'entityId({host_id})' + }, + response_file=response_file + ) + + result = entities.get_entity(CLUSTER, TENANT, host_id) + expected_result = testtools.expected_payload(response_file).get('entities')[0] + self.assertEqual(result, expected_result) + + def test_get_entities_by_page(self): + """Test fetching tenantwide entities by page""" + + response_file = f"{RESPONSE_DIR}/get_all.json" + + testtools.create_mockserver_expectation( + cluster=CLUSTER, + tenant=TENANT, + url_path=URL_PATH, + request_type="GET", + parameters={ + 'entitySelector': 'type(HOST)' + }, + response_file=response_file + ) + + result = entities.get_entities_by_page(CLUSTER, TENANT, EntityTypes.HOST) + expected_result = testtools.expected_payload(response_file).get('entities') + self.assertEqual(next(result), expected_result) + + def test_get_entity_count_tenantwide(self): + """Test getting the count of entities within a tenant.""" + + response_file = f"{RESPONSE_DIR}/get_all.json" + testtools.create_mockserver_expectation( + cluster=CLUSTER, + tenant=TENANT, + url_path=URL_PATH, + request_type="GET", + response_file=response_file, + parameters={ + 'from': 'now-24h', + 'pageSize': '1', + 'entitySelector': 'type(HOST)' + } + ) + + result = entities.get_entity_count_tenantwide(CLUSTER, TENANT, EntityTypes.HOST) + self.assertEqual(result, 3) + + def test_get_entity_count_clusterwide(self): + """Test getting the count of entities within a cluster.""" + + response_file = f"{RESPONSE_DIR}/get_all.json" + testtools.create_mockserver_expectation( + cluster=CLUSTER, + tenant=TENANT, + url_path=URL_PATH, + request_type="GET", + response_file=response_file, + parameters={ + 'from': 'now-24h', + 'pageSize': '1', + 'entitySelector': 'type(HOST)' + } + ) + + result = entities.get_entity_count_clusterwide(CLUSTER, EntityTypes.HOST) + self.assertEqual(result, 3) + + def test_get_entity_count_setwide(self): + """Test getting the count of entities within a full set.""" + + response_file = f"{RESPONSE_DIR}/get_all.json" + testtools.create_mockserver_expectation( + cluster=CLUSTER, + tenant=TENANT, + url_path=URL_PATH, + request_type="GET", + response_file=response_file, + parameters={ + 'from': 'now-24h', + 'pageSize': '1', + 'entitySelector': 'type(HOST)' + } + ) + + result = entities.get_entity_count_setwide(FULL_SET, EntityTypes.HOST) + self.assertEqual(result, 3) + + +class TestHostTagging(unittest.TestCase): + """Test cases for testing entity tagging.""" + + def test_add_tags(self): + """Test adding two tags to a specific entity.""" + + host_id = "HOST-ABC123DEF456GHIJ" + request_file = f"{REQUEST_DIR}/tags.json" + tags = ["demo", "example"] + + testtools.create_mockserver_expectation( + cluster=CLUSTER, + tenant=TENANT, + request_type="POST", + url_path=TAG_URL_PATH, + request_file=request_file, + parameters={ + 'entitySelector': f'entityId({host_id})' + }, + response_code=201 + ) + + result = entities.add_tags( + cluster=CLUSTER, + tenant=TENANT, + tag_list=tags, + entitySelector=f'entityId({host_id})' + ) + self.assertEqual(result.status_code, 201) + + def test_delete_tags(self): + """Test deleting a 
tag from a specific host.""" + + host_id = "HOST-ABC123DEF456GHIJ" + tag = "demo" + + testtools.create_mockserver_expectation( + cluster=CLUSTER, + tenant=TENANT, + url_path=TAG_URL_PATH, + request_type="DELETE", + response_code=204 + ) + + result = entities.delete_tag( + cluster=CLUSTER, + tenant=TENANT, + tag_key=tag, + entitySelector=f'entityId({host_id})' + ) + self.assertEqual(204, result.status_code) + + +if __name__ == '__main__': + unittest.main() diff --git a/tests/test_extensions.py b/tests/test_extensions.py new file mode 100644 index 0000000..12b00d2 --- /dev/null +++ b/tests/test_extensions.py @@ -0,0 +1,210 @@ +"""Test Suite for the Extensions API""" +import unittest +import json +from user_variables import FULL_SET # pylint: disable=import-error +from tests import tooling_for_test as testtools +from dynatrace.framework.request_handler import TenantAPIs, HTTP +from dynatrace.tenant import extensions + +CLUSTER = FULL_SET["mockserver1"] +TENANT = "tenant1" +URL_PATH = str(TenantAPIs.EXTENSIONS) +EXTENSION_ID = "custom.jmx.radujmx123456789" +INSTANCE_ID = "HOST-ABC123DEF456GHI7" +REQUEST_DIR = "tests/mockserver_payloads/requests/extensions" +RESPONSE_DIR = "tests/mockserver_payloads/responses/extensions" + + +class TestGetExtensions(unittest.TestCase): + """Test cases for fetching extensions and their details""" + + def test_get_all_extensions(self): + """Test fetching a list of extensions from a tenant""" + response_file = f"{RESPONSE_DIR}/get_all.json" + + testtools.create_mockserver_expectation( + cluster=CLUSTER, + tenant=TENANT, + url_path=URL_PATH, + request_type=str(HTTP.GET), + response_file=response_file + ) + + result = extensions.get_all_extensions(CLUSTER, TENANT) + expected_result = testtools.expected_payload(response_file).get('extensions') + + self.assertEqual(result, expected_result) + + def test_get_extension_details(self): + """Test fetching the details of an extension""" + response_file = f"{RESPONSE_DIR}/details.json" + + testtools.create_mockserver_expectation( + cluster=CLUSTER, + tenant=TENANT, + url_path=f"{URL_PATH}/{EXTENSION_ID}", + request_type=str(HTTP.GET), + response_file=response_file + ) + + result = extensions.get_extension_details(CLUSTER, TENANT, EXTENSION_ID) + expected_result = testtools.expected_payload(response_file) + + self.assertEqual(result, expected_result) + + def test_get_extension_instances(self): + """Test fetching the list of instances for an extension""" + response_file = f"{RESPONSE_DIR}/instances.json" + + testtools.create_mockserver_expectation( + cluster=CLUSTER, + tenant=TENANT, + url_path=f"{URL_PATH}/{EXTENSION_ID}/instances", + request_type=str(HTTP.GET), + response_file=response_file + ) + + result = extensions.get_extension_instances(CLUSTER, TENANT, EXTENSION_ID) + expected_result = testtools.expected_payload( + response_file + ).get('configurationsList') + + self.assertEqual(result, expected_result) + + def test_get_extension_states(self): + """Test fetching the list of states for an extension""" + response_file = f"{RESPONSE_DIR}/states.json" + + testtools.create_mockserver_expectation( + cluster=CLUSTER, + tenant=TENANT, + url_path=f"{URL_PATH}/{EXTENSION_ID}/states", + request_type=str(HTTP.GET), + response_file=response_file + ) + + result = extensions.get_extension_states(CLUSTER, TENANT, EXTENSION_ID) + expected_result = testtools.expected_payload(response_file).get('states') + + self.assertEqual(result, expected_result) + + def test_get_extension_global_config(self): + """Test fetching the global 
configuration of an extension""" + response_file = f"{RESPONSE_DIR}/config.json" + + testtools.create_mockserver_expectation( + cluster=CLUSTER, + tenant=TENANT, + url_path=f"{URL_PATH}/{EXTENSION_ID}/global", + request_type=str(HTTP.GET), + response_file=response_file + ) + + result = extensions.get_extension_global_config(CLUSTER, TENANT, EXTENSION_ID) + expected_result = testtools.expected_payload(response_file) + + self.assertEqual(result, expected_result) + + def test_get_extension_instance_config(self): + """Test fetching a configuration instance for an extension""" + response_file = f"{RESPONSE_DIR}/config.json" + + testtools.create_mockserver_expectation( + cluster=CLUSTER, + tenant=TENANT, + url_path=f"{URL_PATH}/{EXTENSION_ID}/instances/{INSTANCE_ID}", + request_type=str(HTTP.GET), + response_file=response_file + ) + + result = extensions.get_extension_instance_config( + CLUSTER, TENANT, EXTENSION_ID, INSTANCE_ID + ) + expected_result = testtools.expected_payload(response_file) + + self.assertEqual(result, expected_result) + + def test_get_extension_metrics(self): + """Tests fetching the metrics collected by an extension""" + details_file = f"{RESPONSE_DIR}/details.json" + response_file = f"{RESPONSE_DIR}/metrics.json" + + testtools.create_mockserver_expectation( + cluster=CLUSTER, + tenant=TENANT, + url_path=f"{URL_PATH}/{EXTENSION_ID}", + request_type=str(HTTP.GET), + response_file=details_file, + mock_id="first" + ) + testtools.create_mockserver_expectation( + cluster=CLUSTER, + tenant=TENANT, + url_path=str(TenantAPIs.METRICS), + parameters={ + "metricSelector": "ext:custom.jmx.radujmx.*" + }, + mock_id="second", + request_type=str(HTTP.GET), + response_file=response_file + ) + + result = extensions.get_extension_metrics(CLUSTER, TENANT, EXTENSION_ID) + expected_result = list( + m.get('metricId') + for m in testtools.expected_payload(response_file).get('metrics') + ) + + self.assertEqual(result, expected_result) + + +class TestModifyExtensions(unittest.TestCase): + """Test cases for modifying extension states and details""" + + def test_update_global_config(self): + """Test updating the global config for an extension""" + request_file = f"{REQUEST_DIR}/config.json" + with open(request_file, "r") as config_file: + config = json.loads(config_file.read()) + + testtools.create_mockserver_expectation( + cluster=CLUSTER, + tenant=TENANT, + url_path=f"{URL_PATH}/{EXTENSION_ID}/global", + request_type=str(HTTP.PUT), + request_file=request_file, + response_code=202 + ) + + result = extensions.update_global_config( + CLUSTER, TENANT, EXTENSION_ID, config + ).status_code + expected_result = 202 + + self.assertEqual(result, expected_result) + + def test_update_instance_config(self): + """Test updating an instance of configuration for an extension""" + request_file = f"{REQUEST_DIR}/config.json" + with open(request_file, "r") as config_file: + config = json.loads(config_file.read()) + + testtools.create_mockserver_expectation( + cluster=CLUSTER, + tenant=TENANT, + url_path=f"{URL_PATH}/{EXTENSION_ID}/instances/{INSTANCE_ID}", + request_type=str(HTTP.PUT), + request_file=request_file, + response_code=202 + ) + + result = extensions.update_instance_config( + CLUSTER, TENANT, EXTENSION_ID, INSTANCE_ID, config + ).status_code + expected_result = 202 + + self.assertEqual(result, expected_result) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/test_host_groups.py b/tests/test_host_groups.py deleted file mode 100644 index 2ce311a..0000000 --- a/tests/test_host_groups.py +++ 
/dev/null
@@ -1,33 +0,0 @@
-"""Testing dynatrace.tenant.host_groups"""
-import unittest
-import user_variables
-from tests import tooling_for_test
-from dynatrace.tenant import host_groups
-
-CLUSTER = user_variables.FULL_SET["mockserver1"]
-TENANT = "tenant1"
-URL_PATH = "/api/v1/entity/infrastructure/hosts"
-
-
-class TestHostGroupFunctions(unittest.TestCase):
-    RESPONSE_DIR = "tests/mockserver_payloads/responses/host_groups/"
-
-    def test_get_host_groups_tenantwide(self):
-        parameters = {
-            "relativeTime": ["day"],
-            "includeDetails": ["true"],
-        }
-        mockserver_response_file = f"{self.RESPONSE_DIR}mock_get_general_1.json"
-        tooling_for_test.create_mockserver_expectation(
-            CLUSTER, TENANT, URL_PATH, "GET", parameters=parameters, response_file=mockserver_response_file)
-        command_tested = host_groups.get_host_groups_tenantwide(
-            CLUSTER, TENANT)
-
-        expected_result = {
-            'HOST_GROUP-ABCDEFGH12345678': 'HOST_GROUP_1'
-        }
-        self.assertEqual(command_tested, expected_result)
-
-
-if __name__ == '__main__':
-    unittest.main()
diff --git a/tests/test_maintenance_windows.py b/tests/test_maintenance_windows.py
index e76709a..34acb0a 100644
--- a/tests/test_maintenance_windows.py
+++ b/tests/test_maintenance_windows.py
@@ -1,12 +1,12 @@
 """Test Cases For Maintenance Windows."""
 import unittest
-import user_variables
 from tests import tooling_for_test
 from dynatrace.tenant import maintenance
-from dynatrace.requests.request_handler import TenantAPIs
-from dynatrace.exceptions import InvalidDateFormatException
+from dynatrace.framework.request_handler import TenantAPIs
+from dynatrace.framework.settings import get_cluster_dict
+from dynatrace.framework.exceptions import InvalidDateFormatException
 
-CLUSTER = user_variables.FULL_SET["mockserver1"]
+CLUSTER = get_cluster_dict("mockserver1")
 TENANT = "tenant1"
 URL_PATH = str(TenantAPIs.MAINTENANCE_WINDOWS)
 TEST_RANGE_START = "2020-01-01 00:00"
@@ -178,7 +178,6 @@ def test_create_once_no_scope(self):
         )
         maintenance_schedule = maintenance.generate_schedule(
             maintenance.RecurrenceType.ONCE,
-            #TODO Remove need for these variables. ONCE does not use them
             "23:00",
             60,
             TEST_RANGE_START,
@@ -210,7 +209,6 @@ def test_create_weekly_no_scope(self):
         )
         maintenance_schedule = maintenance.generate_schedule(
             maintenance.RecurrenceType.WEEKLY,
-            #TODO Remove need for these variables. ONCE does not use them
             "23:00",
             60,
             TEST_RANGE_START,
@@ -243,7 +241,6 @@ def test_create_monthly_no_scope(self):
         )
         maintenance_schedule = maintenance.generate_schedule(
             maintenance.RecurrenceType.MONTHLY,
-            #TODO Remove need for these variables. ONCE does not use them
             "23:00",
             60,
             TEST_RANGE_START,
@@ -261,41 +258,45 @@ def test_create_monthly_no_scope(self):
         self.assertEqual(result, tooling_for_test.expected_payload(
             mockserver_response_file))
+
 class TestMaintenanceExceptions(unittest.TestCase):
+    """Series of tests aimed at triggering exceptions"""
     def test_invalid_recurrence_type(self):
         """Testing exception thrown for invalid recurrence type"""
         with self.assertRaises(ValueError) as context:
             maintenance.generate_schedule(
-                    "HOURLY",
-                    "23:00",
-                    60,
-                    TEST_RANGE_START,
-                    TEST_RANGE_END,
+                "HOURLY",
+                "23:00",
+                60,
+                TEST_RANGE_START,
+                TEST_RANGE_END,
             )
+
         self.assertTrue("Invalid Recurrence Type!"
in str(context.exception)) + def test_invalid_day_of_week(self): """Testing exception thrown for invalid dayOfWeek""" with self.assertRaises(ValueError) as context: maintenance.generate_schedule( - maintenance.RecurrenceType.WEEKLY, - "23:00", - 60, - TEST_RANGE_START, - TEST_RANGE_END, - day=1 + maintenance.RecurrenceType.WEEKLY, + "23:00", + 60, + TEST_RANGE_START, + TEST_RANGE_END, + day=1 ) self.assertTrue("Invalid Weekly Day!" in str(context.exception)) - + def test_invalid_day_of_month_value(self): """Testing exception thrown for invalid dayOfMonth for incorrect int""" with self.assertRaises(ValueError) as context: maintenance.generate_schedule( - maintenance.RecurrenceType.MONTHLY, - "23:00", - 60, - TEST_RANGE_START, - TEST_RANGE_END, - day=32 + maintenance.RecurrenceType.MONTHLY, + "23:00", + 60, + TEST_RANGE_START, + TEST_RANGE_END, + day=32 ) self.assertTrue("Invalid Monthly Day!" in str(context.exception)) @@ -303,24 +304,25 @@ def test_invalid_day_of_month_type(self): """Testing exception thrown for invalid dayOfMonth for a non-int""" with self.assertRaises(TypeError) as context: maintenance.generate_schedule( - maintenance.RecurrenceType.MONTHLY, - "23:00", - 60, - TEST_RANGE_START, - TEST_RANGE_END, - day="Eleven" + maintenance.RecurrenceType.MONTHLY, + "23:00", + 60, + TEST_RANGE_START, + TEST_RANGE_END, + day="Eleven" ) - self.assertTrue("Invalid type for Day of Month! Int between 1-31 required" in str(context.exception)) + self.assertTrue( + "Invalid type for Day of Month! Int between 1-31 required" in str(context.exception)) def test_no_day_of_week_supplied(self): """Weekly Maintenance Window with no dayOfWeek supplied""" with self.assertRaises(Exception) as context: maintenance.generate_schedule( - maintenance.RecurrenceType.WEEKLY, - "23:00", - 60, - TEST_RANGE_START, - TEST_RANGE_END, + maintenance.RecurrenceType.WEEKLY, + "23:00", + 60, + TEST_RANGE_START, + TEST_RANGE_END, ) self.assertTrue("Invalid Weekly Day!" in str(context.exception)) @@ -328,79 +330,105 @@ def test_no_day_of_month_supplied(self): """Monthly Maintenance Window with no dayOfMonth supplied""" with self.assertRaises(Exception) as context: maintenance.generate_schedule( - maintenance.RecurrenceType.MONTHLY, - "23:00", - 60, - TEST_RANGE_START, - TEST_RANGE_END, + maintenance.RecurrenceType.MONTHLY, + "23:00", + 60, + TEST_RANGE_START, + TEST_RANGE_END, ) - self.assertTrue("Invalid type for Day of Month!" in str(context.exception)) + self.assertTrue( + "Invalid type for Day of Month!" 
in str(context.exception)) def test_invalid_datetime_format(self): """Test invalid datetime supplied to trigger ValueError""" - #TODO Fix Exceoption to have a message as first arg with self.assertRaises(InvalidDateFormatException) as context: maintenance.generate_schedule( - maintenance.RecurrenceType.DAILY, - "23:00", - 60, - TEST_RANGE_START, - "2020-01-02" + maintenance.RecurrenceType.DAILY, + "23:00", + 60, + TEST_RANGE_START, + "2020-01-02" ) - self.assertTrue("Incorrect Date " in context.exception.message, context.exception.message) + self.assertTrue( + "Incorrect Date " in context.exception.message, context.exception.message) + def test_invalid_filter_type(self): """Invalid Filter_Type""" with self.assertRaises(ValueError) as context: maintenance.generate_scope( - tags=[{'context': "CONTEXTLESS", 'key': "testing"}], - filter_type="INVALID_TYPE" + tags=[{'context': "CONTEXTLESS", 'key': "testing"}], + filter_type="INVALID_TYPE" ) - self.assertTrue("Invalid Filter Type" in (msg := str(context.exception)), msg) + self.assertTrue("Invalid Filter Type" in ( + msg := str(context.exception)), msg) # pylint: disable=used-before-assignment class TestMaintenanceEnumTypes(unittest.TestCase): + """Test to validate Maintenance Enum Types are correct""" def test_suppression_enum_str(self): - suppression = maintenance.Suppression(maintenance.Suppression.DETECT_PROBLEMS_AND_ALERT) - self.assertIsInstance(maintenance.Suppression.__str__(suppression), str) + """Suppression enum str should be string""" + suppression = maintenance.Suppression( + maintenance.Suppression.DETECT_PROBLEMS_AND_ALERT) + self.assertIsInstance( + maintenance.Suppression.__str__(suppression), str) def test_suppression_enum_repr(self): - suppression = maintenance.Suppression(maintenance.Suppression.DETECT_PROBLEMS_AND_ALERT) - self.assertIsInstance(maintenance.Suppression.__repr__(suppression), str) + """Suppression enum repr should be string""" + suppression = maintenance.Suppression( + maintenance.Suppression.DETECT_PROBLEMS_AND_ALERT) + self.assertIsInstance( + maintenance.Suppression.__repr__(suppression), str) def test_day_of_week_enum_str(self): + """Day of Week enum str should be string""" day_of_week = maintenance.DayOfWeek(maintenance.DayOfWeek.MONDAY) self.assertIsInstance(maintenance.DayOfWeek.__str__(day_of_week), str) def test_day_of_week_enum_repr(self): + """Day of Week enum repr should be string""" day_of_week = maintenance.DayOfWeek(maintenance.DayOfWeek.MONDAY) self.assertIsInstance(maintenance.DayOfWeek.__repr__(day_of_week), str) def test_context_enum_str(self): + """Context enum str should be string""" context = maintenance.Context(maintenance.Context.CONTEXTLESS) self.assertIsInstance(maintenance.Context.__str__(context), str) def test_context_enum_repr(self): + """Context enum repr should be string""" context = maintenance.Context(maintenance.Context.CONTEXTLESS) self.assertIsInstance(maintenance.Context.__repr__(context), str) def test_recurrence_type_enum_str(self): - recurrence_type = maintenance.RecurrenceType(maintenance.RecurrenceType.DAILY) - self.assertIsInstance(maintenance.RecurrenceType.__str__(recurrence_type), str) + """Recurrence Type enum str should be string""" + recurrence_type = maintenance.RecurrenceType( + maintenance.RecurrenceType.DAILY) + self.assertIsInstance( + maintenance.RecurrenceType.__str__(recurrence_type), str) def test_recurrence_type_enum_repr(self): - recurrence_type = maintenance.RecurrenceType(maintenance.RecurrenceType.DAILY) - 
self.assertIsInstance(maintenance.RecurrenceType.__repr__(recurrence_type), str) + """Recurrence Type enum repr should be string""" + recurrence_type = maintenance.RecurrenceType( + maintenance.RecurrenceType.DAILY) + self.assertIsInstance( + maintenance.RecurrenceType.__repr__(recurrence_type), str) def test_filter_type_enum_str(self): - suppression = maintenance.FilterType(maintenance.FilterType.APM_SECURITY_GATEWAY) + """Filter Type enum str should be string""" + suppression = maintenance.FilterType( + maintenance.FilterType.APM_SECURITY_GATEWAY) self.assertIsInstance(maintenance.FilterType.__str__(suppression), str) def test_filter_type_enum_repr(self): - suppression = maintenance.FilterType(maintenance.FilterType.APM_SECURITY_GATEWAY) - self.assertIsInstance(maintenance.FilterType.__repr__(suppression), str) + """Filter Type enum repr should be string""" + suppression = maintenance.FilterType( + maintenance.FilterType.APM_SECURITY_GATEWAY) + self.assertIsInstance( + maintenance.FilterType.__repr__(suppression), str) class TestTagParsing(unittest.TestCase): + """Testing Maintenance Window Tag Handling""" def test_tag_variations(self): """Testing various ways tags need to be parsed""" # Test 1 - Key @@ -413,38 +441,41 @@ def test_tag_variations(self): # Test 8 - Context, Key with square brackets # Test 9 - Context, Key with colon and squares # Test 10 - Empty Context with squares - + test_tag_list = [ - "Key", - "Key:Value", - "[Context]Key:Value", - "Key:withColon:Value", - "Key:withColon:", - "[Context]Key:withColon:Value", - "[Context]Key", - "[Context][KeywithSquares]", - "[Context][KeyWithSquares]:AndColons:Value", - "[][KeywithSquares]", + "Key", + "Key:Value", + "[Context]Key:Value", + "Key:withColon:Value", + "Key:withColon:", + "[Context]Key:withColon:Value", + "[Context]Key", + "[Context][KeywithSquares]", + "[Context][KeyWithSquares]:AndColons:Value", + "[][KeywithSquares]", ] test_tag_expected_results = [ - {'context': 'CONTEXTLESS', 'key': 'Key'}, - {'context': 'CONTEXTLESS', 'key': 'Key:Value'}, - {'context': 'Context', 'key': 'Key:Value'}, - {'context': 'CONTEXTLESS', 'key': 'Key:withColon:Value'}, - {'context': 'CONTEXTLESS', 'key': 'Key:withColon:'}, - {'context': 'Context', 'key': 'Key:withColon:Value'}, - {'context': 'Context', 'key': 'Key'}, - {'context': 'Context', 'key': '[KeywithSquares]'}, - {'context': 'Context', 'key': '[KeyWithSquares]:AndColons:Value'}, - {'context': 'CONTEXTLESS', 'key': '[][KeywithSquares]'}, + {'context': 'CONTEXTLESS', 'key': 'Key'}, + {'context': 'CONTEXTLESS', 'key': 'Key:Value'}, + {'context': 'Context', 'key': 'Key:Value'}, + {'context': 'CONTEXTLESS', 'key': 'Key:withColon:Value'}, + {'context': 'CONTEXTLESS', 'key': 'Key:withColon:'}, + {'context': 'Context', 'key': 'Key:withColon:Value'}, + {'context': 'Context', 'key': 'Key'}, + {'context': 'Context', 'key': '[KeywithSquares]'}, + {'context': 'Context', + 'key': '[KeyWithSquares]:AndColons:Value'}, + {'context': 'CONTEXTLESS', 'key': '[][KeywithSquares]'}, ] - for i in range(0, len(test_tag_list)): - processed_tag = test_tag_list[i] + for i, test_tag_input in enumerate(test_tag_list): + processed_tag = test_tag_input self.assertTrue( - (result := maintenance.parse_tag(processed_tag)) == test_tag_expected_results[i], - f"Test {i}: {result} did not match {test_tag_expected_results[i]}") + (result := maintenance.parse_tag(processed_tag) + ) == test_tag_expected_results[i], + f"Test {i}: {result} did not match {test_tag_expected_results[i]}") + if __name__ == '__main__': 
unittest.main()
@@ -465,4 +496,4 @@ def test_tag_variations(self):
 # GET ALL WINDOWS
 # GET DETAILS OF WINDOW
 # DELETE WINDOW
-# UPDATE WINDOW
\ No newline at end of file
+# UPDATE WINDOW
diff --git a/tests/test_management_zones.py b/tests/test_management_zones.py
new file mode 100644
index 0000000..6060e54
--- /dev/null
+++ b/tests/test_management_zones.py
@@ -0,0 +1,231 @@
+"""Test Suite for Management Zone operations"""
+
+import unittest
+import tests.tooling_for_test as testtools
+from dynatrace.tenant import management_zones
+from dynatrace.framework.settings import get_cluster_dict
+from dynatrace.framework.request_handler import TenantAPIs, HTTP
+
+CLUSTER = get_cluster_dict("mockserver1")
+TENANT = "tenant1"
+URL_PATH = str(TenantAPIs.MANAGEMENT_ZONES)
+RESPONSE_DIR = "tests/mockserver_payloads/responses/management_zones"
+REQUEST_DIR = "tests/mockserver_payloads/requests/management_zones"
+MZ_ID = "1234567890"
+MZ_NAME = "Mock_MZ"
+TAGS = [("CONTEXTLESS", "Application", "DemoApp")]
+
+
+class TestUtils(unittest.TestCase):
+    """Test cases for utility functions, separate from API"""
+
+    def test_rule_types_enum(self):
+        """Tests the RuleTypes enum. Must return object name"""
+        result = str(management_zones.RuleTypes.HTTP_MONITOR)
+        expected_result = management_zones.RuleTypes.HTTP_MONITOR.name
+
+        self.assertEqual(result, expected_result)
+
+    def test_generate_template(self):
+        """Tests generating a standard MZ template"""
+        mz_json_file = f"{RESPONSE_DIR}/get_mz.json"
+
+        result = management_zones.generate_mz_template(MZ_NAME, TAGS)
+        expected_result = testtools.expected_payload(mz_json_file)
+
+        self.assertEqual(result, expected_result)
+
+    def test_import_mz_from_file(self):
+        """Tests importing a MZ JSON from file"""
+        mz_json_file = f"{RESPONSE_DIR}/get_mz.json"
+
+        result = management_zones.import_mz_from_file(mz_json_file)
+        expected_result = testtools.expected_payload(mz_json_file)
+
+        self.assertEqual(result, expected_result)
+
+
+class TestFetchingMZs(unittest.TestCase):
+    """Test cases for fetching Management Zones and their details"""
+
+    def test_get_all_management_zones(self):
+        """Tests fetching all the Management Zones in the tenant"""
+        response_file = f"{RESPONSE_DIR}/get_all.json"
+
+        testtools.create_mockserver_expectation(
+            cluster=CLUSTER,
+            tenant=TENANT,
+            url_path=URL_PATH,
+            request_type=str(HTTP.GET),
+            response_file=response_file
+        )
+
+        result = management_zones.get_all_management_zones(CLUSTER, TENANT)
+        expected_result = testtools.expected_payload(response_file).get("values")
+
+        self.assertEqual(result, expected_result)
+
+    def test_get_management_zone_details(self):
+        """Tests fetching the details of a management zone."""
+        response_file = f"{RESPONSE_DIR}/get_mz.json"
+
+        testtools.create_mockserver_expectation(
+            cluster=CLUSTER,
+            tenant=TENANT,
+            url_path=f"{URL_PATH}/{MZ_ID}",
+            request_type=str(HTTP.GET),
+            response_file=response_file
+        )
+
+        result = management_zones.get_management_zone_details(CLUSTER, TENANT, MZ_ID)
+        expected_result = testtools.expected_payload(response_file)
+
+        self.assertEqual(result, expected_result)
+
+    def test_get_management_zone_id(self):
+        """Tests fetching the ID for a Management Zone referenced by name"""
+        response_file = f"{RESPONSE_DIR}/get_all.json"
+
+        testtools.create_mockserver_expectation(
+            cluster=CLUSTER,
+            tenant=TENANT,
+            url_path=URL_PATH,
+            request_type=str(HTTP.GET),
+            response_file=response_file
+        )
+
+        result = management_zones.get_management_zone_id(CLUSTER, TENANT, MZ_NAME)
+        expected_result =
MZ_ID + + self.assertEqual(result, expected_result) + + +class TestModifyingManagementZones(unittest.TestCase): + """Test cases for making changes to Management Zones within a tenant""" + + def test_add_management_zone(self): + """Tests adding a new management zone to a tenant""" + request_file = f"{REQUEST_DIR}/mz.json" + response_file = f"{RESPONSE_DIR}/created.json" + mz_json = testtools.expected_payload(request_file) + + testtools.create_mockserver_expectation( + cluster=CLUSTER, + tenant=TENANT, + url_path=URL_PATH, + request_type=str(HTTP.POST), + request_file=request_file, + response_code=201, + response_file=response_file + ) + + result = management_zones.add_management_zone(CLUSTER, TENANT, mz_json) + expected_result = MZ_ID + + self.assertEqual(result, expected_result) + + def test_update_management_zone(self): + """Tests updating an existing Management Zone in a tenant""" + request_file = f"{REQUEST_DIR}/mz.json" + mz_json = testtools.expected_payload(request_file) + + testtools.create_mockserver_expectation( + cluster=CLUSTER, + tenant=TENANT, + url_path=f"{URL_PATH}/{MZ_ID}", + request_type=str(HTTP.PUT), + request_file=request_file, + response_code=201 + ) + + result = management_zones.update_management_zone( + CLUSTER, TENANT, MZ_ID, mz_json + ).status_code + expected_result = 201 + + self.assertEqual(result, expected_result) + + def test_delete_management_zone_by_id(self): + """Tests deleting a Management Zone referenced by ID""" + testtools.create_mockserver_expectation( + cluster=CLUSTER, + tenant=TENANT, + url_path=f"{URL_PATH}/{MZ_ID}", + request_type=str(HTTP.DELETE), + response_code=204 + ) + + result = management_zones.delete_management_zone_by_id( + CLUSTER, TENANT, MZ_ID + ).status_code + expected_result = 204 + + self.assertEqual(result, expected_result) + + def test_delete_management_zone_by_name(self): + """Tests deleting a Management Zone referenced by name""" + response_file = f"{RESPONSE_DIR}/get_all.json" + + testtools.create_mockserver_expectation( + cluster=CLUSTER, + tenant=TENANT, + url_path=f"{URL_PATH}", + request_type=str(HTTP.GET), + response_file=response_file, + mock_id="Req1" + ) + testtools.create_mockserver_expectation( + cluster=CLUSTER, + tenant=TENANT, + url_path=f"{URL_PATH}/{MZ_ID}", + request_type=str(HTTP.DELETE), + response_code=204, + mock_id="Req2" + ) + + result = management_zones.delete_management_zone_by_name( + CLUSTER, TENANT, MZ_NAME + ).status_code + expected_result = 204 + + self.assertEqual(result, expected_result) + + +class TestErrorHandling(unittest.TestCase): + """Test cases for error handling within Management Zone operations""" + + def test_generate_template_tags_not_list(self): + """Tests error handling when generating a standard MZ template. + Tags must be given as a list object. + """ + with self.assertRaises(ValueError): + management_zones.generate_mz_template(MZ_NAME, "wrong_tags") + + def test_generate_template_tags_not_tuples(self): + """Tests error handling when generating a standard MZ template. + Tags list must contain only tuples. + """ + with self.assertRaises(ValueError): + management_zones.generate_mz_template(MZ_NAME, ["wrong_tags"]) + + def test_delete_mz_not_found(self): + """Tests error handling when deleting a Management Zone by name. + Management Zone must return an ID. 
+        """
+        wrong_name = "non_existing_mz"
+        response_file = f"{RESPONSE_DIR}/get_all.json"
+
+        testtools.create_mockserver_expectation(
+            cluster=CLUSTER,
+            tenant=TENANT,
+            url_path=f"{URL_PATH}",
+            request_type=str(HTTP.GET),
+            response_file=response_file
+        )
+
+        with self.assertRaises(RuntimeError):
+            management_zones.delete_management_zone_by_name(CLUSTER, TENANT, wrong_name)
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/tests/test_metrics.py b/tests/test_metrics.py
new file mode 100644
index 0000000..4be5bbc
--- /dev/null
+++ b/tests/test_metrics.py
@@ -0,0 +1,149 @@
+"""
+Test Suite for Metrics API
+"""
+import unittest
+from tests import tooling_for_test as testtools
+from dynatrace.framework.request_handler import TenantAPIs
+from dynatrace.framework.settings import get_cluster_dict
+from dynatrace.tenant import metrics
+
+CLUSTER = get_cluster_dict("mockserver1")
+TENANT = "tenant1"
+URL_PATH = str(TenantAPIs.METRICS)
+METRIC_SELECTOR = 'builtin:host.mem.avail.*'
+REQUEST_DIR = "tests/mockserver_payloads/requests/metrics"
+RESPONSE_DIR = "tests/mockserver_payloads/responses/metrics"
+
+
+class TestGetMetrics(unittest.TestCase):
+    """Test cases for fetching metrics and their details."""
+
+    def test_get_metric_descriptor(self):
+        """Test fetching descriptors for metrics matching selector."""
+        response_file = f"{RESPONSE_DIR}/descriptors.json"
+
+        testtools.create_mockserver_expectation(
+            cluster=CLUSTER,
+            tenant=TENANT,
+            url_path=URL_PATH,
+            request_type="GET",
+            parameters={
+                "metricSelector": METRIC_SELECTOR
+            },
+            response_file=response_file
+        )
+
+        result = metrics.get_metric_descriptor(
+            CLUSTER, TENANT, **{'metricSelector': METRIC_SELECTOR}
+        )
+        expected_result = testtools.expected_payload(response_file).get('metrics')
+        self.assertEqual(result, expected_result)
+
+    def test_get_metric_count(self):
+        """Test fetching the count of metrics matching selector."""
+        response_file = f"{RESPONSE_DIR}/descriptors.json"
+
+        testtools.create_mockserver_expectation(
+            cluster=CLUSTER,
+            tenant=TENANT,
+            url_path=URL_PATH,
+            request_type="GET",
+            parameters={
+                "metricSelector": METRIC_SELECTOR
+            },
+            response_file=response_file
+        )
+
+        result = metrics.get_metric_count(
+            CLUSTER, TENANT, **{'metricSelector': METRIC_SELECTOR}
+        )
+        expected_result = testtools.expected_payload(response_file).get('totalCount')
+        self.assertEqual(result, expected_result)
+
+    def test_get_metric_data(self):
+        """Test fetching datapoints for metrics matching selector."""
+        response_file = f"{RESPONSE_DIR}/datapoints.json"
+
+        testtools.create_mockserver_expectation(
+            cluster=CLUSTER,
+            tenant=TENANT,
+            url_path=f"{URL_PATH}/query",
+            request_type="GET",
+            parameters={
+                "metricSelector": "builtin:host.mem.avail.pct",
+                "resolution": "Inf"
+            },
+            response_file=response_file
+        )
+
+        result = metrics.get_metric_data(
+            CLUSTER, TENANT, **{'metricSelector': 'builtin:host.mem.avail.pct',
+                                'resolution': 'Inf'}
+        )
+        data = testtools.expected_payload(response_file).get('result')[0].get('data')
+        expected_result = {'builtin:host.mem.avail.pct': data}
+        self.assertEqual(result, expected_result)
+
+    def test_get_metric_dimension_count(self):
+        """Test fetching dimension count for metrics matching selector."""
+        response_file = f"{RESPONSE_DIR}/descriptors.json"
+
+        testtools.create_mockserver_expectation(
+            cluster=CLUSTER,
+            tenant=TENANT,
+            url_path=URL_PATH,
+            request_type="GET",
+            parameters={
+                "metricSelector": METRIC_SELECTOR
+            },
+            response_file=response_file
+        )
+
+        result =
metrics.get_metric_dimension_count(CLUSTER, TENANT, METRIC_SELECTOR) + expected_result = 3 + self.assertEqual(result, expected_result) + + def test_get_metric_ddus(self): + """Test fetching the estimated DDUs consumed by a metric.""" + response_file = f"{RESPONSE_DIR}/descriptors.json" + + testtools.create_mockserver_expectation( + cluster=CLUSTER, + tenant=TENANT, + url_path=URL_PATH, + request_type="GET", + parameters={ + "metricSelector": METRIC_SELECTOR + }, + response_file=response_file + ) + + result = metrics.get_metric_estimated_ddus(CLUSTER, TENANT, METRIC_SELECTOR) + expected_result = 3 * 525.6 + self.assertEqual(result, expected_result) + + +class TestPushMetrics(unittest.TestCase): + """Tests for metrics ingestion capability""" + + def test_metrics_ingest(self): + """Tests simple metric ingestion""" + request_file = f"{REQUEST_DIR}/payload.txt" + with open(file=request_file, mode='r') as text: + payload = text.read() + + testtools.create_mockserver_expectation( + cluster=CLUSTER, + tenant=TENANT, + url_path=f"{URL_PATH}/ingest", + request_type="POST", + request_data=request_file, + response_code=202 + ) + + result = metrics.ingest_metrics(CLUSTER, TENANT, payload) + self.assertEqual(result.status_code, 202) + + +if __name__ == '__main__': + unittest.main() diff --git a/tests/test_oneagents.py b/tests/test_oneagents.py new file mode 100644 index 0000000..e2e0bd6 --- /dev/null +++ b/tests/test_oneagents.py @@ -0,0 +1,89 @@ +""" +Test Suite for OneAgents API/Operations +""" +import unittest + +from user_variables import FULL_SET # pylint: disable=import-error +from tests import tooling_for_test as testtools +from dynatrace.framework.request_handler import TenantAPIs +from dynatrace.tenant import oneagents + +CLUSTER = FULL_SET["mockserver1"] +TENANT = "tenant1" +V1HOST_URL = f'{TenantAPIs.V1_TOPOLOGY}/infrastructure/hosts' +ONEAGENTS_URL = f'{TenantAPIs.ONEAGENTS}' +RESPONSE_DIR = "tests/mockserver_payloads/responses/oneagents" + + +class TestHostUnits(unittest.TestCase): + """Test cases for retrieving host units""" + + def test_get_host_units_tenantwide(self): + """Test getting host units tenant-wide""" + + response_file = f'{RESPONSE_DIR}/v1_get_all_hosts.json' + + testtools.create_mockserver_expectation( + cluster=CLUSTER, + tenant=TENANT, + url_path=V1HOST_URL, + request_type="GET", + response_file=response_file + ) + + result = oneagents.get_host_units_tenantwide(CLUSTER, TENANT) + self.assertEqual(result, 4) + + def test_get_host_units_clusterwide(self): + """Test getting host units cluster-wide""" + + response_file = f'{RESPONSE_DIR}/v1_get_all_hosts.json' + + testtools.create_mockserver_expectation( + cluster=CLUSTER, + tenant=TENANT, + url_path=V1HOST_URL, + request_type="GET", + response_file=response_file + ) + + result = oneagents.get_host_units_clusterwide(CLUSTER) + self.assertEqual(result, 4) + + def test_get_host_units_setwide(self): + """Test getting host units set-wide""" + + response_file = f'{RESPONSE_DIR}/v1_get_all_hosts.json' + + testtools.create_mockserver_expectation( + cluster=CLUSTER, + tenant=TENANT, + url_path=V1HOST_URL, + request_type="GET", + response_file=response_file + ) + + result = oneagents.get_host_units_setwide(FULL_SET) + self.assertEqual(result, 4) + + +class TestOneAgents(unittest.TestCase): + """Test cases for OneAgent operations""" + + def test_get_oneagents_tenantwide(self): + """Test getting OneAgents tenant-wide""" + + response_file = f'{RESPONSE_DIR}/get_oneagents.json' + + testtools.create_mockserver_expectation( + cluster=CLUSTER, 
+            tenant=TENANT,
+            url_path=ONEAGENTS_URL,
+            request_type="GET",
+            response_file=response_file
+        )
+
+        result = oneagents.get_oneagents_tenantwide(CLUSTER, TENANT)
+        expected_result = testtools.expected_payload(response_file).get('hosts')
+
+        self.assertEqual(result, expected_result)
diff --git a/tests/test_problems.py b/tests/test_problems.py
new file mode 100644
index 0000000..9b6e041
--- /dev/null
+++ b/tests/test_problems.py
@@ -0,0 +1,187 @@
+"""Test Suite for the Problems API"""
+import unittest
+from user_variables import FULL_SET # pylint: disable=import-error
+from tests import tooling_for_test as testtools
+from dynatrace.framework.request_handler import TenantAPIs, HTTP
+from dynatrace.tenant import problems
+
+CLUSTER = FULL_SET["mockserver1"]
+TENANT = "tenant1"
+URL_PATH = str(TenantAPIs.PROBLEMS)
+PROBLEM_ID = "-123456789_987654321V2"
+COMMENT_ID = "123456789"
+REQUEST_DIR = "tests/mockserver_payloads/requests/problems"
+RESPONSE_DIR = "tests/mockserver_payloads/responses/problems"
+
+
+class TestGetProblems(unittest.TestCase):
+    """Test cases for fetching problems, comments, and their details"""
+
+    def test_get_all_problems(self):
+        """Tests fetching all problems in a tenant"""
+        response_file = f"{RESPONSE_DIR}/get_all.json"
+
+        testtools.create_mockserver_expectation(
+            cluster=CLUSTER,
+            tenant=TENANT,
+            url_path=URL_PATH,
+            response_file=response_file,
+            request_type=str(HTTP.GET)
+        )
+
+        result = problems.get_all_problems(CLUSTER, TENANT)
+        expected_result = testtools.expected_payload(response_file).get('problems')
+
+        self.assertEqual(result, expected_result)
+
+    def test_get_problem_details(self):
+        """Tests fetching the details of a single problem"""
+        response_file = f"{RESPONSE_DIR}/get_one.json"
+
+        testtools.create_mockserver_expectation(
+            cluster=CLUSTER,
+            tenant=TENANT,
+            url_path=f"{URL_PATH}/{PROBLEM_ID}",
+            response_file=response_file,
+            request_type=str(HTTP.GET)
+        )
+
+        result = problems.get_problem_details(CLUSTER, TENANT, PROBLEM_ID)
+        expected_result = testtools.expected_payload(response_file)
+
+        self.assertEqual(result, expected_result)
+
+    def test_get_problem_count(self):
+        """Tests fetching the total number of problems"""
+        response_file = f"{RESPONSE_DIR}/get_all.json"
+
+        testtools.create_mockserver_expectation(
+            cluster=CLUSTER,
+            tenant=TENANT,
+            url_path=URL_PATH,
+            response_file=response_file,
+            request_type=str(HTTP.GET)
+        )
+
+        result = problems.get_problem_count(CLUSTER, TENANT)
+
+        self.assertEqual(result, 2)
+
+    def test_get_all_comments(self):
+        """Tests fetching all comments of a problem"""
+        response_file = f"{RESPONSE_DIR}/get_comments.json"
+
+        testtools.create_mockserver_expectation(
+            cluster=CLUSTER,
+            tenant=TENANT,
+            url_path=f"{URL_PATH}/{PROBLEM_ID}/comments",
+            response_file=response_file,
+            request_type=str(HTTP.GET)
+        )
+
+        result = problems.get_all_comments(CLUSTER, TENANT, PROBLEM_ID)
+        expected_result = testtools.expected_payload(response_file).get('comments')
+
+        self.assertEqual(result, expected_result)
+
+    def test_get_comment_details(self):
+        """Tests fetching a single comment of a problem"""
+        response_file = f"{RESPONSE_DIR}/get_comment.json"
+
+        testtools.create_mockserver_expectation(
+            cluster=CLUSTER,
+            tenant=TENANT,
+            url_path=f"{URL_PATH}/{PROBLEM_ID}/comments/{COMMENT_ID}",
+            response_file=response_file,
+            request_type=str(HTTP.GET)
+        )
+
+        result = problems.get_comment(CLUSTER, TENANT, PROBLEM_ID, COMMENT_ID)
+        expected_result = testtools.expected_payload(response_file)
+
+        self.assertEqual(result, expected_result)
+
+
+class TestModifyProblems(unittest.TestCase):
+    """Test cases for making changes to problems and comments"""
+
+    def test_close_problem(self):
+        """Tests the manual closing of a problem"""
+        testtools.create_mockserver_expectation(
+            cluster=CLUSTER,
+            tenant=TENANT,
+            url_path=f"{URL_PATH}/{PROBLEM_ID}/close",
+            response_code=204,
+            request_type=str(HTTP.POST)
+        )
+
+        result = problems.close_problem(CLUSTER, TENANT, PROBLEM_ID)
+
+        self.assertEqual(result.status_code, 204)
+
+    def test_add_comment(self):
+        """Tests adding a comment to a problem"""
+        request_file = f"{REQUEST_DIR}/comment.json"
+
+        testtools.create_mockserver_expectation(
+            cluster=CLUSTER,
+            tenant=TENANT,
+            url_path=f"{URL_PATH}/{PROBLEM_ID}/comments",
+            request_file=request_file,
+            response_code=200,
+            request_type=str(HTTP.POST)
+        )
+
+        result = problems.add_comment(
+            cluster=CLUSTER,
+            tenant=TENANT,
+            problem_id=PROBLEM_ID,
+            comment="Test comment",
+            context="Test"
+        )
+
+        self.assertEqual(result.status_code, 200)
+
+    def test_update_comment(self):
+        """Tests updating an existing comment of a problem"""
+        response_file = f"{RESPONSE_DIR}/get_comment.json"
+
+        testtools.create_mockserver_expectation(
+            cluster=CLUSTER,
+            tenant=TENANT,
+            url_path=f"{URL_PATH}/{PROBLEM_ID}/comments/{COMMENT_ID}",
+            request_type=str(HTTP.GET),
+            response_file=response_file,
+            mock_id="call1"
+        )
+        testtools.create_mockserver_expectation(
+            cluster=CLUSTER,
+            tenant=TENANT,
+            url_path=f"{URL_PATH}/{PROBLEM_ID}/comments/{COMMENT_ID}",
+            request_file=response_file,
+            response_code=204,
+            request_type=str(HTTP.PUT),
+            mock_id="call2"
+        )
+
+        result = problems.update_comment(CLUSTER, TENANT, PROBLEM_ID, COMMENT_ID)
+
+        self.assertEqual(result.status_code, 204)
+
+    def test_delete_comment(self):
+        """Tests deleting a comment from a problem"""
+        testtools.create_mockserver_expectation(
+            cluster=CLUSTER,
+            tenant=TENANT,
+            url_path=f"{URL_PATH}/{PROBLEM_ID}/comments/{COMMENT_ID}",
+            request_type=str(HTTP.DELETE),
+            response_code=204
+        )
+
+        result = problems.delete_comment(CLUSTER, TENANT, PROBLEM_ID, COMMENT_ID)
+
+        self.assertEqual(result.status_code, 204)
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/tests/test_request_attributes.py b/tests/test_request_attributes.py
new file mode 100644
index 0000000..9ec5039
--- /dev/null
+++ b/tests/test_request_attributes.py
@@ -0,0 +1,300 @@
+"""Test suite for request_attributes"""
+
+import unittest
+import os
+import tests.tooling_for_test as testtools
+from dynatrace.framework.request_handler import TenantAPIs, HTTP
+from dynatrace.framework.settings import get_cluster_dict
+from dynatrace.tenant import request_attributes
+
+CLUSTER = get_cluster_dict("mockserver1")
+TENANT = "tenant1"
+URL_PATH = str(TenantAPIs.REQUEST_ATTRIBUTES)
+RA_ID = "123abc456-a123-1234-4321-def123ghi45"
+RA_NAME = "Mock_ReqAttr_1"
+REQUEST_DIR = "tests/mockserver_payloads/requests/request_attributes"
+RESPONSE_DIR = "tests/mockserver_payloads/responses/request_attributes"
+
+
+class TestRequestAttributes(unittest.TestCase):
+    """Test cases for main functionality of the request_attributes module"""
+
+    def test_create_or_update_request_attribute_u(self):
+        """Tests the create_or_update_request_attribute function.
+        Test the update portion of this function.
+ """ + request_file = f"{REQUEST_DIR}/updated.json" + response_file = f"{RESPONSE_DIR}/get_all.json" + ra_json = testtools.expected_payload(request_file) + + testtools.create_mockserver_expectation( + cluster=CLUSTER, + tenant=TENANT, + url_path=URL_PATH, + request_type=str(HTTP.GET), + response_code=200, + response_file=response_file, + mock_id="Req1" + ) + testtools.create_mockserver_expectation( + cluster=CLUSTER, + tenant=TENANT, + url_path=f"{URL_PATH}/{RA_ID}", + request_type=str(HTTP.PUT), + response_code=204, + request_file=request_file, + mock_id="Req2" + ) + + result = request_attributes.create_or_update_request_attribute( + CLUSTER, TENANT, ra_json + ).status_code + expected_result = 204 + + self.assertEqual(result, expected_result) + + def test_create_or_update_request_attribute_c(self): + """Tests the create_or_update_request_attribute function. + Test the create portion of this function. + """ + request_file = f"{REQUEST_DIR}/definition.json" + response_file = f"{RESPONSE_DIR}/get_all.json" + ra_json = testtools.expected_payload(request_file) + + testtools.create_mockserver_expectation( + cluster=CLUSTER, + tenant=TENANT, + url_path=URL_PATH, + request_type=str(HTTP.GET), + response_code=200, + response_file=response_file, + mock_id="Req1" + ) + testtools.create_mockserver_expectation( + cluster=CLUSTER, + tenant=TENANT, + url_path=URL_PATH, + request_type=str(HTTP.POST), + response_code=201, + request_file=request_file, + mock_id="Req2" + ) + + result = request_attributes.create_or_update_request_attribute( + CLUSTER, TENANT, ra_json + ).status_code + expected_result = 201 + + self.assertEqual(result, expected_result) + + def test_create_request_attribute(self): + """Tests the create_request_attribute function""" + request_file = f"{REQUEST_DIR}/definition.json" + ra_json = testtools.expected_payload(request_file) + + testtools.create_mockserver_expectation( + cluster=CLUSTER, + tenant=TENANT, + url_path=URL_PATH, + request_type=str(HTTP.POST), + response_code=201, + request_file=request_file + ) + + result = request_attributes.create_request_attribute( + CLUSTER, TENANT, ra_json + ).status_code + expected_result = 201 + + self.assertEqual(result, expected_result) + + def test_delete_request_attribute_by_id(self): + """Tests the delete_request_attribute_by_id function""" + testtools.create_mockserver_expectation( + cluster=CLUSTER, + tenant=TENANT, + url_path=f"{URL_PATH}/{RA_ID}", + request_type=str(HTTP.DELETE), + response_code=204 + ) + + result = request_attributes.delete_request_attribute_by_id( + CLUSTER, TENANT, RA_ID + ).status_code + expected_result = 204 + + self.assertEqual(result, expected_result) + + def test_delete_request_attribute_by_name(self): + """Tests the delete_request_attribute_by_name function""" + response_file = f"{RESPONSE_DIR}/get_all.json" + testtools.create_mockserver_expectation( + cluster=CLUSTER, + tenant=TENANT, + url_path=URL_PATH, + request_type=str(HTTP.GET), + response_code=200, + response_file=response_file, + mock_id="Req1" + ) + testtools.create_mockserver_expectation( + cluster=CLUSTER, + tenant=TENANT, + url_path=f"{URL_PATH}/{RA_ID}", + request_type=str(HTTP.DELETE), + response_code=204, + mock_id="Req2" + ) + + result = request_attributes.delete_request_attribute_by_name( + CLUSTER, TENANT, RA_NAME + ).status_code + expected_result = 204 + + self.assertEqual(result, expected_result) + + def test_export_to_files(self): + """Tests the export_to_files function""" + response_file1 = f"{RESPONSE_DIR}/get_all_one.json" + response_file2 
= f"{RESPONSE_DIR}/get_one.json" + folder = RESPONSE_DIR + + testtools.create_mockserver_expectation( + cluster=CLUSTER, + tenant=TENANT, + url_path=URL_PATH, + request_type=str(HTTP.GET), + response_code=200, + response_file=response_file1, + mock_id="Req1" + ) + testtools.create_mockserver_expectation( + cluster=CLUSTER, + tenant=TENANT, + url_path=f"{URL_PATH}/{RA_ID}", + request_type=str(HTTP.GET), + response_code=200, + response_file=response_file2, + mock_id="Req2" + ) + + request_attributes.export_to_files(CLUSTER, TENANT, folder) + file = os.path.exists(f"{RESPONSE_DIR}/{RA_NAME}.json") + expected_file = True + file_data = testtools.expected_payload(f"{RESPONSE_DIR}/{RA_NAME}.json") + expected_file_data = testtools.expected_payload(response_file2) + + self.assertEqual(file, expected_file) + self.assertEqual(file_data, expected_file_data) + + def test_get_all_request_attributes(self): + """Tests the get_all_request_attributes function""" + response_file = f"{RESPONSE_DIR}/get_all.json" + + testtools.create_mockserver_expectation( + cluster=CLUSTER, + tenant=TENANT, + url_path=URL_PATH, + request_type=str(HTTP.GET), + response_code=200, + response_file=response_file + ) + + result = request_attributes.get_all_request_attributes(CLUSTER, TENANT) + expected_result = testtools.expected_payload(response_file).get("values") + + self.assertEqual(result, expected_result) + + def test_get_request_attribute_details(self): + """Tests the get_request_attribute_details function""" + response_file = f"{RESPONSE_DIR}/get_one.json" + + testtools.create_mockserver_expectation( + cluster=CLUSTER, + tenant=TENANT, + url_path=f"{URL_PATH}/{RA_ID}", + request_type=str(HTTP.GET), + response_code=200, + response_file=response_file + ) + + result = request_attributes.get_request_attribute_details( + CLUSTER, TENANT, RA_ID + ) + expected_result = testtools.expected_payload(response_file) + + self.assertEqual(result, expected_result) + + def test_get_request_attribute_id(self): + """Tests the get_request_attribute_id function""" + response_file = f"{RESPONSE_DIR}/get_all.json" + + testtools.create_mockserver_expectation( + cluster=CLUSTER, + tenant=TENANT, + url_path=URL_PATH, + request_type=str(HTTP.GET), + response_code=200, + response_file=response_file + ) + + result = request_attributes.get_request_attribute_id(CLUSTER, TENANT, RA_NAME) + expected_result = RA_ID + + self.assertEqual(result, expected_result) + + def test_update_request_attribute(self): + """Tests the update_request_attribute function""" + request_file = f"{REQUEST_DIR}/updated.json" + ra_json = testtools.expected_payload(request_file) + + testtools.create_mockserver_expectation( + cluster=CLUSTER, + tenant=TENANT, + url_path=f"{URL_PATH}/{RA_ID}", + request_type=str(HTTP.PUT), + response_code=204, + request_file=request_file + ) + + result = request_attributes.update_request_attribute( + CLUSTER, TENANT, RA_ID, ra_json + ).status_code + expected_result = 204 + + self.assertEqual(result, expected_result) + + +class TestErrorHandling(unittest.TestCase): + """Tests Request Attribute Error Handling""" + def test_delete_request_attribute_by_name_runtime_error(self): + """Tests error handling for function delete_request_attribute_by_name. + RuntimeError should be raised when request attribute ID is not found. 
+ """ + response_file = f"{RESPONSE_DIR}/get_all.json" + ra_name = "invalid_name" + + testtools.create_mockserver_expectation( + cluster=CLUSTER, + tenant=TENANT, + url_path=URL_PATH, + request_type=str(HTTP.GET), + response_code=200, + response_file=response_file + ) + + with self.assertRaises(RuntimeError): + request_attributes.delete_request_attribute_by_name(CLUSTER, TENANT, ra_name) + + def test_export_to_files_runtime_error(self): + """Tests error handling for function export_to_files. + RuntimeError should be raised when export folder does not exist. + """ + folder = "invalid_folder/path" + + with self.assertRaises(RuntimeError): + request_attributes.export_to_files(CLUSTER, TENANT, folder) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/test_request_naming.py b/tests/test_request_naming.py new file mode 100644 index 0000000..66db0c2 --- /dev/null +++ b/tests/test_request_naming.py @@ -0,0 +1,162 @@ +"""Test suite for request_naming""" + +import unittest +import os +import tests.tooling_for_test as testtools +from dynatrace.framework.request_handler import TenantAPIs, HTTP +from dynatrace.framework.settings import get_cluster_dict +from dynatrace.tenant import request_naming + +CLUSTER = get_cluster_dict("mockserver1") +TENANT = "tenant1" +URL_PATH = str(TenantAPIs.REQUEST_NAMING) +RULE_ID = "abc1234def-1233-3321-ab123-abc123defghi" +REQUEST_DIR = "tests/mockserver_payloads/requests/request_naming" +RESPONSE_DIR = "tests/mockserver_payloads/responses/request_naming" + + +class TestRequestNaming(unittest.TestCase): + """Test cases for main functionality of the request_naming module""" + + def test_create_naming_rule(self): + """Tests the create_naming_rule function""" + request_file = f"{REQUEST_DIR}/definition.json" + rule_json = testtools.expected_payload(request_file) + + testtools.create_mockserver_expectation( + cluster=CLUSTER, + tenant=TENANT, + url_path=URL_PATH, + request_type=str(HTTP.POST), + response_code=201, + request_file=request_file + ) + + result = request_naming.create_naming_rule(CLUSTER, TENANT, rule_json).status_code + expected_result = 201 + + self.assertEqual(result, expected_result) + + def test_delete_naming_rule(self): + """Tests the delete_naming_rule function""" + + testtools.create_mockserver_expectation( + cluster=CLUSTER, + tenant=TENANT, + url_path=f"{URL_PATH}/{RULE_ID}", + request_type=str(HTTP.DELETE), + response_code=204 + ) + + result = request_naming.delete_naming_rule(CLUSTER, TENANT, RULE_ID).status_code + expected_result = 204 + + self.assertEqual(result, expected_result) + + def test_export_to_files(self): + """Tests the export_to_files function""" + response_file1 = f"{RESPONSE_DIR}/get_all.json" + response_file2 = f"{RESPONSE_DIR}/get_one.json" + rules_list = testtools.expected_payload(response_file1).get("values") + + testtools.create_mockserver_expectation( + cluster=CLUSTER, + tenant=TENANT, + url_path=URL_PATH, + request_type=str(HTTP.GET), + response_code=200, + response_file=response_file1, + mock_id="Req1" + ) + testtools.create_mockserver_expectation( + cluster=CLUSTER, + tenant=TENANT, + url_path=f"{URL_PATH}/{RULE_ID}", + request_type=str(HTTP.GET), + response_code=200, + response_file=response_file2, + mock_id="Req2" + ) + + request_naming.export_to_files(CLUSTER, TENANT, RESPONSE_DIR) + file = os.path.exists(f"{RESPONSE_DIR}/{rules_list[0].get('name')}.json") + expected_file = True + file_data = testtools.expected_payload( + f"{RESPONSE_DIR}/{rules_list[0].get('name')}.json" + ) + expected_file_data = 
testtools.expected_payload(response_file2) + + self.assertEqual(file, expected_file) + self.assertEqual(file_data, expected_file_data) + + def test_get_all_rules(self): + """Tests the get_all_rules function""" + response_file = f"{RESPONSE_DIR}/get_all.json" + + testtools.create_mockserver_expectation( + cluster=CLUSTER, + tenant=TENANT, + url_path=URL_PATH, + request_type=str(HTTP.GET), + response_code=200, + response_file=response_file + ) + + result = request_naming.get_all_rules(CLUSTER, TENANT) + expected_result = testtools.expected_payload(response_file).get("values") + + self.assertEqual(result, expected_result) + + def test_get_rule_details(self): + """Tests the get_rule_details function""" + response_file = f"{RESPONSE_DIR}/get_one.json" + + testtools.create_mockserver_expectation( + cluster=CLUSTER, + tenant=TENANT, + url_path=f"{URL_PATH}/{RULE_ID}", + request_type=str(HTTP.GET), + response_code=200, + response_file=response_file + ) + + result = request_naming.get_rule_details(CLUSTER, TENANT, RULE_ID) + expected_result = testtools.expected_payload(response_file) + + self.assertEqual(result, expected_result) + + def test_update_naming_rule(self): + """Tests the update_naming_rule function""" + request_file = f"{REQUEST_DIR}/updated.json" + rule_json = testtools.expected_payload(request_file) + + testtools.create_mockserver_expectation( + cluster=CLUSTER, + tenant=TENANT, + url_path=f"{URL_PATH}/{RULE_ID}", + request_type=str(HTTP.PUT), + response_code=204, + request_file=request_file + ) + + result = request_naming.update_naming_rule( + CLUSTER, TENANT, RULE_ID, rule_json + ).status_code + expected_result = 204 + + self.assertEqual(result, expected_result) + + +class TestErrorHandling(unittest.TestCase): + """Test Request Naming Error Handling""" + def test_export_to_files_runtime_error(self): + """Tests error handling for function export_to_files. + RuntimeError should be raised when the folder path does not exist. 
+        """
+        folder = "nonexistent/folder/path"
+        with self.assertRaises(RuntimeError):
+            request_naming.export_to_files(CLUSTER, TENANT, folder)
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/tests/test_settings.py b/tests/test_settings.py
new file mode 100644
index 0000000..598b537
--- /dev/null
+++ b/tests/test_settings.py
@@ -0,0 +1,167 @@
+"""Test Cases for dynatrace.settings"""
+import unittest
+import yaml
+from dynatrace.framework import settings
+
+SETTINGS_JSON = "user_variables.json"
+SETTINGS_YAML = "user_variables.yaml"
+with open(SETTINGS_YAML) as file:
+    IMPORTED_SETTINGS = yaml.load(file, Loader=yaml.FullLoader)
+
+URL = "test.site"
+TENANT_TOKEN = "new_tenant_token"
+
+class TestSettings(unittest.TestCase):
+    """Standard Testing Class"""
+    def test_get_setting_from_user_variable(self):
+        """Will pull from user_variables when available"""
+        timezone = settings.get_setting("DEFAULT_TIMEZONE")
+        self.assertEqual(timezone, "America/Chicago")
+
+    def test_get_setting_from_default(self):
+        """When not in user_variables, info should be pulled from default values"""
+        log_dir = settings.get_setting("LOG_DIR")
+        self.assertEqual(log_dir, "logs/")
+
+    def test_get_invalid_setting(self):
+        """When not a valid setting, an error should be thrown"""
+        with self.assertRaises(AttributeError) as context:
+            settings.get_setting("INVALID_SETTING")
+
+        self.assertIn("not a valid user variable attribute",
+                      str(context.exception))
+
+class TestSettingsFile(unittest.TestCase):
+    """Test settings file is being used when provided"""
+    def test_import_yaml(self):
+        """Testing YAML Import"""
+        settings.load_settings_from_file(SETTINGS_YAML)
+        self.assertEqual(settings.get_setting("LOG_LEVEL"), "DEBUG")
+    def test_import_json(self):
+        """Testing JSON Import"""
+        settings.load_settings_from_file(SETTINGS_JSON)
+        self.assertEqual(settings.get_setting("LOG_LEVEL"), "WARNING")
+    def test_import_multi(self):
+        """Ensure the latest user_variables file takes precedence"""
+        settings.load_settings_from_file(SETTINGS_YAML)
+        settings.load_settings_from_file(SETTINGS_JSON)
+        self.assertEqual(settings.get_setting("LOG_LEVEL"), "WARNING")
+        self.assertEqual(settings.get_setting("DEFAULT_TIMEZONE"),"America/Chicago")
+
+class TestClusterSettings(unittest.TestCase):
+    """Test Cluster Settings"""
+    def test_get_cluster_dict_with_dict(self):
+        """Test retrieving a cluster dictionary by passing in a dict"""
+
+        request_cluster = IMPORTED_SETTINGS['FULL_SET']['mockserver1']
+        response_cluster = settings.get_cluster_dict(request_cluster)
+        self.assertEqual(request_cluster, response_cluster)
+    def test_get_cluster_dict_with_str(self):
+        """Test retrieving a cluster dictionary by passing in a string"""
+        response_cluster = settings.get_cluster_dict("mockserver1")
+        self.assertEqual(IMPORTED_SETTINGS['FULL_SET']['mockserver1'], response_cluster)
+    def test_cluster_creation_minimal(self):
+        """Test cluster creation programmatically"""
+        new_cluster = settings.create_cluster("cluster", URL)
+        expected_cluster = {
+            "url": URL,
+            'tenant': {},
+            'api_token': {},
+            'verify_ssl': True,
+            'is_managed': True,
+        }
+        self.assertEqual(new_cluster, expected_cluster)
+    def test_cluster_creation_complete(self):
+        """Test cluster creation programmatically"""
+        new_cluster = settings.create_cluster(
+            "cluster",
+            URL,
+            tenant_ids={"tenant1":"tenant1-id"},
+            tenant_tokens={"tenant1":"tenant1-token"},
+            cluster_token="cluster_api_token"
+        )
+        expected_cluster = {
+            "url": URL,
+            "tenant": {
+                "tenant1": "tenant1-id"
+            },
+            "api_token": {
+                "tenant1": "tenant1-token"
+            },
+            "verify_ssl": True,
+            "is_managed": True,
+            "cluster_token": "cluster_api_token"
+        }
+        self.assertEqual(new_cluster, expected_cluster)
+    def test_add_tenant_to_cluster(self):
+        """Test adding a tenant to a cluster programmatically"""
+        settings.create_cluster(
+            "cluster",
+            URL
+        )
+        new_cluster = settings.add_tenant_to_cluster(
+            "cluster",
+            "new-id-here",
+            TENANT_TOKEN,
+            "tenant2"
+        )
+        expected_cluster = {
+            "url": URL,
+            "tenant": {
+                "tenant2": "new-id-here"
+            },
+            "api_token": {
+                "tenant2": TENANT_TOKEN
+            },
+            "verify_ssl": True,
+            "is_managed": True
+        }
+        self.assertEqual(new_cluster, expected_cluster)
+
+class TestClusterExceptions(unittest.TestCase):
+    """Exceptions with Cluster Operations"""
+    def test_cluster_creation_improper_tenant(self):
+        """Create cluster with an invalid tenant_id combination"""
+        with self.assertRaises(ValueError) as context:
+            settings.create_cluster(
+                "cluster",
+                URL,
+                tenant_ids={"tenant1":"tenant1-id"},
+                cluster_token="cluster_api_token"
+            )
+        self.assertTrue("Tenant and tenant token must both be dict" in str(context.exception))
+    def test_add_tenant_to_cluster_dict(self):
+        """Adding a tenant to a cluster passed as a dict"""
+        with self.assertRaises(NotImplementedError) as context:
+            new_cluster = settings.create_cluster(
+                "cluster",
+                URL
+            )
+            new_cluster = settings.add_tenant_to_cluster(
+                new_cluster,
+                "new-id-here",
+                TENANT_TOKEN,
+                "tenant2"
+            )
+        self.assertTrue(
+            "Cluster dicts are not supported yet. Please use str for the cluster's key" \
+            in str(context.exception))
+    def test_add_tenant_to_cluster_nonexistant(self):
+        """Add a tenant to a cluster with an invalid/nonexistent key"""
+        with self.assertRaises(KeyError) as context:
+            settings.create_cluster(
+                "cluster",
+                URL
+            )
+            settings.add_tenant_to_cluster(
+                "cluster2",
+                "new-id-here",
+                TENANT_TOKEN,
+                "tenant2"
+            )
+        self.assertTrue("Cluster not found" in str(context.exception))
+    def test_get_cluster_dict_nonexistant(self):
+        """Get Cluster Dict with an invalid/nonexistent key"""
+        with self.assertRaises(ValueError) as context:
+            settings.get_cluster_dict("cluster2")
+        self.assertTrue("Cluster not found" in str(context.exception))
diff --git a/tests/test_timeseries.py b/tests/test_timeseries.py
new file mode 100644
index 0000000..2891436
--- /dev/null
+++ b/tests/test_timeseries.py
@@ -0,0 +1,47 @@
+"""Test cases for Timeseries (V1) API operations"""
+
+import unittest
+from dynatrace.framework.request_handler import TenantAPIs, HTTP
+from dynatrace.framework.settings import get_cluster_dict
+from dynatrace.tenant import timeseries
+from tests import tooling_for_test as testtools
+
+CLUSTER = get_cluster_dict("mockserver1")
+TENANT = "tenant1"
+URL_PATH = str(TenantAPIs.TIMESERIES)
+TIMESERIES_ID = "custom.test.timeseries"
+RESPONSE_DIR = "tests/mockserver_payloads/responses/timeseries"
+
+
+class TestGetTimeseries(unittest.TestCase):
+    """Test cases for fetch operations"""
+
+    def test_get_metric_with_prediction(self):
+        """Test getting metric data with prediction"""
+        response_file = f"{RESPONSE_DIR}/get_predict.json"
+
+        testtools.create_mockserver_expectation(
+            cluster=CLUSTER,
+            tenant=TENANT,
+            url_path=f"{URL_PATH}/{TIMESERIES_ID}",
+            request_type=str(HTTP.GET),
+            parameters={
+                "relativeTime": "30mins",
+                "includeData": "True",
+                "predict": "True"
+            },
+            response_file=response_file
+        )
+
+        result = timeseries.get_metric_data_with_prediction(
+            CLUSTER, TENANT, TIMESERIES_ID
+        )
+        expected_result = testtools.expected_payload(
+            response_file
).get("dataResult").get("dataPoints") + + self.assertEqual(result, expected_result) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/test_topology_hosts.py b/tests/test_topology_hosts.py deleted file mode 100644 index ac99dc7..0000000 --- a/tests/test_topology_hosts.py +++ /dev/null @@ -1,129 +0,0 @@ -""" -Test Suite for Topology Hosts -""" -import unittest -from user_variables import FULL_SET -from tests import tooling_for_test as testtools -from dynatrace.requests.request_handler import TenantAPIs -from dynatrace.tenant.topology import hosts - -CLUSTER = FULL_SET["mockserver1"] -TENANT = "tenant1" -URL_PATH = f"{TenantAPIs.V1_TOPOLOGY}/infrastructure/hosts" -REQUEST_DIR = "tests/mockserver_payloads/requests/hosts" -RESPONSE_DIR = "tests/mockserver_payloads/responses/hosts" - - -class TestGetHosts(unittest.TestCase): - """Tests cases for fetching topology hosts.""" - - def test_get_all_hosts(self): - """Test fetching all hosts""" - - response_file = f"{RESPONSE_DIR}/get_all.json" - - testtools.create_mockserver_expectation( - cluster=CLUSTER, - tenant=TENANT, - url_path=URL_PATH, - request_type="GET", - response_file=response_file - ) - - result = hosts.get_hosts_tenantwide(CLUSTER, TENANT) - self.assertEqual(result, testtools.expected_payload(response_file)) - - def test_get_single_host(self): - """Test fetching a specific host""" - - host_id = "HOST-9F74450267BAAE20" - response_file = f"{RESPONSE_DIR}/get_single.json" - - testtools.create_mockserver_expectation( - cluster=CLUSTER, - tenant=TENANT, - url_path=f"{URL_PATH}/{host_id}", - request_type="GET", - response_file=response_file - ) - - result = hosts.get_host(CLUSTER, TENANT, host_id) - self.assertEqual(result, testtools.expected_payload(response_file)) - - def test_get_host_count(self): - """Test getting the count of hosts in a tenant.""" - - response_file = f"{RESPONSE_DIR}/get_all.json" - testtools.create_mockserver_expectation( - cluster=CLUSTER, - tenant=TENANT, - url_path=URL_PATH, - request_type="GET", - response_file=response_file, - parameters=dict(relativeTime=['day'], - includeDetails=['False']) - ) - - result = hosts.get_host_count_tenantwide(CLUSTER, TENANT) - self.assertEqual(result, 3) - - def test_get_host_units(self): - """Tests getting the consumed host units in a tenant.""" - - response_file = f"{RESPONSE_DIR}/get_all.json" - testtools.create_mockserver_expectation( - cluster=CLUSTER, - tenant=TENANT, - url_path=URL_PATH, - request_type="GET", - response_file=response_file - ) - - result = hosts.get_host_units_tenantwide(CLUSTER, TENANT) - self.assertEqual(result, 4) - - hosts.set_host_properties - - -class TestHostTagging(unittest.TestCase): - """Test cases for testing host-level tagging.""" - - def test_add_tags(self): - """Test adding two tags to a specific host.""" - - host_id = "HOST-ABC123DEF456GHIJ" - request_file = f"{REQUEST_DIR}/tags.json" - tags = ["demo", "example"] - - testtools.create_mockserver_expectation( - cluster=CLUSTER, - tenant=TENANT, - request_type="POST", - url_path=f"{URL_PATH}/{host_id}", - request_file=request_file, - response_code=201 - ) - - result = hosts.add_host_tags(CLUSTER, TENANT, host_id, tags) - self.assertEqual(result, 201) - - def test_delete_tags(self): - """Test deleting a tag from a specific host.""" - - host_id = "HOST-ABC123DEF456GHIJ" - tag = "demo" - - testtools.create_mockserver_expectation( - cluster=CLUSTER, - tenant=TENANT, - url_path=f"{URL_PATH}/{host_id}/tags/{tag}", - request_type="DELETE", - response_code=204 - ) - - result = 
-        result = hosts.delete_host_tag(CLUSTER, TENANT, host_id, tag)
-        self.assertEqual(204, result.status_code)
-
-
-if __name__ == '__main__':
-    unittest.main()
diff --git a/tests/test_topology_process_groups.py b/tests/test_topology_process_groups.py
deleted file mode 100644
index 5f103b2..0000000
--- a/tests/test_topology_process_groups.py
+++ /dev/null
@@ -1,91 +0,0 @@
-"""Test Suite for Topology Process Groups"""
-
-import unittest
-from user_variables import FULL_SET
-from tests import tooling_for_test as testtools
-from dynatrace.requests.request_handler import TenantAPIs
-from dynatrace.tenant.topology import process_groups
-
-CLUSTER = FULL_SET.get('mockserver1')
-TENANT = 'tenant1'
-URL_PATH = f"{TenantAPIs.V1_TOPOLOGY}/infrastructure/process-groups"
-REQUEST_DIR = "tests/mockserver_payloads/requests/processes"
-RESPONSE_DIR = "tests/mockserver_payloads/responses/processes"
-
-
-class TestGetPGs(unittest.TestCase):
-    """Test cases for fetching topology process groups."""
-
-    def test_get_all_pgs(self):
-        """Test fetching all PGs"""
-        response_file = f"{RESPONSE_DIR}/get_all_pgs.json"
-
-        testtools.create_mockserver_expectation(
-            cluster=CLUSTER,
-            tenant=TENANT,
-            url_path=URL_PATH,
-            request_type="GET",
-            response_file=response_file
-        )
-
-        result = process_groups.get_process_groups_tenantwide(CLUSTER, TENANT)
-        self.assertEqual(result, testtools.expected_payload(response_file))
-
-    def test_get_single_pg(self):
-        """Test fetching single PG"""
-        response_file = f"{RESPONSE_DIR}/get_one_pg.json"
-        pg_id = "PROCESS_GROUP-ABC123DEF456GHI7"
-
-        testtools.create_mockserver_expectation(
-            cluster=CLUSTER,
-            tenant=TENANT,
-            url_path=f"{URL_PATH}/{pg_id}",
-            request_type="GET",
-            response_file=response_file
-        )
-
-        result = process_groups.get_process_group(CLUSTER, TENANT, pg_id)
-        self.assertEqual(result, testtools.expected_payload(response_file))
-
-    def test_get_pg_count(self):
-        """Test getting the PG count tenantwide."""
-        response_file = f"{RESPONSE_DIR}/get_all_pgs.json"
-
-        testtools.create_mockserver_expectation(
-            cluster=CLUSTER,
-            tenant=TENANT,
-            url_path=URL_PATH,
-            request_type="GET",
-            response_file=response_file
-        )
-
-        result = process_groups.get_process_group_count_tenantwide(CLUSTER,
-                                                                   TENANT)
-        self.assertEqual(result, 3)
-
-
-class TestPGTags(unittest.TestCase):
-    """Test cases for PG tags"""
-
-    def test_add_pg_tags(self):
-        """Test adding two tags to the PG."""
-        pg_id = "PROCESS_GROUP-859E1549052CD876"
-        request_file = f"{REQUEST_DIR}/tags.json"
-        tags = ["demo", "example"]
-
-        testtools.create_mockserver_expectation(
-            cluster=CLUSTER,
-            tenant=TENANT,
-            request_type="POST",
-            url_path=f"{URL_PATH}/{pg_id}",
-            request_file=request_file,
-            response_code=201
-        )
-
-        result = process_groups.add_process_group_tags(CLUSTER, TENANT,
-                                                       pg_id, tags)
-        self.assertEqual(result, 201)
-
-
-if __name__ == '__main__':
-    unittest.main()
diff --git a/tests/test_topology_processes.py b/tests/test_topology_processes.py
deleted file mode 100644
index 3fc29b1..0000000
--- a/tests/test_topology_processes.py
+++ /dev/null
@@ -1,51 +0,0 @@
-"""Test suite for Topology Processes"""
-
-import unittest
-from user_variables import FULL_SET
-from tests import tooling_for_test as testtools
-from dynatrace.requests.request_handler import TenantAPIs
-from dynatrace.tenant.topology import process
-
-CLUSTER = FULL_SET.get('mockserver1')
-TENANT = 'tenant1'
-URL_PATH = f"{TenantAPIs.V1_TOPOLOGY}/infrastructure/processes"
-RESPONSE_DIR = "tests/mockserver_payloads/responses/processes"
-
-
-class TestGetProcesses(unittest.TestCase):
-    """Test cases for fetching topology processes."""
-
-    def test_get_all_processes(self):
-        """Test getting all processes tenantwide."""
-        response_file = f"{RESPONSE_DIR}/get_all_pgis.json"
-
-        testtools.create_mockserver_expectation(
-            cluster=CLUSTER,
-            tenant=TENANT,
-            url_path=URL_PATH,
-            request_type="GET",
-            response_file=response_file
-        )
-
-        result = process.get_processes_tenantwide(CLUSTER, TENANT)
-        self.assertEqual(result, testtools.expected_payload(response_file))
-
-    def test_get_single_process(self):
-        """Tests getting one specific process."""
-        response_file = f"{RESPONSE_DIR}/get_one_pgi.json"
-        process_id = "PROCESS_GROUP_INSTANCE-ABC123DEF456GHI7"
-
-        testtools.create_mockserver_expectation(
-            cluster=CLUSTER,
-            tenant=TENANT,
-            url_path=f"{URL_PATH}/{process_id}",
-            request_type="GET",
-            response_file=response_file
-        )
-
-        result = process.get_process(CLUSTER, TENANT, process_id)
-        self.assertEqual(result, testtools.expected_payload(response_file))
-
-
-if __name__ == '__main__':
-    unittest.main()
diff --git a/tests/test_topology_services.py b/tests/test_topology_services.py
deleted file mode 100644
index 68e02ac..0000000
--- a/tests/test_topology_services.py
+++ /dev/null
@@ -1,89 +0,0 @@
-"""Test Suite for Topology Services"""
-
-import unittest
-from user_variables import FULL_SET
-from tests import tooling_for_test as testtools
-from dynatrace.requests.request_handler import TenantAPIs
-from dynatrace.tenant.topology import services
-
-CLUSTER = FULL_SET.get('mockserver1')
-TENANT = 'tenant1'
-URL_PATH = f"{TenantAPIs.V1_TOPOLOGY}/infrastructure/services"
-REQUEST_DIR = "tests/mockserver_payloads/requests/services"
-RESPONSE_DIR = "tests/mockserver_payloads/responses/services"
-
-
-class TestGetServices(unittest.TestCase):
-    """Test cases for fetching topology services."""
-
-    def test_get_all_svc(self):
-        """Test fetching all services"""
-        response_file = f"{RESPONSE_DIR}/get_all.json"
-
-        testtools.create_mockserver_expectation(
-            cluster=CLUSTER,
-            tenant=TENANT,
-            url_path=URL_PATH,
-            request_type="GET",
-            response_file=response_file
-        )
-
-        result = services.get_services_tenantwide(CLUSTER, TENANT)
-        self.assertEqual(result, testtools.expected_payload(response_file))
-
-    def test_get_single_svc(self):
-        """Test fetching single service"""
-        response_file = f"{RESPONSE_DIR}/get_one.json"
-        svc_id = "SERVICE-ABC123DEF456GHI7"
-
-        testtools.create_mockserver_expectation(
-            cluster=CLUSTER,
-            tenant=TENANT,
-            url_path=f"{URL_PATH}/{svc_id}",
-            request_type="GET",
-            response_file=response_file
-        )
-
-        result = services.get_service(CLUSTER, TENANT, svc_id)
-        self.assertEqual(result, testtools.expected_payload(response_file))
-
-    def test_get_svc_count(self):
-        """Test getting the service count tenantwide."""
-        response_file = f"{RESPONSE_DIR}/get_all.json"
-
-        testtools.create_mockserver_expectation(
-            cluster=CLUSTER,
-            tenant=TENANT,
-            url_path=URL_PATH,
-            request_type="GET",
-            response_file=response_file
-        )
-
-        result = services.get_service_count_tenantwide(CLUSTER, TENANT)
-        self.assertEqual(result, 3)
-
-
-class TestServiceTags(unittest.TestCase):
-    """Test cases for service tags"""
-
-    def test_add_svc_tags(self):
-        """Test adding two tags to the service."""
-        svc_id = "SERVICE-ABC123DEF456GHI7"
-        request_file = f"{REQUEST_DIR}/tags.json"
-        tags = ["demo", "example"]
-
-        testtools.create_mockserver_expectation(
-            cluster=CLUSTER,
-            tenant=TENANT,
-            request_type="POST",
-            url_path=f"{URL_PATH}/{svc_id}",
-            request_file=request_file,
-            response_code=201
-        )
-
-        result = services.add_service_tags(CLUSTER, TENANT, svc_id, tags)
-        self.assertEqual(result, 201)
-
-
-if __name__ == '__main__':
-    unittest.main()
diff --git a/tests/tooling_for_test.py b/tests/tooling_for_test.py
index 27e14fb..9ece8a1 100644
--- a/tests/tooling_for_test.py
+++ b/tests/tooling_for_test.py
@@ -1,20 +1,43 @@
 """Mockserver Expectation Setup"""
-import requests
 import json
 import logging
-from dynatrace.requests.request_handler import generate_tenant_url
+import requests
+from dynatrace.framework.request_handler import generate_tenant_url
 
 logging.basicConfig(filename="testing_tools.log", level=logging.DEBUG)
 
 
 def create_mockserver_expectation(cluster, tenant, url_path, request_type, **kwargs):
-    requests.packages.urllib3.disable_warnings()
+    """Creates an expectation for a mockserver request.
+    \n
+    @param cluster (dict) - Cluster dictionary (as taken from variable set)\n
+    @param tenant (str) - name of Tenant (as taken from variable set)\n
+    @param url_path (str) - path for the request that matches this expectation\n
+    @param request_type (HTTP str) - type of HTTP request that matches expectation
+    \n
+    @kwargs parameters (dict) - query string parameters for the request\n
+    @kwargs request_file (str) - path to JSON file representing request payload\n
+    @kwargs request_data (str) - path to plain-text file representing request payload\n
+    @kwargs response_file (str) - path to JSON file representing response to request\n
+    @kwargs response_code (int) - HTTP response code
+    \n
+    @throws ValueError - when the response code is not positive
+    """
+    requests.packages.urllib3.disable_warnings()  # pylint: disable=no-member
+
+    if cluster.get("is_managed"):
+        expected_path = f"/e/{cluster.get('tenant').get(tenant)}{url_path}"
+        expectation_url = f"http://{cluster['url']}/mockserver/expectation"
+    else:
+        expected_path = url_path
+        expectation_url = f"{generate_tenant_url(cluster, tenant)}/mockserver/expectation"
+
     expectation = {
         "httpRequest": {
             "headers": {
-                "Authorization": [f"Api-Token {cluster.get('api_token').get(tenant)}"],
+                "Authorization": [f"Api-Token {cluster.get('api_token').get(tenant)}"]
             },
-            "path": url_path,
+            "path": expected_path,
             "method": request_type
         },
         "httpResponse": {
@@ -27,29 +50,47 @@ def create_mockserver_expectation(cluster, tenant, url_path, request_type, **kwa
         "id": "OneOff",
    }
-    logging.debug(f"URL PATH: {url_path}")
-    logging.debug(f"KWARGS {kwargs}")
-    # Paramaters should always at least have Api-Token
+    logging.debug("URL PATH: %s", url_path)
+    logging.debug("KWARGS %s", kwargs)
+
+    # Mockserver expectation syntax expects each parameter's matching values
+    # to be given as a list (even if just 1 value)
     if 'parameters' in kwargs:
-        expectation["httpRequest"]["queryStringParameters"] = kwargs['parameters']
+        expectation["httpRequest"]["queryStringParameters"] = {
+            param: [
+                kwargs['parameters'][param]
+            ]
+            for param in kwargs['parameters']
+        }
 
     if "request_file" in kwargs:
-        with open(kwargs['request_file']) as f:
-            request_payload = json.load(f)
+        with open(kwargs['request_file']) as open_file:
+            request_payload = json.load(open_file)
         expectation["httpRequest"]["body"] = {
             "type": "JSON",
             "json": request_payload,
         }
 
+    if "request_data" in kwargs:
+        with open(kwargs['request_data']) as file:
+            request_data = file.read()
+        expectation["httpRequest"]["body"] = {
+            "type": "STRING",
+            "string": request_data,
+            "contentType": "text/plain"
+        }
+
     if "response_file" in kwargs:
-        with open(kwargs['response_file']) as f:
-            response_payload = json.load(f)
+        with open(kwargs['response_file']) as open_file:
+            response_payload = json.load(open_file)
         expectation["httpResponse"]["body"] = {
             "type": "JSON",
             "json": response_payload,
         }
 
         expectation["httpResponse"]["headers"] = {
-            "content-type": ["application/json"]
+            "content-type": ["application/json"],
+            "x-ratelimit-remaining": ['100000000'],
+            "x-ratelimit-limit": ['100000000']
         }
 
     if "response_code" in kwargs:
@@ -60,7 +101,6 @@ def create_mockserver_expectation(cluster, tenant, url_path, request_type, **kwa
 
     logging.debug(expectation)
 
-    expectation_url = f"{generate_tenant_url(cluster, tenant)}/mockserver/expectation"
     test_req = requests.request(
        "PUT",
         expectation_url,
@@ -74,5 +114,13 @@
 
 
 def expected_payload(json_file):
-    with open(json_file) as f:
-        return json.load(f)
+    """The payload that should be tested against
+
+    Args:
+        json_file (str): file name for result json
+
+    Returns:
+        dict: payload of the expected result JSON
+    """
+    with open(json_file) as open_file:
+        return json.load(open_file)
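
Editor's note: the expectation/assert pattern used by every suite in this patch is easiest to see in isolation. Below is a minimal sketch of a test written against the updated helpers; the suite name, URL path, and payload path are hypothetical, while create_mockserver_expectation, expected_payload, and get_cluster_dict are the helpers added or changed in this diff.

"""Hypothetical example (not part of this diff): exercising the updated test tooling."""
import unittest
from dynatrace.framework.settings import get_cluster_dict
from tests import tooling_for_test as testtools

# Cluster entry as defined in the mockserver variable set shipped with this patch
CLUSTER = get_cluster_dict("mockserver1")
TENANT = "tenant1"


class TestExamplePattern(unittest.TestCase):
    """Sketch of the one-off expectation pattern used by the suites above."""

    def test_round_trip(self):
        # Hypothetical payload path; real suites keep canned payloads under
        # tests/mockserver_payloads/responses/<area>/.
        response_file = "tests/mockserver_payloads/responses/example/get.json"

        # Register a one-off expectation. Parameter values are passed as plain
        # strings; the helper now wraps each one in a list, as mockserver's
        # expectation syntax requires.
        testtools.create_mockserver_expectation(
            cluster=CLUSTER,
            tenant=TENANT,
            url_path="/api/v1/example",  # hypothetical endpoint
            request_type="GET",
            parameters={"relativeTime": "hour"},
            response_file=response_file
        )

        # A real test would invoke the framework function under test here and
        # compare its result against the canned payload, e.g.:
        # self.assertEqual(result, testtools.expected_payload(response_file))


if __name__ == "__main__":
    unittest.main()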