diff --git a/.circleci/config.yml b/.circleci/config.yml new file mode 100644 index 0000000..20c1262 --- /dev/null +++ b/.circleci/config.yml @@ -0,0 +1,28 @@ +version: 2 +jobs: + build: + docker: + - image: circleci/python:3.8.3 + environment: + PIPENV_VENV_IN_PROJECT: true + - image: mockserver/mockserver + name: mockserver.mockserver + steps: + - checkout # check out source code to working directory + - run: + command: | + sudo chown -R circleci:circleci /usr/local/bin + sudo chown -R circleci:circleci /usr/local/lib/python3.8/site-packages + - run: + command: | + cp .circleci/mockserver.py user_variables.py + pip install pipenv + pipenv install --dev + - run: + command: | + pipenv run python -m unittest discover + - store_test_results: + path: test-results + - store_artifacts: + path: test-results + destination: tr1 diff --git a/.circleci/mockserver.py b/.circleci/mockserver.py new file mode 100644 index 0000000..acfb7fd --- /dev/null +++ b/.circleci/mockserver.py @@ -0,0 +1,42 @@ +"""User variables to use toolkit for Dynatrace""" +FULL_SET = { + "mockserver1": { + "url": "mockserver:1080", + "tenant": { + "tenant1": "mockserver", + }, + "api_token": { + "tenant1": "sample_api_token", + }, + "verify_ssl": False, + "is_managed": False, + "cluster_token": "Required for Cluster Operations in Managed" + } +} + +LOG_LEVEL="INFO" + +# ROLE TYPE KEYS +# access_env +# change_settings +# install_agent +# view_logs +# view_senstive +# change_sensitive + +USER_GROUPS = { + "role_types": { + "access_env": "accessenv", + "change_settings": "changesettings", + "view_logs": "logviewer", + "view_sensitive": "viewsensitive" + }, + "role_tenants": [ + "nonprod", + "prod" + ] +} + +USER_GROUP_TEMPLATE = "prefix_{USER_TYPE}_{TENANT}_{APP_NAME}_suffix" + +DEFAULT_TIMEZONE = "America/Chicago" diff --git a/.coveragerc b/.coveragerc new file mode 100644 index 0000000..b961959 --- /dev/null +++ b/.coveragerc @@ -0,0 +1,9 @@ +[run] +omit = + # omit anything in a .local directory anywhere + */.local/* + # omit everything in /usr + /usr/* + # omit in the test tools + tests/* + user_variables.py \ No newline at end of file diff --git a/.github/workflows/linter.yml b/.github/workflows/linter.yml new file mode 100644 index 0000000..d093e8a --- /dev/null +++ b/.github/workflows/linter.yml @@ -0,0 +1,51 @@ +########################### +########################### +## Linter GitHub Actions ## +########################### +########################### +name: Lint Code Base + +# +# Documentation: +# https://help.github.com/en/articles/workflow-syntax-for-github-actions +# + +############################# +# Start the job on all push # +############################# +on: + push: + branches-ignore: [master] + # Remove the line above to run when pushing to master + pull_request: + branches: [master, test, dev] + +############### +# Set the Job # +############### +jobs: + build: + # Name the Job + name: Lint Code Base + # Set the agent to run on + runs-on: ubuntu-latest + + ################## + # Load all steps # + ################## + steps: + ########################## + # Checkout the code base # + ########################## + - name: Checkout Code + uses: actions/checkout@v2 + + ################################ + # Run Linter against code base # + ################################ + - name: Lint Code Base + uses: docker://github/super-linter:v3 + env: + VALIDATE_ALL_CODEBASE: false + DEFAULT_BRANCH: master + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.gitignore b/.gitignore index 7e2c7e5..3465291 100644 --- 
a/.gitignore +++ b/.gitignore @@ -6,7 +6,11 @@ variable_sets/* scripts/* templates/* -.vscode/ +# Workspace settings and virtual environments +**.vscode** +**venv** +# Framework log files +***.log** user_variables.py sandbox_script.py diff --git a/.pylintrc b/.pylintrc new file mode 100644 index 0000000..2e0f5c5 --- /dev/null +++ b/.pylintrc @@ -0,0 +1 @@ +init-hook='from sys.path import append; from os import getcwd; append(getcwd())' \ No newline at end of file diff --git a/Pipfile b/Pipfile index 334d058..393ec6b 100644 --- a/Pipfile +++ b/Pipfile @@ -6,9 +6,10 @@ verify_ssl = true [dev-packages] pylint = "*" autopep8 = "*" +coverage = "*" [packages] requests = "*" [requires] -python_version = "3.4" +python_version = "3.8" diff --git a/Pipfile.lock b/Pipfile.lock index 194f912..19a4db6 100644 --- a/Pipfile.lock +++ b/Pipfile.lock @@ -1,11 +1,11 @@ { "_meta": { "hash": { - "sha256": "661f0c6028f892faee91814dacb44c678abaf7b6d1645af87357c1483119fa7f" + "sha256": "d77ab23630511fa40710f418270d79d24bc9d2b8a61ab2d2af4b0e938036b609" }, "pipfile-spec": 6, "requires": { - "python_version": "3.4" + "python_version": "3.8" }, "sources": [ { @@ -18,10 +18,10 @@ "default": { "certifi": { "hashes": [ - "sha256:5ad7e9a056d25ffa5082862e36f119f7f7cec6457fa07ee2f8c339814b80c9b1", - "sha256:9cd41137dc19af6a5e03b630eefe7d1f458d964d406342dd3edf625839b944cc" + "sha256:5930595817496dd21bb8dc35dad090f1c2cd0adfaf21204bf6732ca5d8ee34d3", + "sha256:8fc0819f1f30ba15bdb34cceffb9ef04d99f420f68eb75d901e9560b8749fc41" ], - "version": "==2020.4.5.2" + "version": "==2020.6.20" }, "chardet": { "hashes": [ @@ -32,25 +32,25 @@ }, "idna": { "hashes": [ - "sha256:7588d1c14ae4c77d74036e8c22ff447b26d0fde8f007354fd48a7814db15b7cb", - "sha256:a068a21ceac8a4d63dbfd964670474107f541babbd2250d61922f029858365fa" + "sha256:b307872f855b18632ce0c21c5e45be78c0ea7ae4c15c828c20788b26921eb3f6", + "sha256:b97d804b1e9b523befed77c48dacec60e6dcb0b5391d57af6a65a312a90648c0" ], - "version": "==2.9" + "version": "==2.10" }, "requests": { "hashes": [ - "sha256:43999036bfa82904b6af1d99e4882b560e5e2c68e5c4b0aa03b655f3d7d73fee", - "sha256:b3f43d496c6daba4493e7c431722aeb7dbc6288f52a6e04e7b6023b0247817e6" + "sha256:b3559a131db72c33ee969480840fff4bb6dd111de7dd27c8ee1f820f4f00231b", + "sha256:fe75cc94a9443b9246fc7049224f75604b113c36acb93f87b80ed42c44cbb898" ], "index": "pypi", - "version": "==2.23.0" + "version": "==2.24.0" }, "urllib3": { "hashes": [ - "sha256:3018294ebefce6572a474f0604c2021e33b3fd8006ecd11d62107a5d2a963527", - "sha256:88206b0eb87e6d677d424843ac5209e3fb9d0190d0ee169599165ec25e9d9115" + "sha256:91056c15fa70756691db97756772bb1eb9678fa585d9184f24534b100dc60f4a", + "sha256:e7983572181f5e1522d9c98453462384ee92a0be7fac5f1413a1e35c56cc0461" ], - "version": "==1.25.9" + "version": "==1.25.10" } }, "develop": { @@ -63,10 +63,50 @@ }, "autopep8": { "hashes": [ - "sha256:60fd8c4341bab59963dafd5d2a566e94f547e660b9b396f772afe67d8481dbf0" + "sha256:d21d3901cb0da6ebd1e83fc9b0dfbde8b46afc2ede4fe32fbda0c7c6118ca094" ], "index": "pypi", - "version": "==1.5.3" + "version": "==1.5.4" + }, + "coverage": { + "hashes": [ + "sha256:098a703d913be6fbd146a8c50cc76513d726b022d170e5e98dc56d958fd592fb", + "sha256:16042dc7f8e632e0dcd5206a5095ebd18cb1d005f4c89694f7f8aafd96dd43a3", + "sha256:1adb6be0dcef0cf9434619d3b892772fdb48e793300f9d762e480e043bd8e716", + "sha256:27ca5a2bc04d68f0776f2cdcb8bbd508bbe430a7bf9c02315cd05fb1d86d0034", + "sha256:28f42dc5172ebdc32622a2c3f7ead1b836cdbf253569ae5673f499e35db0bac3", + 
"sha256:2fcc8b58953d74d199a1a4d633df8146f0ac36c4e720b4a1997e9b6327af43a8", + "sha256:304fbe451698373dc6653772c72c5d5e883a4aadaf20343592a7abb2e643dae0", + "sha256:30bc103587e0d3df9e52cd9da1dd915265a22fad0b72afe54daf840c984b564f", + "sha256:40f70f81be4d34f8d491e55936904db5c527b0711b2a46513641a5729783c2e4", + "sha256:4186fc95c9febeab5681bc3248553d5ec8c2999b8424d4fc3a39c9cba5796962", + "sha256:46794c815e56f1431c66d81943fa90721bb858375fb36e5903697d5eef88627d", + "sha256:4869ab1c1ed33953bb2433ce7b894a28d724b7aa76c19b11e2878034a4e4680b", + "sha256:4f6428b55d2916a69f8d6453e48a505c07b2245653b0aa9f0dee38785939f5e4", + "sha256:52f185ffd3291196dc1aae506b42e178a592b0b60a8610b108e6ad892cfc1bb3", + "sha256:538f2fd5eb64366f37c97fdb3077d665fa946d2b6d95447622292f38407f9258", + "sha256:64c4f340338c68c463f1b56e3f2f0423f7b17ba6c3febae80b81f0e093077f59", + "sha256:675192fca634f0df69af3493a48224f211f8db4e84452b08d5fcebb9167adb01", + "sha256:700997b77cfab016533b3e7dbc03b71d33ee4df1d79f2463a318ca0263fc29dd", + "sha256:8505e614c983834239f865da2dd336dcf9d72776b951d5dfa5ac36b987726e1b", + "sha256:962c44070c281d86398aeb8f64e1bf37816a4dfc6f4c0f114756b14fc575621d", + "sha256:9e536783a5acee79a9b308be97d3952b662748c4037b6a24cbb339dc7ed8eb89", + "sha256:9ea749fd447ce7fb1ac71f7616371f04054d969d412d37611716721931e36efd", + "sha256:a34cb28e0747ea15e82d13e14de606747e9e484fb28d63c999483f5d5188e89b", + "sha256:a3ee9c793ffefe2944d3a2bd928a0e436cd0ac2d9e3723152d6fd5398838ce7d", + "sha256:aab75d99f3f2874733946a7648ce87a50019eb90baef931698f96b76b6769a46", + "sha256:b1ed2bdb27b4c9fc87058a1cb751c4df8752002143ed393899edb82b131e0546", + "sha256:b360d8fd88d2bad01cb953d81fd2edd4be539df7bfec41e8753fe9f4456a5082", + "sha256:b8f58c7db64d8f27078cbf2a4391af6aa4e4767cc08b37555c4ae064b8558d9b", + "sha256:c1bbb628ed5192124889b51204de27c575b3ffc05a5a91307e7640eff1d48da4", + "sha256:c2ff24df02a125b7b346c4c9078c8936da06964cc2d276292c357d64378158f8", + "sha256:c890728a93fffd0407d7d37c1e6083ff3f9f211c83b4316fae3778417eab9811", + "sha256:c96472b8ca5dc135fb0aa62f79b033f02aa434fb03a8b190600a5ae4102df1fd", + "sha256:ce7866f29d3025b5b34c2e944e66ebef0d92e4a4f2463f7266daa03a1332a651", + "sha256:e26c993bd4b220429d4ec8c1468eca445a4064a61c74ca08da7429af9bc53bb0" + ], + "index": "pypi", + "version": "==5.2.1" }, "isort": { "hashes": [ diff --git a/change_variables.py b/change_variables.py index ed12cdb..aba3923 100644 --- a/change_variables.py +++ b/change_variables.py @@ -3,23 +3,28 @@ import argparse import os + def replace_set(set_file): - """Replace Variable File""" - # Options are Darwin, Linux, Java and Windows. Java not supported - if "Windows" in system(): - os.system("copy variable_sets\\" + str(set_file) + ".py user_variables.py") - else: - os.system("cp variable_sets/" + str(set_file) + ".py user_variables.py") + """Replace Variable File""" + # Options are Darwin, Linux, Java and Windows. 
Java not supported + if "Windows" in system(): + os.system("copy variable_sets\\" + + str(set_file) + ".py user_variables.py") + else: + os.system("cp variable_sets/" + str(set_file) + + ".py user_variables.py") + def get_variable_set_file(variable_set_arg): - """Checks if the set file was provided via arg else prompt""" - if variable_set_arg: - return variable_set_arg - return input("Enter Set to Import: ") + """Checks if the set file was provided via arg else prompt""" + if variable_set_arg: + return variable_set_arg + return input("Enter Set to Import: ") + if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('--set-file', '-s') - args = parser.parse_args() - set_file = get_variable_set_file(args.set_file) - replace_set(set_file) + parser = argparse.ArgumentParser() + parser.add_argument('--set-file', '-s') + args = parser.parse_args() + set_file = get_variable_set_file(args.set_file) + replace_set(set_file) diff --git a/dynatrace/cluster/config.py b/dynatrace/cluster/config.py index 54b9dd1..0d51613 100644 --- a/dynatrace/cluster/config.py +++ b/dynatrace/cluster/config.py @@ -1,13 +1,21 @@ import dynatrace.requests.request_handler as rh + def get_node_info(cluster): - response = rh.cluster_get(cluster,"cluster") - return response.json() + response = rh.make_api_call(cluster=cluster, + endpoint=rh.ClusterAPIs.CLUSTER) + return response.json() + def get_node_config(cluster): - response = rh.cluster_get(cluster,"cluster/configuration") - return response.json() + response = rh.make_api_call(cluster=cluster, + endpoint=rh.ClusterAPIs.CONFIG) + return response.json() + def set_node_config(cluster, json): - response = rh.cluster_post(cluster,"cluster/configuration", json=json) - return response.status_code \ No newline at end of file + response = rh.make_api_call(cluster=cluster, + endpoint=rh.ClusterAPIs.CONFIG, + method=rh.HTTP.POST, + json=json) + return response.status_code diff --git a/dynatrace/cluster/ssl.py b/dynatrace/cluster/ssl.py index d6fe335..b09e4e4 100644 --- a/dynatrace/cluster/ssl.py +++ b/dynatrace/cluster/ssl.py @@ -2,28 +2,31 @@ """Cluster SSL Certificate Operations""" import dynatrace.requests.request_handler as rh + def get_cert_details(cluster, entity_type, entity_id): - """Get SSL Certificate information for Server or Cluster ActiveGate""" - response = rh.cluster_get( - cluster, - "sslCertificate/" + str(entity_type) + "/" + str(entity_id) - ) - return response.json() + """Get SSL Certificate information for Server or Cluster ActiveGate""" + response = rh.make_api_call( + cluster=cluster, + endpoint=f"{rh.ClusterAPIs.SSL}/{entity_type}/{entity_id}" + ) + return response.json() + def get_cert_install_status(cluster, entity_id): - """Get SSL Storage Status for Cluster ActiveGate""" - response = rh.cluster_get( - cluster, - "sslCertificate/store/COLLECTOR/" + str(entity_id) - ) - return response.text + """Get SSL Storage Status for Cluster ActiveGate""" + response = rh.make_api_call( + cluster=cluster, + endpoint=f"{rh.ClusterAPIs.SSL_STORE}/COLLECTOR/{entity_id}" + ) + return response.text -def set_cert(cluster, entity_type, entity_id, ssl_json): - """Set SSL Storage Status for Server or Cluster ActiveGate""" - response = rh.cluster_post( - cluster, - "sslCertificate/store/" + str(entity_type) + "/" + str(entity_id), - json=ssl_json - ) - return response.json() +def set_cert(cluster, entity_type, entity_id, ssl_json): + """Set SSL Storage Status for Server or Cluster ActiveGate""" + response = rh.make_api_call( + cluster=cluster, + 
method=rh.HTTP.POST, + endpoint=f"{rh.ClusterAPIs.SSL_STORE}/{entity_type}/{entity_id}", + json=ssl_json + ) + return response.json() diff --git a/dynatrace/cluster/sso.py b/dynatrace/cluster/sso.py index 30e1e11..df73b0a 100644 --- a/dynatrace/cluster/sso.py +++ b/dynatrace/cluster/sso.py @@ -3,38 +3,48 @@ ENDPOINT = "sso/ssoProvider" -def disable_sso (cluster): - """Disable SSO Sign-in""" - disable_payload = { - "ssoProvider": "NONE", - "loginPage": "STANDARD", - "ssoEnabled": False, - "ssoGroupsEnabled": False, - "ssoLoginDisabled": True - } - response = rh.cluster_post(cluster, ENDPOINT, json=disable_payload) - return response.status_code + +def disable_sso(cluster): + """Disable SSO Sign-in""" + disable_payload = { + "ssoProvider": "NONE", + "loginPage": "STANDARD", + "ssoEnabled": False, + "ssoGroupsEnabled": False, + "ssoLoginDisabled": True + } + response = rh.make_api_call(cluster=cluster, + endpoint=ENDPOINT, + method=rh.HTTP.POST, + json=disable_payload) + return response.status_code + def enable_sso(cluster, disable_local=False, groups_enabled=False, is_openid=False): - """Turns on SSO that has already been configured""" - enable_payload = { - "ssoProvider":"SAML", - "loginPage":"STANDARD", - "ssoEnabled":True, - "ssoGroupsEnabled":False, - "ssoLoginDisabled":False - } - - if disable_local: - enable_payload['loginPage'] = "SSO" - if groups_enabled: - enable_payload['ssoGroupsEnabled'] = True - if is_openid: - enable_payload['ssoProvider'] = "OIDC" - - response = rh.cluster_post(cluster, ENDPOINT, json=enable_payload) - return response.status_code - -def get_sso_status (cluster): - response = rh.cluster_get(cluster, ENDPOINT) - return response.json() + """Turns on SSO that has already been configured""" + enable_payload = { + "ssoProvider": "SAML", + "loginPage": "STANDARD", + "ssoEnabled": True, + "ssoGroupsEnabled": False, + "ssoLoginDisabled": False + } + + if disable_local: + enable_payload['loginPage'] = "SSO" + if groups_enabled: + enable_payload['ssoGroupsEnabled'] = True + if is_openid: + enable_payload['ssoProvider'] = "OIDC" + + response = rh.make_api_call(cluster=cluster, + endpoint=ENDPOINT, + method=rh.HTTP.POST, + json=enable_payload) + return response.status_code + + +def get_sso_status(cluster): + response = rh.make_api_call(cluster=cluster, + endpoint=ENDPOINT) + return response.json() diff --git a/dynatrace/cluster/user_groups.py b/dynatrace/cluster/user_groups.py index f6d4ec2..c073bac 100644 --- a/dynatrace/cluster/user_groups.py +++ b/dynatrace/cluster/user_groups.py @@ -11,91 +11,102 @@ "view_senstive": "VIEW_SENSITIVE_REQUEST_DATA" } + def generate_group_name(template, user_type, tenant, app_name): - template = template.replace("{USER_TYPE}", user_type) - template = template.replace("{TENANT}", tenant) - template = template.replace("{APP_NAME}", app_name) - template = template.lower() - return template + template = template.replace("{USER_TYPE}", user_type) + template = template.replace("{TENANT}", tenant) + template = template.replace("{APP_NAME}", app_name) + template = template.lower() + return template + def create_app_groups(cluster, app_name): - """Create Dynatrace User Groups for Applications""" - role_types = user_variables.USER_GROUPS['role_types'] - role_tenants = user_variables.USER_GROUPS['role_tenants'] - - all_new_groups = {} - for current_tenant in role_tenants: - all_new_groups[current_tenant] = {} - for current_type_key, current_type_value in role_types.items(): - group_id = generate_group_name(user_variables.USER_GROUP_TEMPLATE, 
current_type_value, current_tenant, app_name)
-      current_group = {
-        "isClusterAdminGroup": False,
-        "name":group_id,
-        "ldapGroupNames": [
-          group_id,
-        ],
-        "accessRight": {}
-      }
-
-      response = rh.cluster_post(
-        cluster,
-        "groups",
-        json=current_group
-      )
-      all_new_groups[current_tenant][current_type_key] = ((response.json())['id'])
-  return all_new_groups
+    """Create Dynatrace User Groups for Applications"""
+    role_types = user_variables.USER_GROUPS['role_types']
+    role_tenants = user_variables.USER_GROUPS['role_tenants']
+
+    all_new_groups = {}
+    for current_tenant in role_tenants:
+        all_new_groups[current_tenant] = {}
+        for current_type_key, current_type_value in role_types.items():
+            group_id = generate_group_name(
+                user_variables.USER_GROUP_TEMPLATE, current_type_value, current_tenant, app_name)
+            current_group = {
+                "isClusterAdminGroup": False,
+                "name": group_id,
+                "ldapGroupNames": [
+                    group_id,
+                ],
+                "accessRight": {}
+            }
+
+            response = rh.make_api_call(
+                cluster=cluster,
+                endpoint=rh.ClusterAPIs.GROUPS,
+                method=rh.HTTP.POST,
+                json=current_group
+            )
+            all_new_groups[current_tenant][current_type_key] = (
+                (response.json())['id'])
+    return all_new_groups
+

 def create_app_groups_setwide(app_name):
-  """Create Dynatrace User Groups for Applications"""
-  for cluster in user_variables.FULL_SET.values():
-    if cluster['is_managed']:
-      create_app_groups(cluster, app_name)
-
-def delete_app_groups (cluster, app_name):
-  role_types = user_variables.USER_GROUPS['role_types']
-  role_tenants = user_variables.USER_GROUPS['role_tenants']
-
-  for current_tenant in role_tenants:
-    for current_type_value in role_types:
-      group_id = generate_group_name(user_variables.USER_GROUP_TEMPLATE, current_type_value, current_tenant, app_name)
-      group_id = ''.join(e for e in group_id if e.isalnum())
-      rh.cluster_delete(
-        cluster,
-        "groups/" + group_id
-      )
+    """Create Dynatrace User Groups for Applications"""
+    for cluster in user_variables.FULL_SET.values():
+        if cluster['is_managed']:
+            create_app_groups(cluster, app_name)
+
+
+def delete_app_groups(cluster, app_name):
+    """Delete Dynatrace User Groups for Applications"""
+    role_types = user_variables.USER_GROUPS['role_types']
+    role_tenants = user_variables.USER_GROUPS['role_tenants']
+
+    for current_tenant in role_tenants:
+        for current_type_value in role_types:
+            group_id = generate_group_name(
+                user_variables.USER_GROUP_TEMPLATE, current_type_value, current_tenant, app_name)
+            group_id = ''.join(e for e in group_id if e.isalnum())
+            rh.make_api_call(
+                cluster=cluster,
+                method=rh.HTTP.DELETE,
+                endpoint=f"{rh.ClusterAPIs.GROUPS}/{group_id}"
+            )
+

 def delete_app_groups_setwide(app_name):
-  """Create Dynatrace User Groups for Applications"""
-  for cluster in user_variables.FULL_SET.values():
-    if cluster['is_managed']:
-      delete_app_groups(cluster, app_name)
+    """Delete Dynatrace User Groups for Applications"""
+    for cluster in user_variables.FULL_SET.values():
+        if cluster['is_managed']:
+            delete_app_groups(cluster, app_name)
+
+
 def create_app_clusterwide(cluster, app_name, zones=None):
-  """Create App User Groups and Management Zones"""
-  # Create Standard App MZs
-  mz_list = {}
-  for tenant_key in cluster['tenant'].keys():
-    mzh.add_management_zone(
-      cluster,
-      tenant_key,
-      str.upper(app_name)
-    )
-    if tenant_key in zones:
-      mz_list[tenant_key] = []
-      for zone in zones[tenant_key]:
-        mz_id = mzh.add_management_zone(
+    """Create App User Groups and Management Zones"""
+    # Create Standard App MZs
+    mz_list = {}
+    for tenant_key in cluster['tenant'].keys():
+        mzh.add_management_zone(
            cluster,
            tenant_key,
-      str.upper(app_name),
-      zone
-    )
-    if mz_id is not None:
-      mz_list[tenant_key].append(mz_id)
+            str.upper(app_name)
        )
+        if zones and tenant_key in zones:
+            mz_list[tenant_key] = []
+            for zone in zones[tenant_key]:
+                mz_id = mzh.add_management_zone(
+                    cluster,
+                    tenant_key,
+                    str.upper(app_name),
+                    zone
+                )
+                if mz_id is not None:
+                    mz_list[tenant_key].append(mz_id)

-  # Create User Groups
-  user_groups = create_app_groups(cluster, app_name)
-  print(user_groups)
+    # Create User Groups
+    user_groups = create_app_groups(cluster, app_name)
+    print(user_groups)

-  # for tenant in user_variables.USER_GROUPS['role_tenants']:
-  #   if "access_env" in user_groups [tenant]:
-  #     add_mz_to_user
\ No newline at end of file
+    # for tenant in user_variables.USER_GROUPS['role_tenants']:
+    #   if "access_env" in user_groups [tenant]:
+    #     add_mz_to_user
diff --git a/dynatrace/cluster/users.py b/dynatrace/cluster/users.py
index 65ceda2..2331503 100644
--- a/dynatrace/cluster/users.py
+++ b/dynatrace/cluster/users.py
@@ -1,51 +1,73 @@
 """User Operations in Cluster Mangement"""
 import dynatrace.requests.request_handler as rh
+from dynatrace.exceptions import ManagedClusterOnlyException

 # TODO add check for is_managed

+
 def check_is_managed(cluster, ignore_saas):
-  """Checks if the cluster is Managed"""
-  if not cluster['is_managed'] and not ignore_saas:
-    raise Exception ('Cannot run operation on SaaS instances!')
-  return cluster['is_managed']
+    """Checks if the cluster is Managed"""
+    if not cluster['is_managed'] and not ignore_saas:
+        raise ManagedClusterOnlyException()
+    return cluster['is_managed']
+

 def get_users(cluster, ignore_saas=True):
-  """Get the list of Users on the Cluster"""
-  check_is_managed(cluster, ignore_saas)
-  response = rh.cluster_get(cluster, "users")
-  return response.json()
+    """Get the list of Users on the Cluster"""
+    check_is_managed(cluster, ignore_saas)
+    response = rh.make_api_call(cluster=cluster,
+                                endpoint=rh.ClusterAPIs.USERS)
+    return response.json()
+

 def add_user(cluster, user_json, ignore_saas=True):
-  """Add User to Cluster"""
-  check_is_managed(cluster, ignore_saas)
-  rh.cluster_post(cluster, "/users", json=user_json)
-  return 'OK'
+    """Add User to Cluster"""
+    check_is_managed(cluster, ignore_saas)
+    rh.make_api_call(cluster=cluster,
+                     endpoint=rh.ClusterAPIs.USERS,
+                     method=rh.HTTP.POST,
+                     json=user_json)
+    return 'OK'
+

 def update_user(cluster, user_json, ignore_saas=True):
-  """Update User to Cluster"""
-  check_is_managed(cluster, ignore_saas)
-  rh.cluster_put(cluster, "/users", json=user_json)
-  return 'OK'
+    """Update User to Cluster"""
+    check_is_managed(cluster, ignore_saas)
+    rh.make_api_call(cluster=cluster,
+                     endpoint=rh.ClusterAPIs.USERS,
+                     method=rh.HTTP.PUT,
+                     json=user_json)
+    return 'OK'
+

 def get_user(cluster, user_id, ignore_saas=True):
-  """Get Details for a Single User"""
-  check_is_managed(cluster, ignore_saas)
-  response = rh.cluster_get(cluster, "users/" + user_id)
-  return response.json()
+    """Get Details for a Single User"""
+    check_is_managed(cluster, ignore_saas)
+    response = rh.make_api_call(cluster=cluster,
+                                endpoint=f"{rh.ClusterAPIs.USERS}/{user_id}")
+    return response.json()
+

 def delete_user(cluster, user_id, ignore_saas=True):
-  """Delete a Single User"""
-  check_is_managed(cluster, ignore_saas)
-  response = rh.cluster_delete(cluster, "users/" + user_id)
-  return response.json()
+    """Delete a Single User"""
+    check_is_managed(cluster, ignore_saas)
+    response = rh.make_api_call(cluster=cluster,
+                                method=rh.HTTP.DELETE,
endpoint=f"{rh.ClusterAPIs.USERS}/{user_id}") + return response.json() + def get_user_count(cluster, ignore_saas=True): - """Return the number of, users in a cluster""" - check_is_managed(cluster, ignore_saas) - return len(get_users(cluster)) + """Return the number of, users in a cluster""" + check_is_managed(cluster, ignore_saas) + return len(get_users(cluster)) + def add_user_bulk(cluster, user_json, ignore_saas=True): - """Add Multiple Users""" - check_is_managed(cluster, ignore_saas) - rh.cluster_put(cluster, "/users/bulk", json=user_json) - return 'OK' + """Add Multiple Users""" + check_is_managed(cluster, ignore_saas) + rh.make_api_call(cluster=cluster, + method=rh.HTTP.POST, + endpoint=f"{rh.ClusterAPIs.USERS}/bulk", + json=user_json) + return 'OK' diff --git a/dynatrace/exceptions.py b/dynatrace/exceptions.py new file mode 100644 index 0000000..48367a8 --- /dev/null +++ b/dynatrace/exceptions.py @@ -0,0 +1,25 @@ +''' +Module containing all the custom exceptions for this project +''' +from sys import stderr + + +class InvalidAPIResponseException (Exception): + def __init__(self, message): + print(message, file=stderr) + + +class InvalidDateFormatException(ValueError): + def __init__(self, required_format): + self.message = f"Incorrect Date for following entry: {required_format}" + +class InvalidScopeException(ValueError): + def __init__(self, required_format): + self.required_format = required_format + print("Invalid scope used. Tag required for management zone, matching rule: %s", + required_format, file=stderr) + + +class ManagedClusterOnlyException(TypeError): + def __init__(self): + print("This operation is only supported on Dynatrace Managed!", file=stderr) diff --git a/dynatrace/requests/request_handler.py b/dynatrace/requests/request_handler.py index d411f63..3baf04d 100644 --- a/dynatrace/requests/request_handler.py +++ b/dynatrace/requests/request_handler.py @@ -1,300 +1,164 @@ """Make API Request to available Dynatrace API""" -import warnings -import contextlib import requests -from urllib3.exceptions import InsecureRequestWarning +import time +from dynatrace.exceptions import InvalidAPIResponseException, ManagedClusterOnlyException +from enum import Enum, auto -HTTPS_STR = "https://" -CLUSTER_V1_PATH = "/api/v1.0/onpremise/" -ENV_API_V1 = "/api/v1/" -CONFIG_API_V1 = "/api/config/v1/" - -OLD_MERGE_ENVIRONMENT_SETTINGS = requests.Session.merge_environment_settings - -@contextlib.contextmanager -def no_ssl_verification(): - """Silence Request Warning for Unchecked SSL""" - opened_adapters = set() - - def merge_environment_settings(self, url, proxies, stream, verify, cert): - # Verification happens only once per connection so we need to close - # all the opened adapters once we're done. Otherwise, the effects of - # verify=False persist beyond the end of this context manager. 
- opened_adapters.add(self.get_adapter(url)) - - settings = OLD_MERGE_ENVIRONMENT_SETTINGS(self, url, proxies, stream, verify, cert) - - return settings - - requests.Session.merge_environment_settings = merge_environment_settings - - try: - with warnings.catch_warnings(): - warnings.simplefilter('ignore', InsecureRequestWarning) - yield - finally: - requests.Session.merge_environment_settings = OLD_MERGE_ENVIRONMENT_SETTINGS - - for adapter in opened_adapters: - try: - adapter.close() - except Exception: - pass - -def check_response(response): - """Checks if the Reponse has a Successful Status Code""" - if not 200 <= response.status_code <= 299: - raise Exception( - "Response Error\n" + response.url + "\n" + str(response.status_code) + "\n" + response.text - ) - -def check_managed(managed_bool): - """Checks if the Cluster Operation is valid (Managed) for the current cluster""" - if not managed_bool: - raise Exception("Cluster Operations not supported for SaaS!") - -def sanitize_endpoint (endpoint): - if endpoint[0] == '/': - endpoint = endpoint [1:] - return endpoint - -def generate_tenant_url(cluster, tenant): - """Generate URL based on SaaS or Managed""" - url = HTTPS_STR - if cluster["is_managed"]: - url = url + cluster['url'] + "/e/" + cluster['tenant'][tenant] - else: - url = url + cluster['tenant'][tenant] + "." + cluster['url'] - return url - -def cluster_get(cluster, endpoint, params=None): - """Get Request to Cluster API""" - check_managed(cluster["is_managed"]) - - if not params: - params = {} - - endpoint = sanitize_endpoint(endpoint) - - with no_ssl_verification(): - params['Api-Token'] = cluster['cluster_token'] - - response = requests.get( - HTTPS_STR + cluster['url'] + CLUSTER_V1_PATH + endpoint, - params=params, - verify=(True if "verify_ssl" not in cluster else cluster ["verify_ssl"]) - ) - check_response(response) - return response - - -def cluster_post(cluster, endpoint, params=None, json=None): - """Post Request to Cluster API""" - check_managed(cluster["is_managed"]) - - if not params: - params = {} - - endpoint = sanitize_endpoint(endpoint) - - with no_ssl_verification(): - params['Api-Token'] = cluster['cluster_token'] - - response = requests.post( - HTTPS_STR + cluster['url'] + CLUSTER_V1_PATH + endpoint, - params=params, - json=json, - verify=(True if "verify_ssl" not in cluster else cluster ["verify_ssl"]) - ) - check_response(response) - return response - -def cluster_put(cluster, endpoint, params=None, json=None): - """Post Request to Cluster API""" - check_managed(cluster["is_managed"]) - - if not params: - params = {} - - endpoint = sanitize_endpoint(endpoint) - - with no_ssl_verification(): - params['Api-Token'] = cluster['cluster_token'] - - response = requests.put( - HTTPS_STR + cluster['url'] + CLUSTER_V1_PATH + endpoint, - params=params, - json=json, - verify=(True if "verify_ssl" not in cluster else cluster ["verify_ssl"]) - ) - check_response(response) - return response +requests.packages.urllib3.disable_warnings() -def cluster_delete(cluster, endpoint, params=None, json=None): - """Delete Request to Cluster API""" - check_managed(cluster["is_managed"]) - - if not params: - params = {} - - endpoint = sanitize_endpoint(endpoint) - - with no_ssl_verification(): - params['Api-Token'] = cluster['cluster_token'] - response = requests.delete( - HTTPS_STR + cluster['url'] + CLUSTER_V1_PATH + endpoint, - params=params, - json=json, - verify=(True if "verify_ssl" not in cluster else cluster ["verify_ssl"]) - ) - check_response(response) - return response - -def 
env_get(cluster, tenant, endpoint, params=None): - """Get Request to Tenant Environment API""" - if not params: - params = {} - - endpoint = sanitize_endpoint(endpoint) - - with no_ssl_verification(): - params['Api-Token'] = cluster['api_token'][tenant] - response = requests.get( - generate_tenant_url(cluster, tenant) + ENV_API_V1 + endpoint, - params=params, - verify=(True if "verify_ssl" not in cluster else cluster ["verify_ssl"]) - ) - check_response(response) - return response - -def env_post(cluster, tenant, endpoint, params=None, json=None): - """Post Request to Tenant Environment API""" - if not params: - params = {} - - endpoint = sanitize_endpoint(endpoint) - - with no_ssl_verification(): - params['Api-Token'] = cluster['api_token'][tenant] - - response = requests.post( - generate_tenant_url(cluster, tenant) + ENV_API_V1 + endpoint, - params=params, - verify=(True if "verify_ssl" not in cluster else cluster ["verify_ssl"]), - json=json - ) - check_response(response) - return response - -def env_put(cluster, tenant, endpoint, params=None, json=None): - """Post Request to Tenant Environment API""" - if not params: - params = {} - - endpoint = sanitize_endpoint(endpoint) - - with no_ssl_verification(): - params['Api-Token'] = cluster['api_token'][tenant] - - response = requests.put( - generate_tenant_url(cluster, tenant) + ENV_API_V1 + endpoint, - params=params, - verify=(True if "verify_ssl" not in cluster else cluster ["verify_ssl"]), - json=json - ) - check_response(response) - return response - -def env_delete(cluster, tenant, endpoint, params=None): - """Get Request to Tenant Environment API""" - if not params: - params = {} - - endpoint = sanitize_endpoint(endpoint) - - with no_ssl_verification(): - params['Api-Token'] = cluster['api_token'][tenant] - response = requests.delete( - generate_tenant_url(cluster, tenant) + ENV_API_V1 + endpoint, - params=params, - verify=(True if "verify_ssl" not in cluster else cluster ["verify_ssl"]) - ) - check_response(response) - return response - - -def config_get(cluster, tenant, endpoint, params=None, json=None): - """Get Request to Tenant Configuration API""" - if not params: - params = {} +HTTPS_STR = "https://" - endpoint = sanitize_endpoint(endpoint) - with no_ssl_verification(): - params['Api-Token'] = cluster['api_token'][tenant] +class ClusterAPIs(Enum): + """ + Enum representing Dynatrace Cluster REST API endpoints.\n + Use these values when adding the 'endpoint' argument. + """ + BASE = "/api/v1.0/onpremise" + CLUSTER = f"{BASE}/cluster" + CONFIG = f"{CLUSTER}/configuration" + CONFIG_STATUS = f"{CONFIG}/status" + SSL = f"{BASE}/sslCertificate" + SSL_STORE = f"{SSL}/store" + SSO = "" # Need to confirm endpoint + GROUPS = f"{BASE}/groups" + USERS = f"{BASE}/users" + + def __str__(self): + return self.value + + +class TenantAPIs(Enum): + """ + Enum representing Dynatrace Tenant REST API endpoints.\n + Use these values when adding the 'endpoint' argument. 
+ """ + PROBLEM_DETAILS = "/api/v1/problem/details" + PROBLEM_FEED = "/api/v1/problem/feed" + PROBLEM_STATUS = "/api/v1/problem/status" + DEPLOY_ONEAGENT = "/api/v1/deployment/installer/agent" + DEPLOY_ONEAGENT_CONNECTION_INFO = "/api/v1/deployment/installer/agent/connectioninfo" + DEPLOY_ONEAGENT_CONNECTION_ENDPOINTS = "/api/v1/deployment/installer/agent/connectioninfo/endpoints" + DEPLOY_ACTIVEGATE = "/api/v1/deployment/installer/gateway" + DEPLOY_BOSH = "/api/v1/deployment/boshrelease" + EVENTS = "/api/v1/events" + USER_SESSIONS = "/api/v1/userSessionQueryLanguage" + TOKENS = "/api/v1/tokens" + SYNTHETIC_MONITORS = "/api/v1/synthetic/monitors" + SYNTHETIC_LOCATIONS = "/api/v1/synthetic/locations" + SYNTHETIC_NODES = "/api/v1/synthetic/nodes" + ENTITIES = "/api/v2/entities" + METRICS = "/api/v2/metrics" + TAGS = "/api/v2/tags" + NETWORK_ZONES = "/api/v2/networkZones" + MANAGEMENT_ZONES = "/api/config/v1/managementZones" + V1_TOPOLOGY = "/api/v1/entity" + MAINTENANCE_WINDOWS = "/api/config/v1/maintenanceWindows" + ONEAGENTS = "/api/v1/oneagents" + EXTENSIONS = "/api/config/v1/extensions" + REQUEST_ATTRIBUTES = "/api/config/v1/service/requestAttributes/" + REQUEST_NAMING = "/api/config/v1/service/requestNaming" + + def __str__(self): + return self.value + + +class HTTP(Enum): + ''' + Enum representing HTTP request methods.\n + Use these values when adding the 'method' argument. + ''' + GET = auto() + PUT = auto() + POST = auto() + DELETE = auto() + + def __str__(self): + return self.name + + def __repr__(self): + return self.name + + +def make_api_call(cluster, endpoint, tenant=None, params=None, json=None, method=HTTP.GET): + ''' + Function makes an API call in a safe way, taking into account the rate limits. + This will ensure the API call will always go through, with the program waiting for the limit to reset if needed.\n + + @param cluster - Cluster dictionary from variable_set\n + @param endpoint - API endpoint to call.\n + @param tenant - String of tenant name used in cluster dictionary\n + @param json - dictionary to be converted to JSON request\n + @param method - HTTP method to use in call. 
Use HTTP enum.\n
+    \n
+    @return - response from request\n
+    '''
+    # Set the right URL for the operation
+    url = f"{generate_tenant_url(cluster, tenant)}{endpoint}" if tenant else f"{HTTPS_STR}{cluster['url']}{endpoint}"
+
+    if not params:
+        params = {}
+
+    # Get correct token for the operation
+    if 'onpremise' in str(endpoint) or 'cluster' in str(endpoint):
+        check_managed(cluster)
+        headers = dict(Authorization=f"Api-Token {cluster['cluster_token']}")
+    else:
+        headers = dict(Authorization=f"Api-Token {cluster['api_token'][tenant]}")
+
+    # Loop to retry in case of rate limits
+    while True:
+        response = requests.request(
+            method=str(method),
+            url=url,
+            params=params,
+            headers=headers,
+            json=json,
+            verify=cluster.get('verify_ssl', True)
+        )
+        if check_response(response):
+            break
-        response = requests.get(
-            generate_tenant_url(cluster, tenant) + CONFIG_API_V1 + endpoint,
-            params=params,
-            verify=(True if "verify_ssl" not in cluster else cluster ["verify_ssl"]),
-            json=json
-        )
-        check_response(response)
     return response

-def config_post(cluster, tenant, endpoint, params=None, json=None):
-    """Post Request to Tenant Configuration API"""
-    if not params:
-        params = {}

-    endpoint = sanitize_endpoint(endpoint)

-    with no_ssl_verification():
-        params['Api-Token'] = cluster['api_token'][tenant]

-        response = requests.post(
-            generate_tenant_url(cluster, tenant) + CONFIG_API_V1 + endpoint,
-            params=params,
-            verify=(True if "verify_ssl" not in cluster else cluster ["verify_ssl"]),
-            json=json
-        )
-        check_response(response)
-        return response

+def check_response(response):
+    '''
+    Checks if the Response has a Successful Status Code
+
+    @param response - The response variable returned from a request\n
+
+    '''
+    headers = response.headers

-def config_put(cluster, tenant, endpoint, params=None, json=None):
-    """Put Request to Tenant Configuration API"""
-    if not params:
-        params = {}
+    if response.status_code == 429:
+        print("Endpoint request limit of "
+              f"{headers['x-ratelimit-limit']} was reached!")
+        # Wait until the limit resets and try again
+        time_to_wait = int(headers['x-ratelimit-reset'])/1000000 - time.time()

-    endpoint = sanitize_endpoint(endpoint)
+        # Check that there's actually time to wait
+        if time_to_wait > 0:
+            print(f"Waiting {time_to_wait} sec until the limit resets.")
+            time.sleep(float(time_to_wait))
+        return False
+    elif not 200 <= response.status_code <= 299:
+        raise InvalidAPIResponseException(f"Response Error:\n{response.url}\n{response.status_code}\n{response.text}")

-    with no_ssl_verification():
-        params['Api-Token'] = cluster['api_token'][tenant]
+    return True

-        response = requests.put(
-            generate_tenant_url(cluster, tenant) + CONFIG_API_V1 + endpoint,
-            params=params,
-            verify=(True if "verify_ssl" not in cluster else cluster ["verify_ssl"]),
-            json=json
-        )
-        check_response(response)
-        return response

-def config_delete(cluster, tenant, endpoint, params=None, json=None):
-    """Delete Request to Tenant Configuration API"""
-    if not params:
-        params = {}
+def check_managed(cluster):
+    """Checks if the Cluster Operation is valid (Managed) for the current cluster"""
+    if not cluster['is_managed']:
+        raise ManagedClusterOnlyException()

-    endpoint = sanitize_endpoint(endpoint)

-    with no_ssl_verification():
-        params['Api-Token'] = cluster['api_token'][tenant]
-
-        response = requests.delete(
-            generate_tenant_url(cluster, tenant) + CONFIG_API_V1 + endpoint,
-            params=params,
-            verify=(True if "verify_ssl" not in cluster else cluster ["verify_ssl"]),
-            json=json
-        )
-        check_response(response)
-
return response +def generate_tenant_url(cluster, tenant): + """Generate URL based on SaaS or Managed""" + url = HTTPS_STR + if cluster["is_managed"]: + url += cluster['url'] + "/e/" + cluster['tenant'][tenant] + else: + url += cluster['tenant'][tenant] + "." + cluster['url'] + return url diff --git a/dynatrace/tenant/extensions.py b/dynatrace/tenant/extensions.py new file mode 100644 index 0000000..a5967e8 --- /dev/null +++ b/dynatrace/tenant/extensions.py @@ -0,0 +1,34 @@ +from dynatrace.requests import request_handler as rh + +ENDPOINT = rh.TenantAPIs.EXTENSIONS + + +def get_all_extensions(cluster, tenant, params=None): + """ Gets the list of all extensions available""" + # TODO: Add pagination + + response = rh.make_api_call(cluster=cluster, + tenant=tenant, + endpoint=ENDPOINT, + params=params) + return response.json().get('extensions') + + +def get_extension_details(cluster, tenant, extension_id): + """ Get the details of a specific extension""" + + response = rh.make_api_call(cluster=cluster, + endpoint=f"{ENDPOINT}/{extension_id}", + tenant=tenant) + return response.json() + + +def get_extension_states(cluster, tenant, extension_id, params=None): + """ Gets all the deployment states of a specific extension""" + # TODO: Add pagination + response = rh.make_api_call(cluster=cluster, + tenant=tenant, + endpoint=f"{ENDPOINT}/{extension_id}/states", + params=params) + + return response.json().get('states') diff --git a/dynatrace/tenant/host_groups.py b/dynatrace/tenant/host_groups.py index b927775..495b012 100644 --- a/dynatrace/tenant/host_groups.py +++ b/dynatrace/tenant/host_groups.py @@ -1,7 +1,5 @@ """Host Group Information for Tenant""" -import user_variables -from dynatrace.topology import hosts as topology_hosts -from dynatrace.requests import request_handler as rh +from dynatrace.tenant.topology import hosts as topology_hosts # TODO redo export function (break out to export function?) 
 # def export_host_groups_setwide(full_set):
@@ -12,27 +10,35 @@
 #             outFile.write(groupName+"\n")
 #     print(envName + " writing to 'HostGroups - " + envName + ".txt'")

+
 def get_host_groups_tenantwide(cluster, tenant):
-  params = {
-    'relativeTime':'day',
-    'includeDetails':'true'
-  }
-  response = topology_hosts.get_hosts_tenantwide(cluster, tenant, params=params)
-  host_groups = {}
-  for host in response:
-    host_groups[host['hostGroup']['meId']] = host['hostGroup']['name']
-  return host_groups
-
-def get_host_groups_clusterwide (cluster):
-  # TODO add split_by_tenant optional variable
-  host_groups_custerwide = {}
-  for tenant in cluster['tenant']:
-    host_groups_custerwide.update(get_host_groups_tenantwide(cluster, tenant))
-  return host_groups_custerwide
-
-def get_host_groups_setwide (full_set):
-  # TODO add split_by_tenant optional variable
-  host_groups_setwide = {}
-  for cluster in full_set.values():
-    host_groups_setwide.update(get_host_groups_clusterwide(cluster))
-  return host_groups_setwide
\ No newline at end of file
+    params = {
+        'relativeTime': 'day',
+        'includeDetails': 'true'
+    }
+    response = topology_hosts.get_hosts_tenantwide(cluster,
+                                                   tenant,
+                                                   params=params)
+    host_groups = {}
+    for host in response:
+        if host.get('hostGroup'):
+            host_groups[host['hostGroup']['meId']] = host['hostGroup']['name']
+    return host_groups
+
+
+def get_host_groups_clusterwide(cluster):
+    # TODO add split_by_tenant optional variable
+    host_groups_clusterwide = {}
+    for tenant in cluster['tenant']:
+        host_groups_clusterwide.update(
+            get_host_groups_tenantwide(cluster, tenant)
+        )
+    return host_groups_clusterwide
+
+
+def get_host_groups_setwide(full_set):
+    # TODO add split_by_tenant optional variable
+    host_groups_setwide = {}
+    for cluster in full_set.values():
+        host_groups_setwide.update(get_host_groups_clusterwide(cluster))
+    return host_groups_setwide
diff --git a/dynatrace/tenant/maintenance.py b/dynatrace/tenant/maintenance.py
index a49dc27..c1aef4e 100644
--- a/dynatrace/tenant/maintenance.py
+++ b/dynatrace/tenant/maintenance.py
@@ -3,160 +3,392 @@
 import re
 import dynatrace.requests.request_handler as rh
 import user_variables as uv
+from dynatrace.exceptions import InvalidDateFormatException
+from enum import Enum, auto

-MZ_ENDPOINT = "/maintenanceWindows/"
+MZ_ENDPOINT = rh.TenantAPIs.MAINTENANCE_WINDOWS

-class InvalidDateFormatException(ValueError):
-  def __init__(self, required_format):
-    self.required_format = required_format
-    print ("Incorrect Date for following entry: %s", required_format)

-class InvalidScopeException(ValueError):
-  def __init__(self, required_format):
-    self.required_format = required_format
-    print ("Invalid scope used. Tag required for management zone, matching rule: %s", required_format)

+class Suppression(Enum):
+    """
+    Types of suppression for the create Maintenance Window JSON. Suppression is required.
+
+    Args:
+        Enum (DETECT_PROBLEMS_AND_ALERT): Full Alerting. Entities in scope will have notes that a Maintenance Window was active
+        Enum (DETECT_PROBLEMS_DONT_ALERT): Problems detected but alerting profiles in that scope are not triggered
+        Enum (DONT_DETECT_PROBLEMS): Problem detection completely off for the scope
+    """
+    DETECT_PROBLEMS_AND_ALERT = auto()
+    DETECT_PROBLEMS_DONT_ALERT = auto()
+    DONT_DETECT_PROBLEMS = auto()
+
+    def __str__(self):
+        return self.name
+
+    def __repr__(self):
+        return self.name
+
+
+class DayOfWeek(Enum):
+    """
+    Day of the Week
+
+    Args:
+        Enum (MONDAY): MONDAY
+        Enum (TUESDAY): TUESDAY
+        Enum (WEDNESDAY): WEDNESDAY
+        Enum (THURSDAY): THURSDAY
+        Enum (FRIDAY): FRIDAY
+        Enum (SATURDAY): SATURDAY
+        Enum (SUNDAY): SUNDAY
+    """
+
+    MONDAY = auto()
+    TUESDAY = auto()
+    WEDNESDAY = auto()
+    THURSDAY = auto()
+    FRIDAY = auto()
+    SATURDAY = auto()
+    SUNDAY = auto()
+
+    def __str__(self):
+        return self.name
+
+    def __repr__(self):
+        return self.name
+
+
+class Context(Enum):
+    """Tag Contexts that are available"""
+    AWS = auto()
+    AWS_GENERIC = auto()
+    AZURE = auto()
+    CLOUD_FOUNDRY = auto()
+    CONTEXTLESS = auto()
+    ENVIRONMENT = auto()
+    GOOGLE_CLOUD = auto()
+    KUBERNETES = auto()
+
+    def __str__(self):
+        return self.name
+
+    def __repr__(self):
+        return self.name
+
+
+class RecurrenceType(Enum):
+    """Recurrence of the Maintenance Window"""
+    DAILY = auto()
+    MONTHLY = auto()
+    ONCE = auto()
+    WEEKLY = auto()
+
+    def __str__(self):
+        return self.name
+
+    def __repr__(self):
+        return self.name
+
+
+class FilterType(Enum):
+    """All Filter Types available for tag filters"""
+    APM_SECURITY_GATEWAY = auto()
+    APPLICATION = auto()
+    APPLICATION_METHOD = auto()
+    APPLICATION_METHOD_GROUP = auto()
+    APPMON_SERVER = auto()
+    APPMON_SYSTEM_PROFILE = auto()
+    AUTO_SCALING_GROUP = auto()
+    AUXILIARY_SYNTHETIC_TEST = auto()
+    AWS_APPLICATION_LOAD_BALANCER = auto()
+    AWS_AVAILABILITY_ZONE = auto()
+    AWS_CREDENTIALS = auto()
+    AWS_LAMBDA_FUNCTION = auto()
+    AWS_NETWORK_LOAD_BALANCER = auto()
+    AZURE_API_MANAGEMENT_SERVICE = auto()
+    AZURE_APPLICATION_GATEWAY = auto()
+    AZURE_COSMOS_DB = auto()
+    AZURE_CREDENTIALS = auto()
+    AZURE_EVENT_HUB = auto()
+    AZURE_EVENT_HUB_NAMESPACE = auto()
+    AZURE_FUNCTION_APP = auto()
+    AZURE_IOT_HUB = auto()
+    AZURE_LOAD_BALANCER = auto()
+    AZURE_MGMT_GROUP = auto()
+    AZURE_REDIS_CACHE = auto()
+    AZURE_REGION = auto()
+    AZURE_SERVICE_BUS_NAMESPACE = auto()
+    AZURE_SERVICE_BUS_QUEUE = auto()
+    AZURE_SERVICE_BUS_TOPIC = auto()
+    AZURE_SQL_DATABASE = auto()
+    AZURE_SQL_ELASTIC_POOL = auto()
+    AZURE_SQL_SERVER = auto()
+    AZURE_STORAGE_ACCOUNT = auto()
+    AZURE_SUBSCRIPTION = auto()
+    AZURE_TENANT = auto()
+    AZURE_VM = auto()
+    AZURE_VM_SCALE_SET = auto()
+    AZURE_WEB_APP = auto()
+    CF_APPLICATION = auto()
+    CF_FOUNDATION = auto()
+    CINDER_VOLUME = auto()
+    CLOUD_APPLICATION = auto()
+    CLOUD_APPLICATION_INSTANCE = auto()
+    CLOUD_APPLICATION_NAMESPACE = auto()
+    CONTAINER_GROUP = auto()
+    CONTAINER_GROUP_INSTANCE = auto()
+    CUSTOM_APPLICATION = auto()
+    CUSTOM_DEVICE = auto()
+    CUSTOM_DEVICE_GROUP = auto()
+    DCRUM_APPLICATION = auto()
+    DCRUM_SERVICE = auto()
+    DCRUM_SERVICE_INSTANCE = auto()
+    DEVICE_APPLICATION_METHOD = auto()
+    DISK = auto()
+    DOCKER_CONTAINER_GROUP = auto()
+    DOCKER_CONTAINER_GROUP_INSTANCE = auto()
+    DYNAMO_DB_TABLE = auto()
+    EBS_VOLUME = auto()
+    EC2_INSTANCE = auto()
+    ELASTIC_LOAD_BALANCER = auto()
+    ENVIRONMENT = auto()
+    EXTERNAL_SYNTHETIC_TEST_STEP = auto()
+    GCP_ZONE = auto()
+    GEOLOCATION = auto()
+    GEOLOC_SITE = auto()
+    GOOGLE_COMPUTE_ENGINE = auto()
+    HOST = auto()
+    HOST_GROUP = 
auto() + HTTP_CHECK = auto() + HTTP_CHECK_STEP = auto() + HYPERVISOR = auto() + KUBERNETES_CLUSTER = auto() + KUBERNETES_NODE = auto() + MOBILE_APPLICATION = auto() + NETWORK_INTERFACE = auto() + NEUTRON_SUBNET = auto() + OPENSTACK_PROJECT = auto() + OPENSTACK_REGION = auto() + OPENSTACK_VM = auto() + OS = auto() + PROCESS_GROUP = auto() + PROCESS_GROUP_INSTANCE = auto() + RELATIONAL_DATABASE_SERVICE = auto() + SERVICE = auto() + SERVICE_INSTANCE = auto() + SERVICE_METHOD = auto() + SERVICE_METHOD_GROUP = auto() + SWIFT_CONTAINER = auto() + SYNTHETIC_LOCATION = auto() + SYNTHETIC_TEST = auto() + SYNTHETIC_TEST_STEP = auto() + VIRTUALMACHINE = auto() + VMWARE_DATACENTER = auto() + + def __str__(self): + return self.name + + def __repr__(self): + return self.name + def validate_datetime(datetime_text, required_format): - try: - datetime.datetime.strptime(datetime_text, required_format) - except ValueError as e: - raise InvalidDateFormatException(required_format) - -def generate_scope(entities=None, filter_type=None, management_zone_id=None, tags=None, matches_any_tag=False): - if entities is None: - entities = [] - matches = [] - matches_payload = {} - if isinstance (filter_type, str): - matches_payload['type'] = filter_type - if management_zone_id: - matches_payload['managementZoneId'] = management_zone_id - if isinstance(tags, list): - matches_payload['tags'] = tags - - matches.append(matches_payload) - - scope = { - 'entities': entities, - 'matches': matches - } - return scope + try: + datetime.datetime.strptime(datetime_text, required_format) + except ValueError: + raise InvalidDateFormatException(required_format) + + +def generate_tag_scope(tag, filter_type=None, management_zone_id=None): + tag_payload = {} + + if management_zone_id: + tag_payload['mzId'] = str(management_zone_id) + + if filter_type: + if filter_type in FilterType._member_names_: + tag_payload['type'] = filter_type + else: + raise ValueError( + "Invalid Filter Type! 
" + + "Please Refer to Enum or Dynatrace Documentation" + ) + + if isinstance(tag, list) and len(tag) > 0: + tag_payload['tags'] = tag + elif isinstance(tag, dict): + tag_payload['tags'] = [tag] + elif isinstance(tag, str): + tag_payload['tags'] = [{'context': "CONTEXTLESS", 'key': tag}] + + return tag_payload + + +def generate_scope(entities=None, tags=None, filter_type=None, management_zone_id=None, match_any_tag=True): + if entities is None: + entities = [] + matches = [] + + if match_any_tag and isinstance(tags, list) and len(tags) > 1: + for tag in tags: + matches.append( + generate_tag_scope( + tag, + filter_type=filter_type, + management_zone_id=management_zone_id + ) + ) + else: + matches.append( + generate_tag_scope( + tags, + filter_type=filter_type, + management_zone_id=management_zone_id + ) + ) + + scope = { + 'entities': entities, + 'matches': matches + } + return scope + def generate_window_json(name, description, suppression, schedule, scope=None, is_planned=False,): - """Generate JSON information needed for creating Maintenance Window""" - window_json = { - "name": name, - "description": description, - "suppression": suppression, - "schedule": schedule - } - window_json ['type'] = "PLANNED" if is_planned else "UNPLANNED" - if scope is not None: - window_json['scope'] = scope - return window_json + """Generate JSON information needed for creating Maintenance Window""" + window_json = { + "name": name, + "description": description, + "suppression": str(suppression), + "schedule": schedule + } + window_json['type'] = "PLANNED" if is_planned else "UNPLANNED" + if scope is not None: + window_json['scope'] = scope + return window_json + def generate_schedule(recurrence_type, start_time, duration, range_start, range_end, day=None, zoneId=None,): - """Create schedule structure for maintenance window""" - # This structure requires a lot of input validation - types_available = [ "DAILY", "MONTHLY", "ONCE", "WEEKLY" ] - days_of_week = [ "FRIDAY", "MONDAY", "SATURDAY", "SUNDAY", "THURSDAY", "TUESDAY", "WEDNESDAY" ] - - recurrence_type = str(recurrence_type).upper() - - # Check Recurrence - if recurrence_type not in types_available: - raise Exception ("Invalid Recurrence Type! Allowed values are: ONCE, DAILY, WEEKLY, MONTHLY") - - # Check ranges - validate_datetime(range_start, "%Y-%m-%d %H:%M") - validate_datetime(range_end, "%Y-%m-%d %H:%M") - - schedule = { - "recurrenceType": recurrence_type, - "start": range_start, - "end": range_end - } - - if zoneId is None: - schedule['zoneId'] = uv.DEFAULT_TIMEZONE - - if recurrence_type != "ONCE": - # Check Start Time - validate_datetime(start_time, "%H:%M") - - # Check Duration - try: - int(duration) - except ValueError: - ("Duration time must be an integer! Duration is length of Maintainence Window in minutes") + """Create schedule structure for maintenance window""" + # This structure requires a lot of input validation + recurrence_type = str(recurrence_type).upper() + + # Check Recurrence + if recurrence_type not in RecurrenceType._member_names_: + raise ValueError( + "Invalid Recurrence Type! 
"Invalid Recurrence Type! Allowed values are: ONCE, DAILY, WEEKLY, MONTHLY")
-  # Check ranges
-  validate_datetime(range_start, "%Y-%m-%d %H:%M")
-  validate_datetime(range_end, "%Y-%m-%d %H:%M")
-
-  schedule = {
-    "recurrenceType": recurrence_type,
-    "start": range_start,
-    "end": range_end
-  }
-
-  if zoneId is None:
-    schedule['zoneId'] = uv.DEFAULT_TIMEZONE
-
-  if recurrence_type != "ONCE":
-    # Check Start Time
-    validate_datetime(start_time, "%H:%M")
-
-    # Check Duration
-    try:
-      int(duration)
-    except ValueError:
-      ("Duration time must be an integer! Duration is length of Maintainence Window in minutes")
+    # Check ranges
+    validate_datetime(range_start, "%Y-%m-%d %H:%M")
+    validate_datetime(range_end, "%Y-%m-%d %H:%M")
+
+    schedule = {
+        "recurrenceType": recurrence_type,
+        "start": range_start,
+        "end": range_end
    }

-    schedule['recurrence'] = {
-      "startTime": start_time,
-      "durationMinutes": duration
+    schedule['zoneId'] = uv.DEFAULT_TIMEZONE if zoneId is None else zoneId

-  # Check Weekly Day
-  if recurrence_type == "WEEKLY":
-    day = str(day).upper()
-    if day in days_of_week:
-      schedule['recurrence']['dayOfWeek'] = day
-    else:
-      raise Exception ("Invalid Weekly Day! Allowed values are " \
-        + "SUNDAY, MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY")
+    if recurrence_type != "ONCE":
+        # Check Start Time
+        validate_datetime(start_time, "%H:%M")

-  # Check Monthly Day
-  if recurrence_type == "MONTHLY":
-    if (1 <= int(day) <= 31):
-      schedule['recurrence']['dayOfMonth'] = day
-    else:
-      raise Exception ("Invalid Monthly Day! Allowed values are 1-31")
+        # Check Duration
+        try:
+            int(duration)
+        except ValueError:
+            raise ValueError("Duration must be an integer! Duration is the length of the Maintenance Window in minutes")

-  return schedule
+        schedule['recurrence'] = {
+            "startTime": start_time,
+            "durationMinutes": duration
+        }

-def create_window (cluster, tenant, json):
-  """Create Maintenance Window"""
-  response = rh.config_post(cluster, tenant, MZ_ENDPOINT, json=json)
-  return response.status_code
+        # Check Weekly Day
+        if recurrence_type == "WEEKLY":
+            day = str(day).upper()
+            if day in DayOfWeek._member_names_:
+                schedule['recurrence']['dayOfWeek'] = day
+            else:
+                raise ValueError("Invalid Weekly Day! Allowed values are "
+                                 + "SUNDAY, MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY")

-def update_window (cluster, tenant, window_id, json):
-  """Update Maintenance Window"""
-  response = rh.config_put(cluster, tenant, MZ_ENDPOINT + window_id, json=json)
-  return response.status_code
+        # Check Monthly Day
+        if recurrence_type == "MONTHLY":
+            if not isinstance(day, int):
+                raise TypeError("Invalid type for Day of Month! Int between 1-31 required")
+            if (1 <= int(day) <= 31):
+                schedule['recurrence']['dayOfMonth'] = day
+            else:
+                raise ValueError("Invalid Monthly Day! Allowed values are 1-31")
Allowed values are 1-31") + return schedule + + +def create_window(cluster, tenant, json): + """Create Maintenance Window""" + response = rh.make_api_call(cluster=cluster, + tenant=tenant, + method=rh.HTTP.POST, + endpoint=MZ_ENDPOINT, + json=json) + return response.json() + + +def update_window(cluster, tenant, window_id, json): + """Update Maintenance Window""" + response = rh.make_api_call(cluster=cluster, + tenant=tenant, + method=rh.HTTP.PUT, + endpoint=f"{MZ_ENDPOINT}/{window_id}", + json=json) + return response.status_code + + +def delete_window(cluster, tenant, window_id): + """Delete Maintenance Window""" + response = rh.make_api_call(cluster=cluster, + tenant=tenant, + method=rh.HTTP.DELETE, + endpoint=f"{MZ_ENDPOINT}/{window_id}") + return response.status_code + + +def get_windows(cluster, tenant): + """Return List of Maintenance Windows in Effect""" + response = rh.make_api_call(cluster=cluster, + tenant=tenant, + endpoint=MZ_ENDPOINT) + return response.json() + + +def get_window(cluster, tenant, window_id): + """Return Maintenance Window Details""" + response = rh.make_api_call(cluster=cluster, + tenant=tenant, + endpoint=f"{MZ_ENDPOINT}/{window_id}") + return response.json() -def get_window (cluster, tenant, window_id): - """Return Maintenance Window Details""" - response = rh.config_get(cluster, tenant, MZ_ENDPOINT + window_id) - return response.json() def parse_tag(tag_string): - # Need a way to process literal colon inside a key - "Parsing Tag to to Context, Key and Value" - m = re.match( - r"(?:\[(\w+)\])?([\w\-\/`\+\.\!\@\#\$\%\^\&\*\(\)\?\[\]\{\}\,\<\>\ \:\;]+)(?:\:(\w*))?", - tag_string - ) - tag_dictionary = {} - if m.group(1): - tag_dictionary['context'] = m.group(1) - else: - tag_dictionary['context'] = "CONTEXTLESS" - - tag_dictionary['key'] = m.group(2) # Key is always required - - if m.group(3): - tag_dictionary['value'] = m.group(3) - - return tag_dictionary - \ No newline at end of file + # Need a way to process literal colon inside a key + "Parsing Tag to to Context, Key and Value" + m = re.match( + r"(?:\[(\w+)\])?([\w\-\/`\+\.\!\@\#\$\%\^\&\*\(\)\?\[\]\{\}\,\<\>\ \:\;]+)(?:\:(\w*))?", + tag_string + ) + tag_dictionary = {} + if m.group(1): + tag_dictionary['context'] = m.group(1) + else: + tag_dictionary['context'] = "CONTEXTLESS" + + tag_dictionary['key'] = m.group(2) # Key is always required + + if m.group(3): + tag_dictionary['value'] = m.group(3) + + return tag_dictionary diff --git a/dynatrace/tenant/management_zones.py b/dynatrace/tenant/management_zones.py index e0750aa..192c587 100644 --- a/dynatrace/tenant/management_zones.py +++ b/dynatrace/tenant/management_zones.py @@ -5,107 +5,119 @@ import json from dynatrace.requests import request_handler as rh -def generate_mz_payload(application, env_zone=None): - """Create Payload for Management Zone based on Application and Environment""" - with open('../templates/mz_template.json', 'r') as mz_template: - mz_payload = json.load(mz_template) - - mz_payload['name'] = str(application) - # The Template will have - # Service Rules(0), Process Group Rules(1), Application Rules(2), - # Browser Monitors(3), HTTP Monitor(4), External Monitors(5), Manually Tagged Services (6), - # Manually Tagged Process Groups (7), Mobile Application (8), Custom Device Groups (9), - # Service and Process Groups are different because they allow Key/Value Pairs - - # TODO Consolidate by checking if Key/Value Pair exists - mz_payload['rules'][0]['conditions'][0]['comparisonInfo']['value']['value'] = str(application) - 
mz_payload['rules'][1]['conditions'][0]['comparisonInfo']['value']['value'] = str(application) +ENDPOINT = rh.TenantAPIs.MANAGEMENT_ZONES - for rule_num in range(2, 10): - mz_payload['rules'][rule_num]['conditions'][0]['comparisonInfo']['value']['key'] = "APP: " + str(application) - - if env_zone: - # If environment exists, rename MZ and add environment conditions - mz_payload['name'] = str(application) + " - " + str(env_zone) +def generate_mz_payload(application, env_zone=None): + """Create Payload for Management Zone based on Application and Environment""" + with open('../templates/mz_template.json', 'r') as mz_template: + mz_payload = json.load(mz_template) + + mz_payload['name'] = str(application) + # The Template will have + # Service Rules(0), Process Group Rules(1), Application Rules(2), + # Browser Monitors(3), HTTP Monitor(4), External Monitors(5), Manually Tagged Services (6), + # Manually Tagged Process Groups (7), Mobile Application (8), Custom Device Groups (9), # Service and Process Groups are different because they allow Key/Value Pairs - condition_payload = copy.deepcopy(mz_payload['rules'][0]['conditions'][0]) - condition_payload['comparisonInfo']['value']['key'] = "ENV" - condition_payload['comparisonInfo']['value']['value'] = str(env_zone) - mz_payload['rules'][0]['conditions'].append(condition_payload) - - del condition_payload - condition_payload = copy.deepcopy(mz_payload['rules'][1]['conditions'][0]) - condition_payload['comparisonInfo']['value']['key'] = "ENV" - condition_payload['comparisonInfo']['value']['value'] = str(env_zone) - mz_payload['rules'][1]['conditions'].append(condition_payload) - # Application, Browser Monitors, HTTP Monitor, External Monitors (in that order) + + # TODO Consolidate by checking if Key/Value Pair exists + mz_payload['rules'][0]['conditions'][0]['comparisonInfo']['value']['value'] = str( + application) + mz_payload['rules'][1]['conditions'][0]['comparisonInfo']['value']['value'] = str( + application) for rule_num in range(2, 10): - del condition_payload - condition_payload = copy.deepcopy(mz_payload['rules'][rule_num]['conditions'][0]) - condition_payload['comparisonInfo']['value']['key'] = "ENV: " + str(env_zone) - mz_payload['rules'][rule_num]['conditions'].append(condition_payload) + mz_payload['rules'][rule_num]['conditions'][0]['comparisonInfo']['value']['key'] = "APP: " + \ + str(application) + + if env_zone: + # If environment exists, rename MZ and add environment conditions + mz_payload['name'] = str(application) + " - " + str(env_zone) + + # Service and Process Groups are different because they allow Key/Value Pairs + condition_payload = copy.deepcopy( + mz_payload['rules'][0]['conditions'][0]) + condition_payload['comparisonInfo']['value']['key'] = "ENV" + condition_payload['comparisonInfo']['value']['value'] = str(env_zone) + mz_payload['rules'][0]['conditions'].append(condition_payload) + + del condition_payload + condition_payload = copy.deepcopy( + mz_payload['rules'][1]['conditions'][0]) + condition_payload['comparisonInfo']['value']['key'] = "ENV" + condition_payload['comparisonInfo']['value']['value'] = str(env_zone) + mz_payload['rules'][1]['conditions'].append(condition_payload) + # Application, Browser Monitors, HTTP Monitor, External Monitors (in that order) + + for rule_num in range(2, 10): + del condition_payload + condition_payload = copy.deepcopy( + mz_payload['rules'][rule_num]['conditions'][0]) + condition_payload['comparisonInfo']['value']['key'] = "ENV: " + \ + str(env_zone) + 
mz_payload['rules'][rule_num]['conditions'].append(
+                condition_payload)
+
+    return mz_payload
 
-    return mz_payload
 
 def add_management_zone(cluster, tenant, application, env_zone=None):
-    """Add Management Zone based on Application and Environment"""
-    mz_payload = generate_mz_payload(application, env_zone)
-
-    response = rh.config_post(
-        cluster,
-        tenant,
-        '/managementZones',
-        json=mz_payload
-    )
-    if "id" in response.json():
-        return (response.json())['id']
-    else:
-        return (response.text)
+    """Add Management Zone based on Application and Environment"""
+    mz_payload = generate_mz_payload(application, env_zone)
+
+    response = rh.make_api_call(cluster=cluster,
+                                tenant=tenant,
+                                method=rh.HTTP.POST,
+                                endpoint=ENDPOINT,
+                                json=mz_payload)
+    if "id" in response.json():
+        return (response.json())['id']
+    else:
+        return (response.text)
+
 
 def change_management_zone(cluster, tenant, mz_id, application, env_zone=None):
-    """Add Management Zone based on Application and Environment"""
-    mz_payload = generate_mz_payload(application, env_zone)
+    """Update Management Zone based on Application and Environment"""
+    mz_payload = generate_mz_payload(application, env_zone)
+
+    response = rh.make_api_call(cluster=cluster,
+                                tenant=tenant,
+                                method=rh.HTTP.PUT,
+                                endpoint=f"{ENDPOINT}/{mz_id}",
+                                json=mz_payload)
+    print(response.status_code)
 
-    response = rh.config_put(
-        cluster,
-        tenant,
-        'managementZones/' + str(mz_id),
-        json=mz_payload
-    )
-    print(response.status_code)
 
 def delete_management_zone_by_id(cluster, tenant, mz_id):
-    """Delete Management Zone by Management Zone ID"""
-    response = rh.config_delete(
-        cluster,
-        tenant,
-        "managementZones/" + str(mz_id),
-    )
-    print(response.status_code)
+    """Delete Management Zone by Management Zone ID"""
+    response = rh.make_api_call(cluster=cluster,
+                                tenant=tenant,
+                                method=rh.HTTP.DELETE,
+                                endpoint=f"{ENDPOINT}/{mz_id}")
+    print(response.status_code)
+
 
 def delete_management_zone_by_name(cluster, tenant, mz_name):
-    """Delete Management Zone by Management Zone Name"""
-    #TODO This function
-    return "TODO " + cluster + tenant + mz_name
+    """Delete Management Zone by Management Zone Name"""
+    # TODO This function
+    return "TODO " + cluster + tenant + mz_name
+
 
 def get_management_zone_list(cluster, tenant):
-    """Get all Management Zones in Environment"""
-    #TODO Cache Management Zone list for Env, and add a cleanup script to remove after run.
-    response = rh.config_get(
-        cluster,
-        tenant,
-        "managementZones",
-    )
-    mz_list_raw = response.json()
-    return mz_list_raw['values']
+    """Get all Management Zones in Environment"""
+    # TODO Cache Management Zone list for Env, and add a cleanup script to remove after run.
+    response = rh.make_api_call(cluster=cluster,
+                                tenant=tenant,
+                                endpoint=ENDPOINT)
+    mz_list_raw = response.json()
+    return mz_list_raw['values']
+
 
 def get_management_zone_id(cluster, tenant, mz_name):
-    """Get Management Zone ID of Management Zone Name"""
-    mz_list = get_management_zone_list(cluster, tenant)
+    """Get Management Zone ID of Management Zone Name"""
+    mz_list = get_management_zone_list(cluster, tenant)
 
-    for m_zone in mz_list:
-        if m_zone['name'] == mz_name:
-            return m_zone['id']
-    return None
+    for m_zone in mz_list:
+        if m_zone['name'] == mz_name:
+            return m_zone['id']
+    return None
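A quick usage sketch for the management zone helpers above, under the same user_variables.py assumption; it also assumes the management zone template JSON exists at the relative path generate_mz_payload expects. When env_zone is given, the zone is named "<application> - <env_zone>", which is the name get_management_zone_id must be asked for.

    import user_variables
    from dynatrace.tenant import management_zones

    cluster = user_variables.FULL_SET["mockserver1"]

    # Create a zone for application "MyApp" in environment "DEV" ...
    new_id = management_zones.add_management_zone(cluster, "tenant1", "MyApp", env_zone="DEV")

    # ... and resolve the ID back from the generated name.
    found_id = management_zones.get_management_zone_id(cluster, "tenant1", "MyApp - DEV")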
diff --git a/dynatrace/tenant/metrics.py b/dynatrace/tenant/metrics.py
new file mode 100644
index 0000000..ed78db9
--- /dev/null
+++ b/dynatrace/tenant/metrics.py
@@ -0,0 +1,24 @@
+from dynatrace.requests import request_handler as rh
+
+ENDPOINT = rh.TenantAPIs.METRICS
+
+
+def get_metrics(cluster, tenant, params=None):
+    """Gets the list of metrics and their details"""
+    next_page_key = 1
+    metrics = []
+
+    while next_page_key:
+        # Upon subsequent calls, clear all other params
+        if next_page_key != 1:
+            params = dict(nextPageKey=next_page_key)
+
+        response = rh.make_api_call(cluster=cluster,
+                                    tenant=tenant,
+                                    endpoint=ENDPOINT,
+                                    params=params)
+
+        metrics.extend(response.json().get('metrics'))
+        next_page_key = response.json().get('nextPageKey')
+
+    return metrics
diff --git a/dynatrace/tenant/request_attributes.py b/dynatrace/tenant/request_attributes.py
index 7d4d6bb..d1340ac 100644
--- a/dynatrace/tenant/request_attributes.py
+++ b/dynatrace/tenant/request_attributes.py
@@ -3,64 +3,73 @@
 import json
 from dynatrace.requests import request_handler as rh
 
-ENDPOINT = "/service/requestAttributes/"
+ENDPOINT = rh.TenantAPIs.REQUEST_ATTRIBUTES
+
 
 def pull_to_files(cluster, tenant, ignore_disabled=True):
-    """Pull files from an environment to local"""
-    # API Calls needed: Pull RA, take the ID and pull the details of each RA
-    all_ra_call = rh.config_get(cluster, tenant, ENDPOINT)
-    all_ra_json = all_ra_call.json()
-    all_ra_json = all_ra_json['values']
-    #print (json.dumps(all_ra_json, indent=2))
-    ra_file_list = []
-    for request_attribute in all_ra_json:
-        single_ra_call = rh.config_get(
-            cluster,
-            tenant,
-            ENDPOINT + str(request_attribute['id'])
-        )
-        if single_ra_call.status_code == 200:
-            single_ra_json = single_ra_call.json()
-            if single_ra_json['enabled'] and ignore_disabled:
-                single_ra_json.pop("metadata")
-                single_ra_json.pop("id")
-                ra_file_name = "jsons/request_attributes/" + str(single_ra_json['name']) + ".json"
-                with open(ra_file_name, 'w') as current_file:
-                    json.dump(single_ra_json, current_file, indent=2)
-                ra_file_list.append(ra_file_name)
-        else:
-            print (single_ra_call.status_code)
-    return ra_file_list
+    """Pull files from an environment to local"""
+    # API Calls needed: Pull RA, take the ID and pull the details of each RA
+    all_ra_call = rh.make_api_call(cluster=cluster,
+                                   tenant=tenant,
+                                   endpoint=ENDPOINT)
+    all_ra_json = all_ra_call.json()
+    all_ra_json = all_ra_json['values']
+    # print (json.dumps(all_ra_json, indent=2))
+    ra_file_list = []
+    for request_attribute in all_ra_json:
+        single_ra_call = rh.make_api_call(cluster=cluster,
+                                          tenant=tenant,
+                                          endpoint=f"{ENDPOINT}/{request_attribute['id']}")
+        if single_ra_call.status_code == 200:
+            single_ra_json = single_ra_call.json()
+            if single_ra_json['enabled'] or not ignore_disabled:
+                single_ra_json.pop("metadata")
+                single_ra_json.pop("id")
+                ra_file_name = "jsons/request_attributes/" + \
+                    str(single_ra_json['name']) +
".json" + with open(ra_file_name, 'w') as current_file: + json.dump(single_ra_json, current_file, indent=2) + ra_file_list.append(ra_file_name) + else: + print(single_ra_call.status_code) + return ra_file_list + def push_from_files(file_list, cluster, tenant): - """Push Request Attributes in JSONs to a tenant""" - - # Checks for Existing RAs to update them put request rather than a post that would fail - existing_ra_get = rh.config_get(cluster, tenant, ENDPOINT) - existing_ra_json = existing_ra_get.json() - existing_ra_json = existing_ra_json['values'] - existing_ra_list = {} - for existing_ra in existing_ra_json: - existing_ra_list["jsons/request_attributes/" + str(existing_ra['name']) + ".json"] = existing_ra['id'] + """Push Request Attributes in JSONs to a tenant""" + + # Checks for Existing RAs to update them put request rather than a post that would fail + existing_ra_get = rh.make_api_call(cluster=cluster, tenant=tenant, endpoint=ENDPOINT) + existing_ra_json = existing_ra_get.json() + existing_ra_json = existing_ra_json['values'] + existing_ra_list = {} + for existing_ra in existing_ra_json: + existing_ra_list["jsons/request_attributes/" + + str(existing_ra['name']) + ".json"] = existing_ra['id'] - for file in file_list: - with open(file, 'r') as ra_file: - ra_json = json.load(ra_file) - if file in existing_ra_list: - single_ra_post = rh.config_put( - cluster, - tenant, - ENDPOINT + existing_ra_list[file], - json=ra_json - ) - else: - single_ra_post = rh.config_post( - cluster, - tenant, - ENDPOINT, - json=ra_json - ) - if single_ra_post.status_code >= 400: - print("Error with " + file + ". Status Code: " + str(single_ra_post.status_code)) - else: - print("Success " + file + " " + single_ra_post.text) + for file in file_list: + with open(file, 'r') as ra_file: + ra_json = json.load(ra_file) + if file in existing_ra_list: + single_ra_post = rh.make_api_call( + cluster=cluster, + tenant=tenant, + method=rh.HTTP.PUT, + endpoint=f"{ENDPOINT}/{existing_ra_list[file]}", + json=ra_json + ) + else: + single_ra_post = rh.make_api_call( + cluster=cluster, + tenant=tenant, + method=rh.HTTP.POST, + endpoint=ENDPOINT, + json=ra_json + ) + if single_ra_post.status_code >= 400: + # NOTE: what about the check response in req handler!? + # That will throw an exception first, which this should except + print("Error with " + file + ". 
Status Code: " + + str(single_ra_post.status_code)) + else: + print("Success " + file + " " + single_ra_post.text) diff --git a/dynatrace/tenant/request_naming.py b/dynatrace/tenant/request_naming.py index b62f67c..e284524 100644 --- a/dynatrace/tenant/request_naming.py +++ b/dynatrace/tenant/request_naming.py @@ -5,43 +5,48 @@ import json from dynatrace.requests import request_handler as rh +ENDPOINT = rh.TenantAPIs.REQUEST_NAMING + + def pull_to_files(cluster, tenant, ignore_disabled=True): - """Pull Service Naming Rules to Files""" - all_rules_call = rh.config_get(cluster, tenant, "/service/requestNaming") - all_rules_list = all_rules_call.json() - all_rules_list = all_rules_list['values'] - # print (json.dumps(all_rules_list, indent=2)) - - rules_file_list = [] - rule_num = 0 - for naming_rule in all_rules_list: - rule_call = rh.config_get( - cluster, - tenant, - "/service/requestNaming/" + str(naming_rule['id']) - ) - if rule_call.status_code == 200: - rule_json = rule_call.json() - if rule_json['enabled'] and ignore_disabled: - rule_json.pop('metadata') - rule_json.pop('id') - rule_file_name = "jsons/request_naming/" + str(rule_num) + ".json" - with open(rule_file_name, 'w') as current_file: - json.dump(rule_json, current_file, indent=2) - rules_file_list.append(rule_file_name) - else: - print (rule_call.status_code) - rule_num = rule_num + 1 - return rules_file_list + """Pull Service Naming Rules to Files""" + all_rules_call = rh.make_api_call(cluster=cluster, + tenant=tenant, + endpoint=ENDPOINT) + all_rules_list = all_rules_call.json() + all_rules_list = all_rules_list['values'] + # print (json.dumps(all_rules_list, indent=2)) + + rules_file_list = [] + rule_num = 0 + for naming_rule in all_rules_list: + rule_call = rh.make_api_call(cluster=cluster, + tenant=tenant, + endpoint=f"{ENDPOINT}/{naming_rule['id']}") + if rule_call.status_code == 200: + rule_json = rule_call.json() + if rule_json['enabled'] and ignore_disabled: + rule_json.pop('metadata') + rule_json.pop('id') + rule_file_name = f"jsons/request_naming/{rule_num}.json" + with open(rule_file_name, 'w') as current_file: + json.dump(rule_json, current_file, indent=2) + rules_file_list.append(rule_file_name) + else: + print(rule_call.status_code) + rule_num = rule_num + 1 + return rules_file_list + def push_from_files(file_list, cluster, tenant): - """Push Service Naming Rules from Files""" - #TODO add safeties - for file_name in file_list: - print (file_name) - -def generate_file_list (): - file_list = os.listdir("./jsons/request_naming/") - for f in file_list: - print(str.isdigit(f)) - # print(file_list.sort(key=lambda f: filter(str.isdigit, f))) + """Push Service Naming Rules from Files""" + # TODO add safeties + for file_name in file_list: + print(file_name) + + +def generate_file_list(): + file_list = os.listdir("./jsons/request_naming/") + for f in file_list: + print(str.isdigit(f)) + # print(file_list.sort(key=lambda f: filter(str.isdigit, f))) diff --git a/dynatrace/tenant/timeseries.py b/dynatrace/tenant/timeseries.py new file mode 100644 index 0000000..be9e8ce --- /dev/null +++ b/dynatrace/tenant/timeseries.py @@ -0,0 +1,34 @@ +from dynatrace.requests import request_handler as rh + +ENDPOINT = "timeseries/" + + +def get_timeseries_list(cluster, tenant, params=None): + """Get List of Timeseries Metics""" + response = rh.make_api_call(cluster, tenant, ENDPOINT, params=params) + return response.json() + + +def get_timeseries_metric(cluster, tenant, metric, params=None): + """Get Timeseries Metric""" + # Chose to do 
diff --git a/dynatrace/tenant/request_naming.py b/dynatrace/tenant/request_naming.py
index b62f67c..e284524 100644
--- a/dynatrace/tenant/request_naming.py
+++ b/dynatrace/tenant/request_naming.py
@@ -5,43 +5,48 @@
 import json
 from dynatrace.requests import request_handler as rh
 
+ENDPOINT = rh.TenantAPIs.REQUEST_NAMING
+
+
 def pull_to_files(cluster, tenant, ignore_disabled=True):
-    """Pull Service Naming Rules to Files"""
-    all_rules_call = rh.config_get(cluster, tenant, "/service/requestNaming")
-    all_rules_list = all_rules_call.json()
-    all_rules_list = all_rules_list['values']
-    # print (json.dumps(all_rules_list, indent=2))
-
-    rules_file_list = []
-    rule_num = 0
-    for naming_rule in all_rules_list:
-        rule_call = rh.config_get(
-            cluster,
-            tenant,
-            "/service/requestNaming/" + str(naming_rule['id'])
-        )
-        if rule_call.status_code == 200:
-            rule_json = rule_call.json()
-            if rule_json['enabled'] and ignore_disabled:
-                rule_json.pop('metadata')
-                rule_json.pop('id')
-                rule_file_name = "jsons/request_naming/" + str(rule_num) + ".json"
-                with open(rule_file_name, 'w') as current_file:
-                    json.dump(rule_json, current_file, indent=2)
-                rules_file_list.append(rule_file_name)
-        else:
-            print (rule_call.status_code)
-        rule_num = rule_num + 1
-    return rules_file_list
+    """Pull Service Naming Rules to Files"""
+    all_rules_call = rh.make_api_call(cluster=cluster,
+                                      tenant=tenant,
+                                      endpoint=ENDPOINT)
+    all_rules_list = all_rules_call.json()
+    all_rules_list = all_rules_list['values']
+    # print (json.dumps(all_rules_list, indent=2))
+
+    rules_file_list = []
+    rule_num = 0
+    for naming_rule in all_rules_list:
+        rule_call = rh.make_api_call(cluster=cluster,
+                                     tenant=tenant,
+                                     endpoint=f"{ENDPOINT}/{naming_rule['id']}")
+        if rule_call.status_code == 200:
+            rule_json = rule_call.json()
+            if rule_json['enabled'] or not ignore_disabled:
+                rule_json.pop('metadata')
+                rule_json.pop('id')
+                rule_file_name = f"jsons/request_naming/{rule_num}.json"
+                with open(rule_file_name, 'w') as current_file:
+                    json.dump(rule_json, current_file, indent=2)
+                rules_file_list.append(rule_file_name)
+        else:
+            print(rule_call.status_code)
+        rule_num = rule_num + 1
+    return rules_file_list
+
 
 def push_from_files(file_list, cluster, tenant):
-    """Push Service Naming Rules from Files"""
-    #TODO add safeties
-    for file_name in file_list:
-        print (file_name)
-
-def generate_file_list ():
-    file_list = os.listdir("./jsons/request_naming/")
-    for f in file_list:
-        print(str.isdigit(f))
-    # print(file_list.sort(key=lambda f: filter(str.isdigit, f)))
+    """Push Service Naming Rules from Files"""
+    # TODO add safeties
+    for file_name in file_list:
+        print(file_name)
+
+
+def generate_file_list():
+    file_list = os.listdir("./jsons/request_naming/")
+    for f in file_list:
+        print(str.isdigit(f))
+    # print(file_list.sort(key=lambda f: filter(str.isdigit, f)))
diff --git a/dynatrace/tenant/timeseries.py b/dynatrace/tenant/timeseries.py
new file mode 100644
index 0000000..be9e8ce
--- /dev/null
+++ b/dynatrace/tenant/timeseries.py
@@ -0,0 +1,34 @@
+from dynatrace.requests import request_handler as rh
+
+ENDPOINT = "timeseries/"
+
+
+def get_timeseries_list(cluster, tenant, params=None):
+    """Get List of Timeseries Metrics"""
+    response = rh.make_api_call(cluster, tenant, ENDPOINT, params=params)
+    return response.json()
+
+
+def get_timeseries_metric(cluster, tenant, metric, params=None):
+    """Get Timeseries Metric"""
+    # Chose to do GET, but could also be done as POST. Don't think there are any advantages to POST
+    response = rh.make_api_call(cluster, tenant, ENDPOINT + metric, params=params)
+    return response.json()
+
+
+def create_custom_metric(cluster, tenant, metric, json, params=None):
+    response = rh.make_api_call(cluster=cluster,
+                                tenant=tenant,
+                                endpoint=f"{ENDPOINT}{metric}",
+                                params=params,
+                                method=rh.HTTP.PUT,
+                                json=json)
+    return response.status_code
+
+
+def delete_custom_metic(cluster, tenant, metric):
+    response = rh.make_api_call(cluster=cluster,
+                                tenant=tenant,
+                                method=rh.HTTP.DELETE,
+                                endpoint=f"{ENDPOINT}{metric}")
+    return response.status_code
diff --git a/dynatrace/tenant/topology/__init__.py b/dynatrace/tenant/topology/__init__.py
new file mode 100644
index 0000000..17f1f19
--- /dev/null
+++ b/dynatrace/tenant/topology/__init__.py
@@ -0,0 +1,7 @@
+from dynatrace.tenant.topology.applications import *
+from dynatrace.tenant.topology.custom import *
+from dynatrace.tenant.topology.hosts import *
+from dynatrace.tenant.topology.process import *
+from dynatrace.tenant.topology.process_groups import *
+from dynatrace.tenant.topology.services import *
+from dynatrace.tenant.topology.shared import *
diff --git a/dynatrace/tenant/topology/applications.py b/dynatrace/tenant/topology/applications.py
new file mode 100644
index 0000000..083ee34
--- /dev/null
+++ b/dynatrace/tenant/topology/applications.py
@@ -0,0 +1,82 @@
+"""Application operations from the Dynatrace API"""
+# Applications needs a separate definition since the url is not the same (not /infrastructure/)
+from dynatrace.requests import request_handler as rh
+
+ENDPOINT = f"{rh.TenantAPIs.V1_TOPOLOGY}/applications"
+
+
+def get_applications_tenantwide(cluster, tenant):
+    """Get Information for all applications in a tenant"""
+    response = rh.make_api_call(cluster=cluster,
+                                tenant=tenant,
+                                endpoint=ENDPOINT)
+    return response.json()
+
+
+def get_application(cluster, tenant, entity):
+    """Get Information on one application in a tenant"""
+    response = rh.make_api_call(cluster=cluster,
+                                tenant=tenant,
+                                endpoint=f"{ENDPOINT}/{entity}")
+    return response.json()
+
+
+def set_application_properties(cluster, tenant, entity, prop_json):
+    """Update properties of application entity"""
+    response = rh.make_api_call(cluster=cluster,
+                                tenant=tenant,
+                                endpoint=f"{ENDPOINT}/{entity}",
+                                method=rh.HTTP.POST,
+                                json=prop_json)
+    return response.json()
+
+
+def get_application_count_tenantwide(cluster, tenant):
+    """Get total count for all applications in a tenant"""
+    params = {
+        "relativeTime": "day",
+        "includeDetails": "false"
+    }
+
+    response = rh.make_api_call(cluster=cluster,
+                                tenant=tenant,
+                                endpoint=ENDPOINT,
+                                params=params)
+    env_app_count = len(response.json())
+    return env_app_count
+
+
+def get_application_count_clusterwide(cluster):
+    """Get total count for all applications in cluster"""
+    cluster_app_count = 0
+    for env_key in cluster['tenant']:
+        cluster_app_count = cluster_app_count \
+            + get_application_count_tenantwide(cluster,
+                                               env_key)
+    return cluster_app_count
+
+
+def get_application_count_setwide(full_set):
+    full_set_app_count = 0
+    for cluster_items in full_set.values():
+        full_set_app_count = full_set_app_count \
+            + get_application_count_clusterwide(cluster_items)
+    return full_set_app_count
+
+
+def add_application_tags(cluster, tenant, entity, tag_list):
+    """Add tags to application"""
+    if tag_list is None:
+        raise TypeError("tag_list cannot be None type")
+    tag_json = {
+        'tags': tag_list
+    }
+    return set_application_properties(cluster, tenant, entity, tag_json)
+
+
+def get_application_baseline(cluster, tenant, entity):
+    """Get baselines on one application in a tenant"""
+    response = rh.make_api_call(cluster=cluster,
+                                tenant=tenant,
+                                endpoint=f"{ENDPOINT}/{entity}/baseline")
+    return response.json()
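add_application_tags is a thin wrapper that POSTs a {'tags': [...]} payload through set_application_properties. A sketch with a placeholder entity ID; the tag list shape matches the tags.json fixtures used by the tests in this change.

    import user_variables
    from dynatrace.tenant.topology import applications

    cluster = user_variables.FULL_SET["mockserver1"]

    # Attach two plain tags to an application entity (ID is a placeholder).
    applications.add_application_tags(cluster, "tenant1",
                                      "APPLICATION-ABC123DEF456GHI7",
                                      ["demo", "example"])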
diff --git a/dynatrace/tenant/topology/custom.py b/dynatrace/tenant/topology/custom.py
new file mode 100644
index 0000000..658f62f
--- /dev/null
+++ b/dynatrace/tenant/topology/custom.py
@@ -0,0 +1,6 @@
+import dynatrace.tenant.topology.shared as topology_shared
+
+
+def set_custom_properties(cluster, tenant, entity, prop_json):
+    """Update properties of custom device entity"""
+    return topology_shared.set_env_layer_properties(cluster, tenant, 'custom', entity, prop_json)
diff --git a/dynatrace/tenant/topology/hosts.py b/dynatrace/tenant/topology/hosts.py
new file mode 100644
index 0000000..822725c
--- /dev/null
+++ b/dynatrace/tenant/topology/hosts.py
@@ -0,0 +1,75 @@
+"""Host operations from the Dynatrace API"""
+import dynatrace.tenant.topology.shared as topology_shared
+from dynatrace.requests import request_handler as rh
+
+
+def get_hosts_tenantwide(cluster, tenant, params=None):
+    """Get Information for all hosts in a tenant"""
+    return topology_shared.get_env_layer_entities(cluster, tenant, 'hosts', params=params)
+
+
+def get_host(cluster, tenant, entity, params=None):
+    """Get Information on one host in a tenant"""
+    return topology_shared.get_env_layer_entity(cluster, tenant, 'hosts', entity, params=params)
+
+
+def set_host_properties(cluster, tenant, entity, prop_json):
+    """Update properties of host entity"""
+    return topology_shared.set_env_layer_properties(cluster, tenant, 'hosts', entity, prop_json)
+
+
+def get_host_count_tenantwide(cluster, tenant, params=None):
+    """Get total count for all hosts in a tenant"""
+    return topology_shared.get_env_layer_count(cluster, tenant, 'hosts', params=params)
+
+
+def get_host_count_clusterwide(cluster, params=None):
+    """Get total count for all hosts in cluster"""
+    return topology_shared.get_cluster_layer_count(cluster, 'hosts', params=params)
+
+
+def get_host_count_setwide(full_set, params=None):
+    """Get total count of hosts for all clusters defined in variable file"""
+    return topology_shared.get_set_layer_count(full_set, 'hosts', params=params)
+
+
+def add_host_tags(cluster, tenant, entity, tag_list):
+    """Add tags to host"""
+    return topology_shared.add_env_layer_tags(cluster, tenant, 'hosts', entity, tag_list)
+
+
+def delete_host_tag(cluster, tenant, entity, tag):
+    """Remove single tag from host"""
+    if tag is None:
+        raise TypeError("Tag cannot be None!")
+    return rh.make_api_call(cluster=cluster,
+                            tenant=tenant,
+                            method=rh.HTTP.DELETE,
+                            endpoint=f"{rh.TenantAPIs.V1_TOPOLOGY}/infrastructure/hosts/{entity}/tags/{tag}")
+
+
+def get_host_units_tenantwide(cluster, tenant, params=None):
+    consumed_host_units = 0
+    host_list = get_hosts_tenantwide(cluster, tenant, params=params)
+    for host in host_list:
+        consumed_host_units = consumed_host_units + host['consumedHostUnits']
+    return consumed_host_units
+
+
+def get_oneagents_tenantwide(cluster, tenant, params=None):
+    oneagents = []
+    next_page_key = 1
+
+    while next_page_key:
+        if next_page_key != 1:
+            params = dict(params or {}, nextPageKey=next_page_key)
+
+        response = rh.make_api_call(cluster=cluster,
+                                    endpoint=rh.TenantAPIs.ONEAGENTS,
+                                    tenant=tenant,
+                                    params=params)
+
+        oneagents.extend(response.json().get('hosts'))
+        next_page_key = response.json().get('nextPageKey')
+
+
return oneagents diff --git a/dynatrace/tenant/topology/process.py b/dynatrace/tenant/topology/process.py new file mode 100644 index 0000000..fb6c523 --- /dev/null +++ b/dynatrace/tenant/topology/process.py @@ -0,0 +1,12 @@ +"""Process operations from the Dynatrace API""" +import dynatrace.tenant.topology.shared as topology_shared + + +def get_processes_tenantwide(cluster, tenant, params=None): + """Get Information for all processes in a tenant""" + return topology_shared.get_env_layer_entities(cluster, tenant, 'processes', params=params) + + +def get_process(cluster, tenant, entity, params=None): + """Get Information on one process for in a tenant""" + return topology_shared.get_env_layer_entity(cluster, tenant, 'processes', entity, params=params) diff --git a/dynatrace/tenant/topology/process_groups.py b/dynatrace/tenant/topology/process_groups.py new file mode 100644 index 0000000..e45d912 --- /dev/null +++ b/dynatrace/tenant/topology/process_groups.py @@ -0,0 +1,37 @@ +"""Process Group operations from the Dynatrace API""" +import dynatrace.tenant.topology.shared as topology_shared + + +def get_process_groups_tenantwide(cluster, tenant): + """Get Information for all process-groups in a tenant""" + return topology_shared.get_env_layer_entities(cluster, tenant, 'process-groups') + + +def get_process_group(cluster, tenant, entity): + """Get Information on one process-group for in a tenant""" + return topology_shared.get_env_layer_entity(cluster, tenant, 'process-groups', entity) + + +def set_process_group_properties(cluster, tenant, entity, prop_json): + """Update properties of process-group entity""" + return topology_shared.set_env_layer_properties(cluster, tenant, 'process-groups', entity, prop_json) + + +def get_process_group_count_tenantwide(cluster, tenant, params=None): + """Get total count for all process-groups in a tenant""" + return topology_shared.get_env_layer_count(cluster, tenant, 'process-groups', params=params) + + +def get_process_group_count_clusterwide(cluster, params=None): + """Get total count for all process-groups in cluster""" + return topology_shared.get_cluster_layer_count(cluster, 'process-groups', params=params) + + +def get_process_group_count_setwide(full_set, params=None): + """Get total count of process-groups for all clusters defined in variable file""" + return topology_shared.get_set_layer_count(full_set, 'process-groups', params=params) + + +def add_process_group_tags(cluster, tenant, entity, tag_list): + """Add tags to a process group""" + return topology_shared.add_env_layer_tags(cluster, tenant, 'process-groups', entity, tag_list) diff --git a/dynatrace/tenant/topology/services.py b/dynatrace/tenant/topology/services.py new file mode 100644 index 0000000..6b4fe58 --- /dev/null +++ b/dynatrace/tenant/topology/services.py @@ -0,0 +1,37 @@ +"""Service operations from the Dynatrace API""" +import dynatrace.tenant.topology.shared as topology_shared + + +def get_services_tenantwide(cluster, tenant): + """Get Information for all services in a tenant""" + return topology_shared.get_env_layer_entities(cluster, tenant, 'services') + + +def get_service(cluster, tenant, entity): + """Get Information on one service for in a tenant""" + return topology_shared.get_env_layer_entity(cluster, tenant, 'services', entity) + + +def set_service_properties(cluster, tenant, entity, prop_json): + """Update properties of service entity""" + return topology_shared.set_env_layer_properties(cluster, tenant, 'services', entity, prop_json) + + +def 
get_service_count_tenantwide(cluster, tenant, params=None): + """Get total count for all services in a tenant""" + return topology_shared.get_env_layer_count(cluster, tenant, 'services', params=params) + + +def get_service_count_clusterwide(cluster, params=None): + """Get total count for all services in cluster""" + return topology_shared.get_cluster_layer_count(cluster, 'services', params=params) + + +def get_service_count_setwide(full_set, params=None): + """Get total count of services for all clusters definied in variable file""" + return topology_shared.get_set_layer_count(full_set, 'services', params=params) + + +def add_service_tags(cluster, tenant, entity, tag_list): + """Add tags to a service""" + return topology_shared.add_env_layer_tags(cluster, tenant, 'services', entity, tag_list) diff --git a/dynatrace/tenant/topology/shared.py b/dynatrace/tenant/topology/shared.py new file mode 100644 index 0000000..32f35ef --- /dev/null +++ b/dynatrace/tenant/topology/shared.py @@ -0,0 +1,138 @@ +"""Shared topology operations for multiple layers from the Dynatrace API""" +from dynatrace.requests import request_handler as rh +# Layer Compatibility +# 1. Get all entities - application, host, process, process group, service +# 1a. Count all entities +# 2. Get specific entity - application, host process, process group, service +# 3. Update properties of entity - application, custom, host, process group, service + +ENDPOINT_SUFFIX = { + 'applications': 'applications', + 'custom': "infrastructure/custom", + 'hosts': "infrastructure/hosts", + 'processes': "infrastructure/processes", + 'process-groups': "infrastructure/process-groups", + 'services': "infrastructure/services" +} + + +def check_valid_layer(layer, layer_list): + """Check if the operation is valid for the layer""" + if layer is None or layer_list is None: + raise TypeError('Provide layer and layer_list!') + if layer not in layer_list: + raise ValueError( + layer + " layer does not exist or is invalid for this use!") + + +def get_env_layer_entities(cluster, tenant, layer, params=None): + """Get all Entities of Specified Layer""" + layer_list = ['applications', 'hosts', + 'processes', 'process-groups', 'services'] + check_valid_layer(layer, layer_list) + + if not params: + params = {} + + response = rh.make_api_call( + cluster=cluster, + tenant=tenant, + endpoint=f"{rh.TenantAPIs.V1_TOPOLOGY}/{ENDPOINT_SUFFIX[layer]}", + params=params + ) + return response.json() + + +def get_env_layer_entity(cluster, tenant, layer, entity, params=None): + """Get Entity Information for Specified Layer""" + layer_list = ['applications', 'hosts', + 'processes', 'process-groups', 'services'] + check_valid_layer(layer, layer_list) + + if not params: + params = {} + + response = rh.make_api_call( + cluster=cluster, + tenant=tenant, + endpoint=f"{rh.TenantAPIs.V1_TOPOLOGY}/{ENDPOINT_SUFFIX[layer]}/{entity}", + params=params + ) + return response.json() + + +def set_env_layer_properties(cluster, tenant, layer, entity, prop_json): + """Update Properties of Entity""" + layer_list = ['applications', 'custom', + 'hosts', 'process-groups', 'services'] + check_valid_layer(layer, layer_list) + response = rh.make_api_call( + cluster=cluster, + tenant=tenant, + method=rh.HTTP.POST, + endpoint=f"{rh.TenantAPIs.V1_TOPOLOGY}/{ENDPOINT_SUFFIX[layer]}/{entity}", + json=prop_json + ) + return response.status_code + + +def get_env_layer_count(cluster, tenant, layer, params=None): + """Get total hosts in an environment""" + if not params: + params = {} + + layer_list = 
['applications', 'hosts', + 'processes', 'process-groups', 'services'] + + if 'relativeTime' not in params.keys(): + params['relativeTime'] = "day" + if 'includeDetails' not in params.keys(): + params['includeDetails'] = False + + check_valid_layer(layer, layer_list) + response = rh.make_api_call(cluster=cluster, + tenant=tenant, + endpoint=f"{rh.TenantAPIs.V1_TOPOLOGY}/{ENDPOINT_SUFFIX[layer]}", + params=params) + env_layer_count = len(response.json()) + return env_layer_count + + +def get_cluster_layer_count(cluster, layer, params=None): + """Get total count for all environments in cluster""" + + if not params: + params = {} + + cluster_layer_count = 0 + for env_key in cluster['tenant']: + cluster_layer_count += get_env_layer_count(cluster=cluster, + tenant=env_key, + layer=layer, + params=params) + return cluster_layer_count + + +def get_set_layer_count(full_set, layer, params=None): + """Get total count for all clusters definied in variable file""" + if not params: + params = {} + + full_set_layer_count = 0 + for cluster in full_set.values(): + full_set_layer_count += get_cluster_layer_count(cluster, + layer, + params) + return full_set_layer_count + + +def add_env_layer_tags(cluster, tenant, layer, entity, tag_list): + layer_list = ['applications', 'hosts', + 'custom', 'process-groups', 'services'] + check_valid_layer(layer, layer_list) + if not tag_list: + raise TypeError("tag_list cannot be None type") + tag_json = { + 'tags': tag_list + } + return set_env_layer_properties(cluster, tenant, layer, entity, tag_json) \ No newline at end of file diff --git a/dynatrace/timeseries/timeseries.py b/dynatrace/timeseries/timeseries.py deleted file mode 100644 index 804f2b4..0000000 --- a/dynatrace/timeseries/timeseries.py +++ /dev/null @@ -1,22 +0,0 @@ -from dynatrace.requests import request_handler as rh - -ENDPOINT = "timeseries/" - -def get_timeseries_list(cluster, tenant, params=None): - """Get List of Timeseries Metics""" - response = rh.env_get(cluster, tenant, ENDPOINT, params=params) - return response.json() - -def get_timeseries_metric (cluster, tenant, metric, params=None): - """Get Timeseries Metric""" - #Chose to do GET, but could also be done as POST. 
Don't think there are any advantages to post - response = rh.env_get(cluster, tenant, ENDPOINT + metric, params=params) - return response.json() - -def create_custom_metric (cluster, tenant, metric, json, params=None): - response = rh.env_put(cluster, tenant, ENDPOINT + metric, params=params, json=json) - return response.status_code - -def delete_custom_metic (cluster, tenant, metric): - response = rh.env_delete(cluster, tenant, ENDPOINT + metric) - return response.status_code \ No newline at end of file diff --git a/dynatrace/topology/applications.py b/dynatrace/topology/applications.py deleted file mode 100644 index 1581402..0000000 --- a/dynatrace/topology/applications.py +++ /dev/null @@ -1,61 +0,0 @@ -"""Application operations from the Dynatrace API""" -# Applications needs a seperate definition since the url is not the same (not /infrastructre/) -from dynatrace.requests import request_handler as rh - -ENDPOINT = "entity/applications/" - -def get_applications_tenantwide(cluster, tenant): - """Get Information for all applications in a tenant""" - response = rh.env_get(cluster, tenant, ENDPOINT) - return response.json() - -def get_application(cluster, tenant, entity): - """Get Information on one application for in a tenant""" - response = rh.env_get(cluster, tenant, ENDPOINT + entity) - return response.json() - -def set_application_properties(cluster, tenant, entity, prop_json): - """Update properties of application entity""" - response = rh.env_post(cluster, tenant, ENDPOINT + entity, json=prop_json) - return response.json() - -def get_application_count_tenantwide(cluster, tenant): - """Get total count for all applications in a tenant""" - params = { - "relativeTime" : "day", - "includeDetails" : "false" - } - - response = rh.env_get(cluster, tenant, ENDPOINT, params=params) - env_app_count = len(response.json()) - return env_app_count - -def get_application_count_clusterwide(cluster): - """Get total count for all applications in cluster""" - cluster_app_count = 0 - for env_key in cluster['tenant']: - cluster_app_count = cluster_app_count + get_application_count_tenantwide( - cluster, - env_key - ) - return cluster_app_count - -def get_application_count_setwide(full_set): - full_set_app_count = 0 - for cluster_items in full_set.values(): - full_set_app_count = full_set_app_count + get_application_count_clusterwide(cluster_items) - return full_set_app_count - -def add_application_tags (cluster, tenant, entity, tag_list): - """Add tags to application""" - if tag_list is None: - raise Exception ("tag_list cannot be None type") - tag_json = { - 'tags' : tag_list - } - return set_application_properties(cluster, tenant, entity, tag_json) - -def get_application_baseline(cluster, tenant, entity): - """Get baselines on one application for in a tenant""" - response = rh.env_get(cluster, tenant, ENDPOINT + entity + "/baseline") - return response.json() diff --git a/dynatrace/topology/custom.py b/dynatrace/topology/custom.py deleted file mode 100644 index bf5c43e..0000000 --- a/dynatrace/topology/custom.py +++ /dev/null @@ -1,5 +0,0 @@ -import dynatrace.topology.shared as topology_shared - -def set_custom_properties(cluster, tenant, entity, prop_json): - """Update properties of process_group entity""" - return topology_shared.set_env_layer_properties(cluster, tenant, 'custom', entity, prop_json) diff --git a/dynatrace/topology/hosts.py b/dynatrace/topology/hosts.py deleted file mode 100644 index 009fa35..0000000 --- a/dynatrace/topology/hosts.py +++ /dev/null @@ -1,44 +0,0 @@ -"""Host operations 
from the Dynatrace API""" -import dynatrace.topology.shared as topology_shared -from dynatrace.requests import request_handler as rh - -def get_hosts_tenantwide(cluster, tenant, params=None): - """Get Information for all hosts in a tenant""" - return topology_shared.get_env_layer_entities(cluster, tenant, 'hosts', params=params) - -def get_host(cluster, tenant, entity, params=None): - """Get Information on one host for in a tenant""" - return topology_shared.get_env_layer_entity(cluster, tenant,'hosts', entity, params=params) - -def set_host_properties(cluster, tenant, entity, prop_json): - """Update properties of host entity""" - return topology_shared.set_env_layer_properties(cluster, tenant, 'hosts', entity, prop_json) - -def get_host_count_tenantwide(cluster, tenant, params=None): - """Get total count for all hosts in a tenant""" - return topology_shared.get_env_layer_count(cluster, tenant, 'hosts', params=params) - -def get_host_count_clusterwide(cluster, params=None): - """Get total count for all hosts in cluster""" - return topology_shared.get_cluster_layer_count(cluster, 'hosts', params=params) - -def get_host_count_setwide(full_set, params=None): - """Get total count of hosts for all clusters definied in variable file""" - return topology_shared.get_set_layer_count(full_set, 'hosts', params=params) - -def add_host_tags (cluster, tenant, entity, tag_list): - """Add tags to host""" - return topology_shared.add_env_layer_tags (cluster, tenant, 'hosts', entity, tag_list) - -def delete_host_tag (cluster, tenant, entity, tag): - """Remove single tag from host""" - if tag is None: - raise Exception ("Tag cannot be None!") - return rh.env_delete(cluster, tenant, "entity/infrastructure/hosts/" + entity + "/tags/" + str(tag)) - -def get_host_units_tenantwide(cluster, tenant, params=None): - consumed_host_units = 0 - host_list = get_hosts_tenantwide (cluster, tenant, params=params) - for host in host_list: - consumed_host_units = consumed_host_units + host['consumedHostUnits'] - return consumed_host_units \ No newline at end of file diff --git a/dynatrace/topology/process.py b/dynatrace/topology/process.py deleted file mode 100644 index 7b9aeca..0000000 --- a/dynatrace/topology/process.py +++ /dev/null @@ -1,11 +0,0 @@ -"""Process operations from the Dynatrace API""" -import dynatrace.topology.shared as topology_shared -from dynatrace.requests import request_handler as rh - -def get_processes_tenantwide(cluster, tenant, params=None): - """Get Information for all processes in a tenant""" - return topology_shared.get_env_layer_entities(cluster, tenant, 'processes', params=params) - -def get_process(cluster, tenant, entity, params=None): - """Get Information on one process for in a tenant""" - return topology_shared.get_env_layer_entity(cluster, tenant,'processes', entity, params=params) \ No newline at end of file diff --git a/dynatrace/topology/process_groups.py b/dynatrace/topology/process_groups.py deleted file mode 100644 index 5ff9692..0000000 --- a/dynatrace/topology/process_groups.py +++ /dev/null @@ -1,31 +0,0 @@ -"""Process Group operations from the Dynatrace API""" -import dynatrace.topology.shared as topology_shared -from dynatrace.requests import request_handler as rh - -def get_process_groups_tenantwide(cluster, tenant): - """Get Information for all process-groups in a tenant""" - return topology_shared.get_env_layer_entities(cluster, tenant, 'process-groups') - -def get_process_group(cluster, tenant, entity): - """Get Information on one process-group for in a tenant""" - return 
topology_shared.get_env_layer_entity(cluster, tenant,'process-groups', entity) - -def set_process_group_properties(cluster, tenant, entity, prop_json): - """Update properties of process-group entity""" - return topology_shared.set_env_layer_properties(cluster, tenant, 'process-groups', entity, prop_json) - -def get_process_group_count_tenantwide(cluster, tenant, params=None): - """Get total count for all process-groups in a tenant""" - return topology_shared.get_env_layer_count(cluster, tenant, 'process-groups', params=params) - -def get_process_group_count_clusterwide(cluster, params=None): - """Get total count for all process-groups in cluster""" - return topology_shared.get_cluster_layer_count(cluster, 'process-groups', params=params) - -def get_process_group_count_setwide(full_set, params=None): - """Get total count of process-groups for all clusters defined in variable file""" - return topology_shared.get_set_layer_count(full_set, 'process-groups', params=params) - -def add_process_group_tags (cluster, tenant, entity, tag_list): - """Add tags to a process group""" - return topology_shared.add_env_layer_tags (cluster, tenant, 'process-groups', entity, tag_list) \ No newline at end of file diff --git a/dynatrace/topology/services.py b/dynatrace/topology/services.py deleted file mode 100644 index 5c4abc4..0000000 --- a/dynatrace/topology/services.py +++ /dev/null @@ -1,31 +0,0 @@ -"""Service operations from the Dynatrace API""" -import dynatrace.topology.shared as topology_shared -from dynatrace.requests import request_handler as rh - -def get_services_tenantwide(cluster, tenant): - """Get Information for all services in a tenant""" - return topology_shared.get_env_layer_entities(cluster, tenant, 'services') - -def get_service(cluster, tenant, entity): - """Get Information on one service for in a tenant""" - return topology_shared.get_env_layer_entity(cluster, tenant,'services', entity) - -def set_service_properties(cluster, tenant, entity, prop_json): - """Update properties of service entity""" - return topology_shared.set_env_layer_properties(cluster, tenant, 'services', entity, prop_json) - -def get_service_count_tenantwide(cluster, tenant, params=None): - """Get total count for all services in a tenant""" - return topology_shared.get_env_layer_count(cluster, tenant, 'services', params=params) - -def get_service_count_clusterwide(cluster, params=None): - """Get total count for all services in cluster""" - return topology_shared.get_cluster_layer_count(cluster, 'services', params=params) - -def get_service_count_setwide(full_set, params=None): - """Get total count of services for all clusters definied in variable file""" - return topology_shared.get_set_layer_count(full_set, 'services', params=params) - -def add_service_tags (cluster, tenant, entity, tag_list): - """Add tags to a service""" - return topology_shared.add_env_layer_tags (cluster, tenant, 'services', entity, tag_list) \ No newline at end of file diff --git a/dynatrace/topology/shared.py b/dynatrace/topology/shared.py deleted file mode 100644 index a9498b7..0000000 --- a/dynatrace/topology/shared.py +++ /dev/null @@ -1,100 +0,0 @@ -"""Shared topology operations for multiple layers from the Dynatrace API""" -from dynatrace.requests import request_handler as rh -# Layer Compatibility -# 1. Get all entities - application, host, process, process group, service -# 1a. Count all entities -# 2. Get specific entity - application, host process, process group, service -# 3. 
Update properties of entity - application, custom, host, process group, service - -ENDPOINT = "entity/infrastructure/" - -def check_valid_layer(layer, layer_list): - """Check if the operation is valid for the layer""" - if layer is None or layer_list is None: - raise Exception ('Provide layer and layer_list!') - if layer not in layer_list: - raise Exception (layer + " layer does not exist or is invalid for this use!") - return - -def get_env_layer_entities(cluster, tenant, layer, params=None): - """Get all Entities of Specified Layer""" - layer_list = ['applications','hosts', 'processes', 'process-groups', 'services'] - check_valid_layer(layer, layer_list) - response = rh.env_get( - cluster, - tenant, - ENDPOINT + layer, - params=params - ) - return response.json() - -def get_env_layer_entity(cluster, tenant, layer, entity, params=None): - """Get Entity Information for Specified Layer""" - layer_list = ['applications','hosts', 'processes', 'process-groups', 'services'] - check_valid_layer(layer, layer_list) - response = rh.env_get( - cluster, - tenant, - ENDPOINT + layer + "/" + entity, - params=params - ) - return response.json() - -def set_env_layer_properties(cluster, tenant, layer, entity, prop_json): - """Update Properties of Entity""" - layer_list = ['applications', 'custom', 'hosts', 'process-groups', 'services'] - check_valid_layer(layer, layer_list) - response = rh.env_post( - cluster, - tenant, - ENDPOINT + layer + "/" + entity, - json=prop_json - ) - return response.status_code - -def get_env_layer_count(cluster, tenant, layer, params=None): - """Get total hosts in an environment""" - - layer_list = ['applications','hosts', 'processes', 'process-groups', 'services'] - if not params: - params = {} - if 'relativeTime' not in params.keys(): - params['relativeTime'] : "day" - if 'includeDetails' not in params.keys(): - params['includeDetails'] : "false" - - check_valid_layer(layer, layer_list) - response = rh.env_get(cluster, tenant, ENDPOINT + layer, params=params) - env_layer_count = len(response.json()) - return env_layer_count - -def get_cluster_layer_count(cluster, layer, params=None): - """Get total count for all environments in cluster""" - cluster_layer_count = 0 - for env_key in cluster['tenant']: - cluster_layer_count = cluster_layer_count + get_env_layer_count( - cluster, - env_key, - layer, - params=params - ) - return cluster_layer_count - -def get_set_layer_count(full_set, layer, params=None): - """Get total count for all clusters definied in variable file""" - full_set_layer_count = 0 - for cluster_items in full_set.values(): - full_set_layer_count = \ - full_set_layer_count + \ - get_cluster_layer_count(cluster_items, layer, params=params) - return full_set_layer_count - -def add_env_layer_tags (cluster, tenant, layer, entity, tag_list): - layer_list = ['applications','hosts', 'custom', 'process-groups', 'services'] - check_valid_layer(layer, layer_list) - if tag_list is None: - raise Exception ("tag_list cannot be None type") - tag_json = { - 'tags' : tag_list - } - return set_env_layer_properties(cluster, tenant, layer, entity, tag_json) \ No newline at end of file diff --git a/tests/README.md b/tests/README.md new file mode 100644 index 0000000..3717d26 --- /dev/null +++ b/tests/README.md @@ -0,0 +1,4 @@ +# How To Use + +When running, please use a mockserver defined in your user_variables.py +The name of the mockserver should be "mockserver1". 
The tenant should be "tenant1" \ No newline at end of file diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/mockserver_payloads/requests/hosts/tags.json b/tests/mockserver_payloads/requests/hosts/tags.json new file mode 100644 index 0000000..5694743 --- /dev/null +++ b/tests/mockserver_payloads/requests/hosts/tags.json @@ -0,0 +1,6 @@ +{ + "tags": [ + "demo", + "example" + ] +} \ No newline at end of file diff --git a/tests/mockserver_payloads/requests/maintenance/mock_create_daily_1.json b/tests/mockserver_payloads/requests/maintenance/mock_create_daily_1.json new file mode 100644 index 0000000..97a5743 --- /dev/null +++ b/tests/mockserver_payloads/requests/maintenance/mock_create_daily_1.json @@ -0,0 +1,16 @@ +{ + "name":"Test Payload", + "description":"Generating Payload for Test", + "suppression":"DETECT_PROBLEMS_AND_ALERT", + "schedule":{ + "recurrenceType":"DAILY", + "start":"2020-01-01 00:00", + "end":"2020-01-02 00:00", + "zoneId":"America/Chicago", + "recurrence":{ + "startTime":"23:00", + "durationMinutes":60 + } + }, + "type":"PLANNED" + } \ No newline at end of file diff --git a/tests/mockserver_payloads/requests/maintenance/mock_create_daily_multi_tags_and_1.json b/tests/mockserver_payloads/requests/maintenance/mock_create_daily_multi_tags_and_1.json new file mode 100644 index 0000000..7299850 --- /dev/null +++ b/tests/mockserver_payloads/requests/maintenance/mock_create_daily_multi_tags_and_1.json @@ -0,0 +1,28 @@ +{ + "name": "Test Payload", + "description": "Generating Payload for Test", + "suppression": "DETECT_PROBLEMS_AND_ALERT", + "schedule": { + "recurrenceType": "DAILY", + "start": "2020-01-01 00:00", + "end": "2020-01-02 00:00", + "zoneId": "America/Chicago", + "recurrence": { + "startTime": "23:00", + "durationMinutes": 60 + } + }, + "type": "PLANNED", + "scope": { + "entities": [], + "matches": [{ + "tags": [{ + "context": "CONTEXTLESS", + "key": "testing" + }, { + "context": "CONTEXTLESS", + "key": "testing2" + }] + }] + } +} \ No newline at end of file diff --git a/tests/mockserver_payloads/requests/maintenance/mock_create_daily_multi_tags_or_1.json b/tests/mockserver_payloads/requests/maintenance/mock_create_daily_multi_tags_or_1.json new file mode 100644 index 0000000..77ade49 --- /dev/null +++ b/tests/mockserver_payloads/requests/maintenance/mock_create_daily_multi_tags_or_1.json @@ -0,0 +1,30 @@ +{ + "name": "Test Payload", + "description": "Generating Payload for Test", + "suppression": "DETECT_PROBLEMS_AND_ALERT", + "schedule": { + "recurrenceType": "DAILY", + "start": "2020-01-01 00:00", + "end": "2020-01-02 00:00", + "zoneId": "America/Chicago", + "recurrence": { + "startTime": "23:00", + "durationMinutes": 60 + } + }, + "type": "PLANNED", + "scope": { + "entities": [], + "matches": [{ + "tags": [{ + "context": "CONTEXTLESS", + "key": "testing" + }] + }, { + "tags": [{ + "context": "CONTEXTLESS", + "key": "testing2" + }] + }] + } +} \ No newline at end of file diff --git a/tests/mockserver_payloads/requests/maintenance/mock_create_daily_single_tag_1.json b/tests/mockserver_payloads/requests/maintenance/mock_create_daily_single_tag_1.json new file mode 100644 index 0000000..63d1348 --- /dev/null +++ b/tests/mockserver_payloads/requests/maintenance/mock_create_daily_single_tag_1.json @@ -0,0 +1,25 @@ +{ + "name": "Test Payload", + "description": "Generating Payload for Test", + "suppression": "DETECT_PROBLEMS_AND_ALERT", + "schedule": { + "recurrenceType": "DAILY", + "start": "2020-01-01 
00:00", + "end": "2020-01-02 00:00", + "zoneId": "America/Chicago", + "recurrence": { + "startTime": "23:00", + "durationMinutes": 60 + } + }, + "type": "PLANNED", + "scope": { + "entities": [], + "matches": [{ + "tags": [{ + "context": "CONTEXTLESS", + "key": "testing" + }] + }] + } +} \ No newline at end of file diff --git a/tests/mockserver_payloads/requests/maintenance/mock_create_monthly_1.json b/tests/mockserver_payloads/requests/maintenance/mock_create_monthly_1.json new file mode 100644 index 0000000..5e0ddf6 --- /dev/null +++ b/tests/mockserver_payloads/requests/maintenance/mock_create_monthly_1.json @@ -0,0 +1,17 @@ +{ + "name": "Test Payload", + "description": "Generating Payload for Test", + "suppression": "DETECT_PROBLEMS_AND_ALERT", + "schedule": { + "recurrenceType": "MONTHLY", + "start": "2020-01-01 00:00", + "end": "2020-01-02 00:00", + "zoneId": "America/Chicago", + "recurrence": { + "startTime": "23:00", + "durationMinutes": 60, + "dayOfMonth": 1 + } + }, + "type": "PLANNED" +} \ No newline at end of file diff --git a/tests/mockserver_payloads/requests/maintenance/mock_create_once_1.json b/tests/mockserver_payloads/requests/maintenance/mock_create_once_1.json new file mode 100644 index 0000000..b849cf5 --- /dev/null +++ b/tests/mockserver_payloads/requests/maintenance/mock_create_once_1.json @@ -0,0 +1,12 @@ +{ + "name": "Test Payload", + "description": "Generating Payload for Test", + "suppression": "DETECT_PROBLEMS_AND_ALERT", + "schedule": { + "recurrenceType": "ONCE", + "start": "2020-01-01 00:00", + "end": "2020-01-02 00:00", + "zoneId": "America/Chicago" + }, + "type": "PLANNED" +} \ No newline at end of file diff --git a/tests/mockserver_payloads/requests/maintenance/mock_create_weekly_1.json b/tests/mockserver_payloads/requests/maintenance/mock_create_weekly_1.json new file mode 100644 index 0000000..2639fa9 --- /dev/null +++ b/tests/mockserver_payloads/requests/maintenance/mock_create_weekly_1.json @@ -0,0 +1,17 @@ +{ + "name": "Test Payload", + "description": "Generating Payload for Test", + "suppression": "DETECT_PROBLEMS_AND_ALERT", + "schedule": { + "recurrenceType": "WEEKLY", + "start": "2020-01-01 00:00", + "end": "2020-01-02 00:00", + "zoneId": "America/Chicago", + "recurrence": { + "startTime": "23:00", + "durationMinutes": 60, + "dayOfWeek": "SUNDAY" + } + }, + "type": "PLANNED" +} \ No newline at end of file diff --git a/tests/mockserver_payloads/requests/processes/tags.json b/tests/mockserver_payloads/requests/processes/tags.json new file mode 100644 index 0000000..dc89ff6 --- /dev/null +++ b/tests/mockserver_payloads/requests/processes/tags.json @@ -0,0 +1,6 @@ +{ + "tags": [ + "demo", + "example" + ] +} \ No newline at end of file diff --git a/tests/mockserver_payloads/requests/services/tags.json b/tests/mockserver_payloads/requests/services/tags.json new file mode 100644 index 0000000..dc89ff6 --- /dev/null +++ b/tests/mockserver_payloads/requests/services/tags.json @@ -0,0 +1,6 @@ +{ + "tags": [ + "demo", + "example" + ] +} \ No newline at end of file diff --git a/tests/mockserver_payloads/responses/get_all.json b/tests/mockserver_payloads/responses/get_all.json new file mode 100644 index 0000000..e69de29 diff --git a/tests/mockserver_payloads/responses/host_groups/mock_get_general_1.json b/tests/mockserver_payloads/responses/host_groups/mock_get_general_1.json new file mode 100644 index 0000000..2b383c5 --- /dev/null +++ b/tests/mockserver_payloads/responses/host_groups/mock_get_general_1.json @@ -0,0 +1,34 @@ +[{ + "entityId": 
"HOST-238441A17F95B305", + "displayName": "testserver", + "discoveredName": "testserver", + "firstSeenTimestamp": 1592513300463, + "lastSeenTimestamp": 1592980597441, + "tags": [], + "fromRelationships": {}, + "toRelationships": { + "isProcessOf": [], + "runsOn": [] + }, + "osType": "LINUX", + "osArchitecture": "X86", + "osVersion": "Debian GNU/Linux 10 (buster) (kernel 4.19.0-9-amd64)", + "bitness": "64bit", + "cpuCores": 1, + "logicalCpuCores": 2, + "monitoringMode": "FULL_STACK", + "networkZoneId": "default", + "agentVersion": { + "major": 1, + "minor": 195, + "revision": 54, + "timestamp": "20200529-113801", + "sourceRevision": "" + }, + "consumedHostUnits": 8.0, + "userLevel": "SUPERUSER", + "hostGroup": { + "meId": "HOST_GROUP-ABCDEFGH12345678", + "name": "HOST_GROUP_1" + } +}] \ No newline at end of file diff --git a/tests/mockserver_payloads/responses/hosts/get_all.json b/tests/mockserver_payloads/responses/hosts/get_all.json new file mode 100644 index 0000000..bae3691 --- /dev/null +++ b/tests/mockserver_payloads/responses/hosts/get_all.json @@ -0,0 +1,14 @@ +[ + { + "entityId": "HOST-ABC123DEF456GHIJ", + "consumedHostUnits": 0.25 + }, + { + "entityId": "HOST-5B9CE4E4E14185FA", + "consumedHostUnits": 0.25 + }, + { + "entityId": "HOST-421D60DB4A2EA929", + "consumedHostUnits": 3.5 + } +] \ No newline at end of file diff --git a/tests/mockserver_payloads/responses/hosts/get_single.json b/tests/mockserver_payloads/responses/hosts/get_single.json new file mode 100644 index 0000000..b27e2e7 --- /dev/null +++ b/tests/mockserver_payloads/responses/hosts/get_single.json @@ -0,0 +1,4 @@ +{ + "entityId": "HOST-ABC123DEF456GHIJ", + "consumedHostUnits": 0.25 +} \ No newline at end of file diff --git a/tests/mockserver_payloads/responses/maintenance/mock_create_1.json b/tests/mockserver_payloads/responses/maintenance/mock_create_1.json new file mode 100644 index 0000000..de86d44 --- /dev/null +++ b/tests/mockserver_payloads/responses/maintenance/mock_create_1.json @@ -0,0 +1,5 @@ +{ + "id": "1a000000-200a-3000-4000-5abc00000000", + "name": "Test Payload", + "description": "Generating Payload for Test" +} \ No newline at end of file diff --git a/tests/mockserver_payloads/responses/processes/get_all_pgis.json b/tests/mockserver_payloads/responses/processes/get_all_pgis.json new file mode 100644 index 0000000..121da81 --- /dev/null +++ b/tests/mockserver_payloads/responses/processes/get_all_pgis.json @@ -0,0 +1,11 @@ +[ + { + "entityId": "PROCESS_GROUP_INSTANCE-ABC123DEF456GHI7" + }, + { + "entityId": "PROCESS_GROUP_INSTANCE-A6AAFEA17E6F60FD" + }, + { + "entityId": "PROCESS_GROUP_INSTANCE-F0967E6BFEE20424" + } +] \ No newline at end of file diff --git a/tests/mockserver_payloads/responses/processes/get_all_pgs.json b/tests/mockserver_payloads/responses/processes/get_all_pgs.json new file mode 100644 index 0000000..73e086a --- /dev/null +++ b/tests/mockserver_payloads/responses/processes/get_all_pgs.json @@ -0,0 +1,11 @@ +[ + { + "entityId": "PROCESS_GROUP-ABC123DEF456GHI7" + }, + { + "entityId": "PROCESS_GROUP-19DACA5E22637C33" + }, + { + "entityId": "PROCESS_GROUP-859E1549052CD876" + } +] \ No newline at end of file diff --git a/tests/mockserver_payloads/responses/processes/get_one_pg.json b/tests/mockserver_payloads/responses/processes/get_one_pg.json new file mode 100644 index 0000000..14223a1 --- /dev/null +++ b/tests/mockserver_payloads/responses/processes/get_one_pg.json @@ -0,0 +1,3 @@ +{ + "entityId": "PROCESS_GROUP-ABC123DEF456GHI7" +} \ No newline at end of file diff --git 
a/tests/mockserver_payloads/responses/processes/get_one_pgi.json b/tests/mockserver_payloads/responses/processes/get_one_pgi.json new file mode 100644 index 0000000..0898df1 --- /dev/null +++ b/tests/mockserver_payloads/responses/processes/get_one_pgi.json @@ -0,0 +1,3 @@ +{ + "entityId": "PROCESS_GROUP_INSTANCE-ABC123DEF456GHI7" +} \ No newline at end of file diff --git a/tests/mockserver_payloads/responses/services/get_all.json b/tests/mockserver_payloads/responses/services/get_all.json new file mode 100644 index 0000000..bdcce80 --- /dev/null +++ b/tests/mockserver_payloads/responses/services/get_all.json @@ -0,0 +1,11 @@ +[ + { + "entityId": "SERVICE-ABC123DEF456GHI7" + }, + { + "entityId": "SERVICE-C096CE0BA471AEFD" + }, + { + "entityId": "SERVICE-B71ADA892013D156" + } +] \ No newline at end of file diff --git a/tests/mockserver_payloads/responses/services/get_one.json b/tests/mockserver_payloads/responses/services/get_one.json new file mode 100644 index 0000000..f5c0619 --- /dev/null +++ b/tests/mockserver_payloads/responses/services/get_one.json @@ -0,0 +1,3 @@ +{ + "entityId": "SERVICE-ABC123DEF456GHI7" +} \ No newline at end of file diff --git a/tests/test_host_groups.py b/tests/test_host_groups.py new file mode 100644 index 0000000..2ce311a --- /dev/null +++ b/tests/test_host_groups.py @@ -0,0 +1,33 @@ +"""Testing dynatrace.tenant.host_groups""" +import unittest +import user_variables +from tests import tooling_for_test +from dynatrace.tenant import host_groups + +CLUSTER = user_variables.FULL_SET["mockserver1"] +TENANT = "tenant1" +URL_PATH = "/api/v1/entity/infrastructure/hosts" + + +class TestHostGroupFunctions(unittest.TestCase): + RESPONSE_DIR = "tests/mockserver_payloads/responses/host_groups/" + + def test_get_host_groups_tenantwide(self): + parameters = { + "relativeTime": ["day"], + "includeDetails": ["true"], + } + mockserver_response_file = f"{self.RESPONSE_DIR}mock_get_general_1.json" + tooling_for_test.create_mockserver_expectation( + CLUSTER, TENANT, URL_PATH, "GET", parameters=parameters, response_file=mockserver_response_file) + command_tested = host_groups.get_host_groups_tenantwide( + CLUSTER, TENANT) + + expected_result = { + 'HOST_GROUP-ABCDEFGH12345678': 'HOST_GROUP_1' + } + self.assertEqual(command_tested, expected_result) + + +if __name__ == '__main__': + unittest.main() diff --git a/tests/test_maintenance_windows.py b/tests/test_maintenance_windows.py new file mode 100644 index 0000000..e76709a --- /dev/null +++ b/tests/test_maintenance_windows.py @@ -0,0 +1,468 @@ +"""Test Cases For Maintenance Windows.""" +import unittest +import user_variables +from tests import tooling_for_test +from dynatrace.tenant import maintenance +from dynatrace.requests.request_handler import TenantAPIs +from dynatrace.exceptions import InvalidDateFormatException + +CLUSTER = user_variables.FULL_SET["mockserver1"] +TENANT = "tenant1" +URL_PATH = str(TenantAPIs.MAINTENANCE_WINDOWS) +TEST_RANGE_START = "2020-01-01 00:00" +TEST_RANGE_END = "2020-01-02 00:00" +TEST_PAYLOAD_TITLE = "Test Payload" +TEST_PAYLOAD_DESC = "Generating Payload for Test" + + +class TestMaintenanceWindowCreate(unittest.TestCase): + """Test Cases for Creating a Maintenance Window""" + REQUEST_DIR = "tests/mockserver_payloads/requests/maintenance/" + RESPONSE_DIR = "tests/mockserver_payloads/responses/maintenance/" + + def test_create_daily_no_scope(self): + """ + Testing create daily Maintenance Window with no scope + """ + mockserver_request_file = f"{self.REQUEST_DIR}mock_create_daily_1.json" + 
+        mockserver_response_file = f"{self.RESPONSE_DIR}mock_create_1.json"
+        tooling_for_test.create_mockserver_expectation(
+            CLUSTER,
+            TENANT,
+            URL_PATH,
+            "POST",
+            request_file=mockserver_request_file,
+            response_file=mockserver_response_file,
+        )
+        maintenance_schedule = maintenance.generate_schedule(
+            maintenance.RecurrenceType.DAILY,
+            "23:00",
+            60,
+            TEST_RANGE_START,
+            TEST_RANGE_END
+        )
+        maintenance_json = maintenance.generate_window_json(
+            TEST_PAYLOAD_TITLE,
+            TEST_PAYLOAD_DESC,
+            maintenance.Suppression.DETECT_PROBLEMS_AND_ALERT,
+            maintenance_schedule,
+            is_planned=True
+        )
+        result = maintenance.create_window(CLUSTER, TENANT, maintenance_json)
+        self.assertEqual(result, tooling_for_test.expected_payload(
+            mockserver_response_file))
+
+    def test_create_daily_single_tag(self):
+        """Testing create daily Maintenance Window with a single tag scope"""
+        mockserver_request_file = f"{self.REQUEST_DIR}mock_create_daily_single_tag_1.json"
+        mockserver_response_file = f"{self.RESPONSE_DIR}mock_create_1.json"
+        tooling_for_test.create_mockserver_expectation(
+            CLUSTER,
+            TENANT,
+            URL_PATH,
+            "POST",
+            request_file=mockserver_request_file,
+            response_file=mockserver_response_file,
+        )
+        maintenance_schedule = maintenance.generate_schedule(
+            maintenance.RecurrenceType.DAILY,
+            "23:00",
+            60,
+            TEST_RANGE_START,
+            TEST_RANGE_END
+        )
+        maintenance_scope = maintenance.generate_scope(
+            tags=[{'context': "CONTEXTLESS", 'key': "testing"}])
+        maintenance_json = maintenance.generate_window_json(
+            TEST_PAYLOAD_TITLE,
+            TEST_PAYLOAD_DESC,
+            maintenance.Suppression.DETECT_PROBLEMS_AND_ALERT,
+            maintenance_schedule,
+            scope=maintenance_scope,
+            is_planned=True
+        )
+        result = maintenance.create_window(CLUSTER, TENANT, maintenance_json)
+        self.assertEqual(result, tooling_for_test.expected_payload(
+            mockserver_response_file))
+
+    def test_create_daily_tags_and(self):
+        """Testing Payloads with multiple tags in an \"AND\" configuration"""
+        mockserver_request_file = f"{self.REQUEST_DIR}mock_create_daily_multi_tags_and_1.json"
+        mockserver_response_file = f"{self.RESPONSE_DIR}mock_create_1.json"
+
+        tooling_for_test.create_mockserver_expectation(
+            CLUSTER,
+            TENANT,
+            URL_PATH,
+            "POST",
+            request_file=mockserver_request_file,
+            response_file=mockserver_response_file,
+        )
+        maintenance_schedule = maintenance.generate_schedule(
+            maintenance.RecurrenceType.DAILY,
+            "23:00",
+            60,
+            TEST_RANGE_START,
+            TEST_RANGE_END
+        )
+        maintenance_scope = maintenance.generate_scope(
+            tags=[
+                {'context': "CONTEXTLESS", 'key': "testing"},
+                {'context': "CONTEXTLESS", 'key': "testing2"}
+            ],
+            match_any_tag=False
+        )
+        maintenance_json = maintenance.generate_window_json(
+            TEST_PAYLOAD_TITLE,
+            TEST_PAYLOAD_DESC,
+            maintenance.Suppression.DETECT_PROBLEMS_AND_ALERT,
+            maintenance_schedule,
+            scope=maintenance_scope,
+            is_planned=True
+        )
+        result = maintenance.create_window(CLUSTER, TENANT, maintenance_json)
+        self.assertEqual(result, tooling_for_test.expected_payload(
+            mockserver_response_file))
+
+    def test_create_daily_tags_or(self):
+        """Testing Payloads with multiple tags in an \"OR\" configuration"""
+        mockserver_request_file = f"{self.REQUEST_DIR}mock_create_daily_multi_tags_or_1.json"
+        mockserver_response_file = f"{self.RESPONSE_DIR}mock_create_1.json"
+
+        tooling_for_test.create_mockserver_expectation(
+            CLUSTER,
+            TENANT,
+            URL_PATH,
+            "POST",
+            request_file=mockserver_request_file,
+            response_file=mockserver_response_file,
+        )
+        maintenance_schedule = maintenance.generate_schedule(
+            maintenance.RecurrenceType.DAILY,
+            "23:00",
+            60,
+            TEST_RANGE_START,
+            TEST_RANGE_END
+        )
+        maintenance_scope = maintenance.generate_scope(
+            tags=[
+                {'context': "CONTEXTLESS", 'key': "testing"},
+                {'context': "CONTEXTLESS", 'key': "testing2"}
+            ],
+            match_any_tag=True
+        )
+        maintenance_json = maintenance.generate_window_json(
+            TEST_PAYLOAD_TITLE,
+            TEST_PAYLOAD_DESC,
+            maintenance.Suppression.DETECT_PROBLEMS_AND_ALERT,
+            maintenance_schedule,
+            scope=maintenance_scope,
+            is_planned=True
+        )
+        result = maintenance.create_window(CLUSTER, TENANT, maintenance_json)
+        self.assertEqual(result, tooling_for_test.expected_payload(
+            mockserver_response_file))
+
+    def test_create_once_no_scope(self):
+        """Testing Payloads with ONCE recurrence type"""
+        mockserver_request_file = f"{self.REQUEST_DIR}mock_create_once_1.json"
+        mockserver_response_file = f"{self.RESPONSE_DIR}mock_create_1.json"
+
+        tooling_for_test.create_mockserver_expectation(
+            CLUSTER,
+            TENANT,
+            URL_PATH,
+            "POST",
+            request_file=mockserver_request_file,
+            response_file=mockserver_response_file,
+        )
+        maintenance_schedule = maintenance.generate_schedule(
+            maintenance.RecurrenceType.ONCE,
+            # TODO: Remove need for these variables. ONCE does not use them
+            "23:00",
+            60,
+            TEST_RANGE_START,
+            TEST_RANGE_END
+        )
+        maintenance_json = maintenance.generate_window_json(
+            TEST_PAYLOAD_TITLE,
+            TEST_PAYLOAD_DESC,
+            maintenance.Suppression.DETECT_PROBLEMS_AND_ALERT,
+            maintenance_schedule,
+            is_planned=True
+        )
+        result = maintenance.create_window(CLUSTER, TENANT, maintenance_json)
+        self.assertEqual(result, tooling_for_test.expected_payload(
+            mockserver_response_file))
+
+    def test_create_weekly_no_scope(self):
+        """Testing Payloads with WEEKLY recurrence type"""
+        mockserver_request_file = f"{self.REQUEST_DIR}mock_create_weekly_1.json"
+        mockserver_response_file = f"{self.RESPONSE_DIR}mock_create_1.json"
+
+        tooling_for_test.create_mockserver_expectation(
+            CLUSTER,
+            TENANT,
+            URL_PATH,
+            "POST",
+            request_file=mockserver_request_file,
+            response_file=mockserver_response_file,
+        )
+        maintenance_schedule = maintenance.generate_schedule(
+            maintenance.RecurrenceType.WEEKLY,
+            "23:00",
+            60,
+            TEST_RANGE_START,
+            TEST_RANGE_END,
+            day=maintenance.DayOfWeek.SUNDAY
+        )
+        maintenance_json = maintenance.generate_window_json(
+            TEST_PAYLOAD_TITLE,
+            TEST_PAYLOAD_DESC,
+            maintenance.Suppression.DETECT_PROBLEMS_AND_ALERT,
+            maintenance_schedule,
+            is_planned=True
+        )
+        result = maintenance.create_window(CLUSTER, TENANT, maintenance_json)
+        self.assertEqual(result, tooling_for_test.expected_payload(
+            mockserver_response_file))
+
+    def test_create_monthly_no_scope(self):
+        """Testing Payloads with MONTHLY recurrence type"""
+        mockserver_request_file = f"{self.REQUEST_DIR}mock_create_monthly_1.json"
+        mockserver_response_file = f"{self.RESPONSE_DIR}mock_create_1.json"
+
+        tooling_for_test.create_mockserver_expectation(
+            CLUSTER,
+            TENANT,
+            URL_PATH,
+            "POST",
+            request_file=mockserver_request_file,
+            response_file=mockserver_response_file,
+        )
+        maintenance_schedule = maintenance.generate_schedule(
+            maintenance.RecurrenceType.MONTHLY,
+            "23:00",
+            60,
+            TEST_RANGE_START,
+            TEST_RANGE_END,
+            day=1
+        )
+        maintenance_json = maintenance.generate_window_json(
+            TEST_PAYLOAD_TITLE,
+            TEST_PAYLOAD_DESC,
+            maintenance.Suppression.DETECT_PROBLEMS_AND_ALERT,
+            maintenance_schedule,
+            is_planned=True
+        )
+        result = maintenance.create_window(CLUSTER, TENANT, maintenance_json)
+        self.assertEqual(result, tooling_for_test.expected_payload(
+            mockserver_response_file))
+
+
+class TestMaintenanceExceptions(unittest.TestCase):
+    """Test cases for exceptions raised while generating maintenance windows."""
+
+    def test_invalid_recurrence_type(self):
+        """Testing exception thrown for invalid recurrence type"""
+        with self.assertRaises(ValueError) as context:
+            maintenance.generate_schedule(
+                "HOURLY",
+                "23:00",
+                60,
+                TEST_RANGE_START,
+                TEST_RANGE_END,
+            )
+        self.assertIn("Invalid Recurrence Type!", str(context.exception))
+
+    def test_invalid_day_of_week(self):
+        """Testing exception thrown for invalid dayOfWeek"""
+        with self.assertRaises(ValueError) as context:
+            maintenance.generate_schedule(
+                maintenance.RecurrenceType.WEEKLY,
+                "23:00",
+                60,
+                TEST_RANGE_START,
+                TEST_RANGE_END,
+                day=1
+            )
+        self.assertIn("Invalid Weekly Day!", str(context.exception))
+
+    def test_invalid_day_of_month_value(self):
+        """Testing exception thrown for invalid dayOfMonth for incorrect int"""
+        with self.assertRaises(ValueError) as context:
+            maintenance.generate_schedule(
+                maintenance.RecurrenceType.MONTHLY,
+                "23:00",
+                60,
+                TEST_RANGE_START,
+                TEST_RANGE_END,
+                day=32
+            )
+        self.assertIn("Invalid Monthly Day!", str(context.exception))
+
+    def test_invalid_day_of_month_type(self):
+        """Testing exception thrown for invalid dayOfMonth for a non-int"""
+        with self.assertRaises(TypeError) as context:
+            maintenance.generate_schedule(
+                maintenance.RecurrenceType.MONTHLY,
+                "23:00",
+                60,
+                TEST_RANGE_START,
+                TEST_RANGE_END,
+                day="Eleven"
+            )
+        self.assertIn("Invalid type for Day of Month! Int between 1-31 required",
+                      str(context.exception))
+
+    def test_no_day_of_week_supplied(self):
+        """Weekly Maintenance Window with no dayOfWeek supplied"""
+        with self.assertRaises(Exception) as context:
+            maintenance.generate_schedule(
+                maintenance.RecurrenceType.WEEKLY,
+                "23:00",
+                60,
+                TEST_RANGE_START,
+                TEST_RANGE_END,
+            )
+        self.assertIn("Invalid Weekly Day!", str(context.exception))
+
+    def test_no_day_of_month_supplied(self):
+        """Monthly Maintenance Window with no dayOfMonth supplied"""
+        with self.assertRaises(Exception) as context:
+            maintenance.generate_schedule(
+                maintenance.RecurrenceType.MONTHLY,
+                "23:00",
+                60,
+                TEST_RANGE_START,
+                TEST_RANGE_END,
+            )
+        self.assertIn("Invalid type for Day of Month!", str(context.exception))
+
+    def test_invalid_datetime_format(self):
+        """Test invalid datetime supplied to trigger ValueError"""
+        # TODO: Fix Exception to have a message as first arg
+        with self.assertRaises(InvalidDateFormatException) as context:
+            maintenance.generate_schedule(
+                maintenance.RecurrenceType.DAILY,
+                "23:00",
+                60,
+                TEST_RANGE_START,
+                "2020-01-02"
+            )
+        self.assertIn("Incorrect Date ", context.exception.message)
+
+    def test_invalid_filter_type(self):
+        """Invalid Filter_Type"""
+        with self.assertRaises(ValueError) as context:
+            maintenance.generate_scope(
+                tags=[{'context': "CONTEXTLESS", 'key': "testing"}],
+                filter_type="INVALID_TYPE"
+            )
+        self.assertIn("Invalid Filter Type", str(context.exception))
+
+
+class TestMaintenanceEnumTypes(unittest.TestCase):
+    """Test cases for the string representations of the maintenance enums."""
+
+    def test_suppression_enum_str(self):
+        suppression = maintenance.Suppression.DETECT_PROBLEMS_AND_ALERT
+        self.assertIsInstance(str(suppression), str)
+
+    def test_suppression_enum_repr(self):
+        suppression = maintenance.Suppression.DETECT_PROBLEMS_AND_ALERT
+        self.assertIsInstance(repr(suppression), str)
+
+    def test_day_of_week_enum_str(self):
+        day_of_week = maintenance.DayOfWeek.MONDAY
+        self.assertIsInstance(str(day_of_week), str)
+
+    def test_day_of_week_enum_repr(self):
+        day_of_week = maintenance.DayOfWeek.MONDAY
+        self.assertIsInstance(repr(day_of_week), str)
+
+    def test_context_enum_str(self):
+        context = maintenance.Context.CONTEXTLESS
+        self.assertIsInstance(str(context), str)
+
+    def test_context_enum_repr(self):
+        context = maintenance.Context.CONTEXTLESS
+        self.assertIsInstance(repr(context), str)
+
+    def test_recurrence_type_enum_str(self):
+        recurrence_type = maintenance.RecurrenceType.DAILY
+        self.assertIsInstance(str(recurrence_type), str)
+
+    def test_recurrence_type_enum_repr(self):
+        recurrence_type = maintenance.RecurrenceType.DAILY
+        self.assertIsInstance(repr(recurrence_type), str)
+
+    def test_filter_type_enum_str(self):
+        filter_type = maintenance.FilterType.APM_SECURITY_GATEWAY
+        self.assertIsInstance(str(filter_type), str)
+
+    def test_filter_type_enum_repr(self):
+        filter_type = maintenance.FilterType.APM_SECURITY_GATEWAY
+        self.assertIsInstance(repr(filter_type), str)
+
+
+class TestTagParsing(unittest.TestCase):
+    """Test cases for parsing tag strings into context/key pairs."""
+
+    def test_tag_variations(self):
+        """Testing various ways tags need to be parsed"""
+        # Test 1 - Key
+        # Test 2 - Key, Value
+        # Test 3 - Context, Key and Value
+        # Test 4 - Key with Colon, Value
+        # Test 5 - Key with Colon, Value Blank
+        # Test 6 - Context, Key with Colon and Value
+        # Test 7 - Context, Key
+        # Test 8 - Context, Key with square brackets
+        # Test 9 - Context, Key with colon and squares
+        # Test 10 - Empty Context with squares
+
+        test_tag_list = [
+            "Key",
+            "Key:Value",
+            "[Context]Key:Value",
+            "Key:withColon:Value",
+            "Key:withColon:",
+            "[Context]Key:withColon:Value",
+            "[Context]Key",
+            "[Context][KeywithSquares]",
+            "[Context][KeyWithSquares]:AndColons:Value",
+            "[][KeywithSquares]",
+        ]
+
+        test_tag_expected_results = [
+            {'context': 'CONTEXTLESS', 'key': 'Key'},
+            {'context': 'CONTEXTLESS', 'key': 'Key:Value'},
+            {'context': 'Context', 'key': 'Key:Value'},
+            {'context': 'CONTEXTLESS', 'key': 'Key:withColon:Value'},
+            {'context': 'CONTEXTLESS', 'key': 'Key:withColon:'},
+            {'context': 'Context', 'key': 'Key:withColon:Value'},
+            {'context': 'Context', 'key': 'Key'},
+            {'context': 'Context', 'key': '[KeywithSquares]'},
+            {'context': 'Context', 'key': '[KeyWithSquares]:AndColons:Value'},
+            {'context': 'CONTEXTLESS', 'key': '[][KeywithSquares]'},
+        ]
+
+        for i, (tag, expected) in enumerate(
+                zip(test_tag_list, test_tag_expected_results), start=1):
+            result = maintenance.parse_tag(tag)
+            self.assertEqual(
+                result, expected,
+                f"Test {i}: {result} did not match {expected}")
+
+
+if __name__ == '__main__':
+    unittest.main()
+
+# CREATE TESTS LEFT:
+# Single Entity
+# Multi Entity
+# Single Tag with Filter Type
+# Multi Tags with Filter Type
+# Single Tag with Management Zone
+# Multi Tags with Management Zone
+
+# EXCEPTION TEST CASES:
+# MANAGEMENT_ZONE WITHOUT TAG
+# FILTER_TYPE WITHOUT TAG
+
+# OTHER TEST CASES:
+# GET ALL WINDOWS
+# GET DETAILS OF WINDOW
+# DELETE WINDOW
+# UPDATE WINDOW
\ No newline at end of file
diff --git a/tests/test_topology_hosts.py b/tests/test_topology_hosts.py
new file mode 100644
index 0000000..ac99dc7
--- /dev/null
+++ b/tests/test_topology_hosts.py
@@ -0,0 +1,129 @@
+"""
+Test Suite for Topology Hosts
+"""
+import unittest
+from user_variables import FULL_SET
+from tests import tooling_for_test as testtools
+from dynatrace.requests.request_handler import TenantAPIs
+from dynatrace.tenant.topology import hosts
+
+CLUSTER = FULL_SET["mockserver1"]
+TENANT = "tenant1"
+URL_PATH = f"{TenantAPIs.V1_TOPOLOGY}/infrastructure/hosts"
+REQUEST_DIR = "tests/mockserver_payloads/requests/hosts"
+RESPONSE_DIR = "tests/mockserver_payloads/responses/hosts"
+
+
+class TestGetHosts(unittest.TestCase):
+    """Test cases for fetching topology hosts."""
+
+    def test_get_all_hosts(self):
+        """Test fetching all hosts"""
+
+        response_file = f"{RESPONSE_DIR}/get_all.json"
+
+        testtools.create_mockserver_expectation(
+            cluster=CLUSTER,
+            tenant=TENANT,
+            url_path=URL_PATH,
+            request_type="GET",
+            response_file=response_file
+        )
+
+        result = hosts.get_hosts_tenantwide(CLUSTER, TENANT)
+        self.assertEqual(result, testtools.expected_payload(response_file))
+
+    def test_get_single_host(self):
+        """Test fetching a specific host"""
+
+        host_id = "HOST-9F74450267BAAE20"
+        response_file = f"{RESPONSE_DIR}/get_single.json"
+
+        testtools.create_mockserver_expectation(
+            cluster=CLUSTER,
+            tenant=TENANT,
+            url_path=f"{URL_PATH}/{host_id}",
+            request_type="GET",
+            response_file=response_file
+        )
+
+        result = hosts.get_host(CLUSTER, TENANT, host_id)
+        self.assertEqual(result, testtools.expected_payload(response_file))
+
+    def test_get_host_count(self):
+        """Test getting the count of hosts in a tenant."""
+
+        response_file = f"{RESPONSE_DIR}/get_all.json"
+        testtools.create_mockserver_expectation(
+            cluster=CLUSTER,
+            tenant=TENANT,
+            url_path=URL_PATH,
+            request_type="GET",
+            response_file=response_file,
+            parameters=dict(relativeTime=['day'],
+                            includeDetails=['False'])
+        )
+
+        result = hosts.get_host_count_tenantwide(CLUSTER, TENANT)
+        self.assertEqual(result, 3)
+
+    def test_get_host_units(self):
+        """Tests getting the consumed host units in a tenant."""
+
+        response_file = f"{RESPONSE_DIR}/get_all.json"
+        testtools.create_mockserver_expectation(
+            cluster=CLUSTER,
+            tenant=TENANT,
+            url_path=URL_PATH,
+            request_type="GET",
+            response_file=response_file
+        )
+
+        result = hosts.get_host_units_tenantwide(CLUSTER, TENANT)
+        self.assertEqual(result, 4)
+
+
+class TestHostTagging(unittest.TestCase):
+    """Test cases for testing host-level tagging."""
+
+    def test_add_tags(self):
+        """Test adding two tags to a specific host."""
+
+        host_id = "HOST-ABC123DEF456GHIJ"
+        request_file = f"{REQUEST_DIR}/tags.json"
+        tags = ["demo", "example"]
+
+        testtools.create_mockserver_expectation(
+            cluster=CLUSTER,
+            tenant=TENANT,
+            request_type="POST",
+            url_path=f"{URL_PATH}/{host_id}",
+            request_file=request_file,
+            response_code=201
+        )
+
+        result = hosts.add_host_tags(CLUSTER, TENANT, host_id, tags)
+        self.assertEqual(result, 201)
+
+    def test_delete_tags(self):
+        """Test deleting a tag from a specific host."""
+
+        host_id = "HOST-ABC123DEF456GHIJ"
+        tag = "demo"
+
+        testtools.create_mockserver_expectation(
+            cluster=CLUSTER,
+            tenant=TENANT,
+            url_path=f"{URL_PATH}/{host_id}/tags/{tag}",
+            request_type="DELETE",
+            response_code=204
+        )
+
+        result = hosts.delete_host_tag(CLUSTER, TENANT, host_id, tag)
+        self.assertEqual(result.status_code, 204)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/tests/test_topology_process_groups.py b/tests/test_topology_process_groups.py
new file mode 100644
index 0000000..5f103b2
--- /dev/null
+++ b/tests/test_topology_process_groups.py
@@ -0,0 +1,91 @@
+"""Test Suite for Topology Process Groups"""
+
+import unittest
+from user_variables import FULL_SET
+from tests import tooling_for_test as testtools
+from dynatrace.requests.request_handler import TenantAPIs
+from dynatrace.tenant.topology import process_groups
+
+CLUSTER = FULL_SET.get('mockserver1')
+TENANT = 'tenant1'
+URL_PATH = f"{TenantAPIs.V1_TOPOLOGY}/infrastructure/process-groups"
+REQUEST_DIR = "tests/mockserver_payloads/requests/processes"
+RESPONSE_DIR = "tests/mockserver_payloads/responses/processes"
+
+
+class TestGetPGs(unittest.TestCase):
+    """Test cases for fetching topology process groups."""
+
+    def test_get_all_pgs(self):
+        """Test fetching all PGs"""
+        response_file = f"{RESPONSE_DIR}/get_all_pgs.json"
+
+        testtools.create_mockserver_expectation(
+            cluster=CLUSTER,
+            tenant=TENANT,
+            url_path=URL_PATH,
+            request_type="GET",
+            response_file=response_file
+        )
+
+        result = process_groups.get_process_groups_tenantwide(CLUSTER, TENANT)
+        self.assertEqual(result, testtools.expected_payload(response_file))
+
+    def test_get_single_pg(self):
+        """Test fetching single PG"""
+        response_file = f"{RESPONSE_DIR}/get_one_pg.json"
+        pg_id = "PROCESS_GROUP-ABC123DEF456GHI7"
+
+        testtools.create_mockserver_expectation(
+            cluster=CLUSTER,
+            tenant=TENANT,
+            url_path=f"{URL_PATH}/{pg_id}",
+            request_type="GET",
+            response_file=response_file
+        )
+
+        result = process_groups.get_process_group(CLUSTER, TENANT, pg_id)
+        self.assertEqual(result, testtools.expected_payload(response_file))
+
+    def test_get_pg_count(self):
+        """Test getting the PG count tenantwide."""
+        response_file = f"{RESPONSE_DIR}/get_all_pgs.json"
+
+        testtools.create_mockserver_expectation(
+            cluster=CLUSTER,
+            tenant=TENANT,
+            url_path=URL_PATH,
+            request_type="GET",
+            response_file=response_file
+        )
+
+        result = process_groups.get_process_group_count_tenantwide(CLUSTER,
+                                                                   TENANT)
+        self.assertEqual(result, 3)
+
+
+class TestPGTags(unittest.TestCase):
+    """Test cases for PG tags"""
+
+    def test_add_pg_tags(self):
+        """Test adding two tags to the PG."""
+        pg_id = "PROCESS_GROUP-859E1549052CD876"
+        request_file = f"{REQUEST_DIR}/tags.json"
+        tags = ["demo", "example"]
+
+        testtools.create_mockserver_expectation(
+            cluster=CLUSTER,
+            tenant=TENANT,
+            request_type="POST",
+            url_path=f"{URL_PATH}/{pg_id}",
+            request_file=request_file,
+            response_code=201
+        )
+
+        result = process_groups.add_process_group_tags(CLUSTER, TENANT,
+                                                       pg_id, tags)
+        self.assertEqual(result, 201)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/tests/test_topology_processes.py b/tests/test_topology_processes.py
new file mode 100644
index 0000000..3fc29b1
--- /dev/null
+++ b/tests/test_topology_processes.py
@@ -0,0 +1,51 @@
+"""Test suite for Topology Processes"""
+
+import unittest
+from user_variables import FULL_SET
+from tests import tooling_for_test as testtools
+from dynatrace.requests.request_handler import TenantAPIs
+from dynatrace.tenant.topology import process
+
+CLUSTER = FULL_SET.get('mockserver1')
+TENANT = 'tenant1'
+URL_PATH = f"{TenantAPIs.V1_TOPOLOGY}/infrastructure/processes"
+RESPONSE_DIR = "tests/mockserver_payloads/responses/processes"
+
+
+class TestGetProcesses(unittest.TestCase):
+    """Test cases for fetching topology processes."""
+
+    def test_get_all_processes(self):
+        """Test getting all processes tenantwide."""
+        response_file = f"{RESPONSE_DIR}/get_all_pgis.json"
+
+        testtools.create_mockserver_expectation(
+            cluster=CLUSTER,
+            tenant=TENANT,
+            url_path=URL_PATH,
+            request_type="GET",
+            response_file=response_file
+        )
+
+        result = process.get_processes_tenantwide(CLUSTER, TENANT)
+        self.assertEqual(result, testtools.expected_payload(response_file))
+
+    def test_get_single_process(self):
+        """Tests getting one specific process."""
+        response_file = f"{RESPONSE_DIR}/get_one_pgi.json"
+        process_id = "PROCESS_GROUP_INSTANCE-ABC123DEF456GHI7"
+
+        testtools.create_mockserver_expectation(
+            cluster=CLUSTER,
+            tenant=TENANT,
+            url_path=f"{URL_PATH}/{process_id}",
+            request_type="GET",
+            response_file=response_file
+        )
+
+        result = process.get_process(CLUSTER, TENANT, process_id)
+        self.assertEqual(result, testtools.expected_payload(response_file))
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/tests/test_topology_services.py b/tests/test_topology_services.py
new file mode 100644
index 0000000..68e02ac
--- /dev/null
+++ b/tests/test_topology_services.py
@@ -0,0 +1,89 @@
+"""Test Suite for Topology Services"""
+
+import unittest
+from user_variables import FULL_SET
+from tests import tooling_for_test as testtools
+from dynatrace.requests.request_handler import TenantAPIs
+from dynatrace.tenant.topology import services
+
+CLUSTER = FULL_SET.get('mockserver1')
+TENANT = 'tenant1'
+URL_PATH = f"{TenantAPIs.V1_TOPOLOGY}/infrastructure/services"
+REQUEST_DIR = "tests/mockserver_payloads/requests/services"
+RESPONSE_DIR = "tests/mockserver_payloads/responses/services"
+
+
+class TestGetServices(unittest.TestCase):
+    """Test cases for fetching topology services."""
+
+    def test_get_all_svc(self):
+        """Test fetching all services"""
+        response_file = f"{RESPONSE_DIR}/get_all.json"
+
+        testtools.create_mockserver_expectation(
+            cluster=CLUSTER,
+            tenant=TENANT,
+            url_path=URL_PATH,
+            request_type="GET",
+            response_file=response_file
+        )
+
+        result = services.get_services_tenantwide(CLUSTER, TENANT)
+        self.assertEqual(result, testtools.expected_payload(response_file))
+
+    def test_get_single_svc(self):
+        """Test fetching single service"""
+        response_file = f"{RESPONSE_DIR}/get_one.json"
+        svc_id = "SERVICE-ABC123DEF456GHI7"
+
+        testtools.create_mockserver_expectation(
+            cluster=CLUSTER,
+            tenant=TENANT,
+            url_path=f"{URL_PATH}/{svc_id}",
+            request_type="GET",
+            response_file=response_file
+        )
+
+        result = services.get_service(CLUSTER, TENANT, svc_id)
+        self.assertEqual(result, testtools.expected_payload(response_file))
+
+    def test_get_svc_count(self):
+        """Test getting the service count tenantwide."""
+        response_file = f"{RESPONSE_DIR}/get_all.json"
+
+        testtools.create_mockserver_expectation(
+            cluster=CLUSTER,
+            tenant=TENANT,
+            url_path=URL_PATH,
+            request_type="GET",
+            response_file=response_file
+        )
+
+        result = services.get_service_count_tenantwide(CLUSTER, TENANT)
+        self.assertEqual(result, 3)
+
+
+class TestServiceTags(unittest.TestCase):
+    """Test cases for service tags"""
+
+    def test_add_svc_tags(self):
+        """Test adding two tags to the service."""
+        svc_id = "SERVICE-ABC123DEF456GHI7"
+        request_file = f"{REQUEST_DIR}/tags.json"
+        tags = ["demo", "example"]
+
+        testtools.create_mockserver_expectation(
+            cluster=CLUSTER,
+            tenant=TENANT,
+            request_type="POST",
+            url_path=f"{URL_PATH}/{svc_id}",
+            request_file=request_file,
+            response_code=201
+        )
+
+        result = services.add_service_tags(CLUSTER, TENANT, svc_id, tags)
+        self.assertEqual(result, 201)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/tests/tooling_for_test.py b/tests/tooling_for_test.py
new file mode 100644
index 0000000..27e14fb
--- /dev/null
+++ b/tests/tooling_for_test.py
@@ -0,0 +1,78 @@
+"""Mockserver Expectation Setup"""
+import json
+import logging
+
+import requests
+
+from dynatrace.requests.request_handler import generate_tenant_url
+
+logging.basicConfig(filename="testing_tools.log", level=logging.DEBUG)
+
+
+def create_mockserver_expectation(cluster, tenant, url_path, request_type, **kwargs):
+    """Create a one-off MockServer expectation for a single expected request."""
+    requests.packages.urllib3.disable_warnings()
+    expectation = {
+        "httpRequest": {
+            "headers": {
+                "Authorization": [f"Api-Token {cluster.get('api_token').get(tenant)}"],
+            },
+            "path": url_path,
+            "method": request_type
+        },
+        "httpResponse": {
+            "statusCode": 200
+        },
+        "times": {
+            "remainingTimes": 1,
+            "unlimited": False
+        },
+        "id": "OneOff",
+    }
+
+    logging.debug(f"URL PATH: {url_path}")
+    logging.debug(f"KWARGS {kwargs}")
+
+    # Parameters should always at least include the Api-Token
+    if 'parameters' in kwargs:
+        expectation["httpRequest"]["queryStringParameters"] = kwargs['parameters']
+
+    if "request_file" in kwargs:
+        with open(kwargs['request_file']) as f:
+            request_payload = json.load(f)
+        expectation["httpRequest"]["body"] = {
+            "type": "JSON",
+            "json": request_payload,
+        }
+
+    if "response_file" in kwargs:
+        with open(kwargs['response_file']) as f:
+            response_payload = json.load(f)
+        expectation["httpResponse"]["body"] = {
+            "type": "JSON",
+            "json": response_payload,
+        }
+        expectation["httpResponse"]["headers"] = {
+            "content-type": ["application/json"]
+        }
+
+    if "response_code" in kwargs:
+        expectation["httpResponse"]["statusCode"] = kwargs["response_code"]
+
+    if "mock_id" in kwargs:
+        expectation["id"] = kwargs["mock_id"]
+
+    logging.debug(expectation)
+
+    expectation_url = f"{generate_tenant_url(cluster, tenant)}/mockserver/expectation"
+    test_req = requests.request(
+        "PUT",
+        expectation_url,
+        json=expectation,
+        verify=False
+    )
+    logging.debug(test_req.text)
+    if test_req.status_code > 300:
+        print(expectation, test_req.status_code, test_req.text, end="\n")
+        raise ValueError(test_req.status_code)
+
+
+def expected_payload(json_file):
+    """Load and return the JSON payload expected from the mocked API call."""
+    with open(json_file) as f:
+        return json.load(f)
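
Reviewer note: every suite in this patch follows the same round-trip pattern through tooling_for_test: register a one-off MockServer expectation, invoke the function under test against the mock tenant, then compare the result with the payload file that seeded the expectation. The following minimal sketch illustrates that pattern in isolation; it is not part of the diff itself, the "/example" endpoint is hypothetical, and it assumes a running MockServer reachable through the "mockserver1" entry of user_variables.FULL_SET (the expected dict below is the literal content of services/get_one.json from this patch):

    """Illustrative sketch of the expectation/assert pattern used by the suites above."""
    import unittest

    from user_variables import FULL_SET
    from tests import tooling_for_test as testtools

    CLUSTER = FULL_SET["mockserver1"]
    TENANT = "tenant1"


    class TestSketch(unittest.TestCase):
        def test_expectation_roundtrip(self):
            # 1. Register a one-off expectation: the next GET /example on the
            #    mock tenant answers with the JSON stored in the response file.
            response_file = "tests/mockserver_payloads/responses/services/get_one.json"
            testtools.create_mockserver_expectation(
                cluster=CLUSTER,
                tenant=TENANT,
                url_path="/example",  # hypothetical endpoint, for illustration only
                request_type="GET",
                response_file=response_file,
            )
            # 2. The function under test would issue that GET here; the suites
            #    above then assert its result against the same payload file.
            self.assertEqual(
                testtools.expected_payload(response_file),
                {"entityId": "SERVICE-ABC123DEF456GHI7"},
            )


    if __name__ == '__main__':
        unittest.main()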