diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index fdcc07c954..9ac83e6c63 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -12,6 +12,7 @@ This will expedite the process of getting your pull request merged and avoid ext --- ### PR information +- [ ] Ensure development PR is based on the `develop` branch. - [ ] The title of the PR is clear and informative. - [ ] There are a small number of commits, each of which has an informative message. This means that previously merged commits do not appear in the history of the PR. For information on cleaning up the commits in your pull request, [see this page](https://github.com/Azure/azure-powershell/blob/master/documentation/development-docs/cleaning-up-commits.md). - [ ] If applicable, the PR references the bug/issue that it fixes in the description. diff --git a/.github/workflows/ci_pr.yml b/.github/workflows/ci_pr.yml index bf454f45b3..9ae44ec8cb 100644 --- a/.github/workflows/ci_pr.yml +++ b/.github/workflows/ci_pr.yml @@ -9,13 +9,13 @@ on: jobs: test-python-2_6-and-3_4-versions: - + strategy: fail-fast: false matrix: include: - - python-version: 2.6 - - python-version: 3.4 + - python-version: "2.6" + - python-version: "3.4" name: "Python ${{ matrix.python-version }} Unit Tests" runs-on: ubuntu-20.04 @@ -43,7 +43,7 @@ jobs: - name: Test with nosetests run: | - if [[ ${{ matrix.python-version }} == 2.6 ]]; then + if [[ ${{ matrix.python-version }} == "2.6" ]]; then source /home/waagent/virtualenv/python2.6.9/bin/activate else source /home/waagent/virtualenv/python3.4.8/bin/activate @@ -87,30 +87,23 @@ jobs: fail-fast: false matrix: include: - - python-version: 3.5 - PYLINTOPTS: "--rcfile=ci/3.6.pylintrc --ignore=tests_e2e,makepkg.py" - - - python-version: 3.6 - PYLINTOPTS: "--rcfile=ci/3.6.pylintrc --ignore=tests_e2e" - - - python-version: 3.7 - PYLINTOPTS: "--rcfile=ci/3.6.pylintrc --ignore=tests_e2e" - - - python-version: 3.8 - PYLINTOPTS: 
"--rcfile=ci/3.6.pylintrc --ignore=tests_e2e" - - - python-version: 3.9 - PYLINTOPTS: "--rcfile=ci/3.6.pylintrc" + - python-version: "3.5" + # workaround found in https://github.com/actions/setup-python/issues/866 + # for issue "[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed (_ssl.c:728)" on Python 3.5 + pip_trusted_host: "pypi.python.org pypi.org files.pythonhosted.org" + - python-version: "3.6" + - python-version: "3.7" + - python-version: "3.8" + - python-version: "3.9" additional-nose-opts: "--with-coverage --cover-erase --cover-inclusive --cover-branches --cover-package=azurelinuxagent" + - python-version: "3.10" + - python-version: "3.11" name: "Python ${{ matrix.python-version }} Unit Tests" runs-on: ubuntu-20.04 env: - PYLINTOPTS: ${{ matrix.PYLINTOPTS }} - PYLINTFILES: "azurelinuxagent setup.py makepkg.py tests tests_e2e" NOSEOPTS: "--with-timer ${{ matrix.additional-nose-opts }}" - PYTHON_VERSION: ${{ matrix.python-version }} steps: @@ -121,6 +114,8 @@ jobs: uses: actions/setup-python@v4 with: python-version: ${{ matrix.python-version }} + env: + PIP_TRUSTED_HOST: ${{ matrix.pip_trusted_host }} - name: Install dependencies id: install-dependencies @@ -128,19 +123,60 @@ jobs: sudo env "PATH=$PATH" python -m pip install --upgrade pip sudo env "PATH=$PATH" pip install -r requirements.txt sudo env "PATH=$PATH" pip install -r test-requirements.txt + sudo env "PATH=$PATH" pip install --upgrade pylint - name: Run pylint run: | - pylint $PYLINTOPTS --jobs=0 $PYLINTFILES + # + # List of files/directories to be checked by pylint. + # The end-to-end tests run only on Python 3.9 and we lint them only on that version. + # + PYLINT_FILES="azurelinuxagent setup.py makepkg.py tests" + if [[ "${{ matrix.python-version }}" == "3.9" ]]; then + PYLINT_FILES="$PYLINT_FILES tests_e2e" + fi - - name: Test with nosetests + # + # Command-line options for pylint. 
+ # * "unused-private-member" is not implemented on 3.5 and will produce "E0012: Bad option value 'unused-private-member' (bad-option-value)" + # so we suppress "bad-option-value". + # * 3.9 will produce "no-member" for several properties/methods that are added to the mocks used by the unit tests (e.g + # "E1101: Instance of 'WireProtocol' has no 'aggregate_status' member") so we suppress that warning. + # * On 3.9 pylint crashes when parsing azurelinuxagent/daemon/main.py (see https://github.com/pylint-dev/pylint/issues/9473), so we ignore it. + # * 'no-self-use' ("R0201: Method could be a function") was moved to an optional extension on 3.8 and is no longer used by default. It needs + # to be suppressed for previous versions (3.0-3.7), though. + # * 'contextmanager-generator-missing-cleanup' are false positives if yield is used inside an if-else block for contextmanager generator functions. + # (https://pylint.readthedocs.io/en/latest/user_guide/messages/warning/contextmanager-generator-missing-cleanup.html). + # This is not implemented on versions (3.0-3.7) Bad option value 'contextmanager-generator-missing-cleanup' (bad-option-value) + # * 3.9-3.11 will produce "too-many-positional-arguments" for several methods that are having more than 5 args, so we suppress that warning. 
+ # (R0917: Too many positional arguments (8/5) (too-many-positional-arguments)) + PYLINT_OPTIONS="--rcfile=ci/pylintrc --jobs=0" + if [[ "${{ matrix.python-version }}" == "3.9" ]]; then + PYLINT_OPTIONS="$PYLINT_OPTIONS --disable=no-member,too-many-positional-arguments --ignore=main.py" + fi + if [[ "${{ matrix.python-version }}" =~ ^3\.(10|11)$ ]]; then + PYLINT_OPTIONS="$PYLINT_OPTIONS --disable=too-many-positional-arguments" + fi + if [[ "${{ matrix.python-version }}" =~ ^3\.[0-7]$ ]]; then + PYLINT_OPTIONS="$PYLINT_OPTIONS --disable=no-self-use,bad-option-value" + fi + + echo "PYLINT_OPTIONS: $PYLINT_OPTIONS" + echo "PYLINT_FILES: $PYLINT_FILES" + + pylint $PYLINT_OPTIONS $PYLINT_FILES + + - name: Execute Unit Tests if: success() || (failure() && steps.install-dependencies.outcome == 'success') run: | - ./ci/nosetests.sh - exit $? + if [[ "${{ matrix.python-version }}" =~ ^3\.[1-9][0-9]+$ ]]; then + ./ci/pytest.sh + else + ./ci/nosetests.sh + fi - name: Compile Coverage - if: matrix.python-version == 3.9 + if: matrix.python-version == '3.9' run: | echo looking for coverage files : ls -alh | grep -i coverage @@ -149,7 +185,7 @@ jobs: sudo env "PATH=$PATH" coverage report - name: Upload Coverage - if: matrix.python-version == 3.9 + if: matrix.python-version == '3.9' uses: codecov/codecov-action@v3 with: file: ./coverage.xml diff --git a/README.md b/README.md index 5a5b126f2f..fd6c9fe5ad 100644 --- a/README.md +++ b/README.md @@ -100,7 +100,7 @@ Waagent depends on some system packages in order to function properly: * Filesystem utilities: sfdisk, fdisk, mkfs, parted * Password tools: chpasswd, sudo * Text processing tools: sed, grep -* Network tools: ip-route +* Network tools: ip-route, iptables ## Installation @@ -568,6 +568,13 @@ OpenSSL commands. This signals OpenSSL to use any installed FIPS-compliant libra Note that the agent itself has no FIPS-specific code. 
_If no FIPS-compliant certificates are installed, then enabling this option will cause all OpenSSL commands to fail._ +#### __OS.EnableFirewall__ + +_Type: Boolean_ +_Default: n (set to 'y' in waagent.conf)_ + +Creates firewall rules to allow communication with the VM Host only by the Agent. + #### __OS.MonitorDhcpClientRestartPeriod__ _Type: Integer_ diff --git a/azurelinuxagent/agent.py b/azurelinuxagent/agent.py index 2811e215ed..bfb795c6b9 100644 --- a/azurelinuxagent/agent.py +++ b/azurelinuxagent/agent.py @@ -23,14 +23,18 @@ from __future__ import print_function +import json import os import re import subprocess import sys import threading + +from azurelinuxagent.common.exception import CGroupsException from azurelinuxagent.ga import logcollector, cgroupconfigurator -from azurelinuxagent.ga.cgroup import AGENT_LOG_COLLECTOR, CpuCgroup, MemoryCgroup -from azurelinuxagent.ga.cgroupapi import SystemdCgroupsApi +from azurelinuxagent.ga.cgroupcontroller import AGENT_LOG_COLLECTOR +from azurelinuxagent.ga.cpucontroller import _CpuController +from azurelinuxagent.ga.cgroupapi import get_cgroup_api, log_cgroup_warning, InvalidCgroupMountpointException import azurelinuxagent.common.conf as conf import azurelinuxagent.common.event as event @@ -131,7 +135,7 @@ def daemon(self): """ set_daemon_version(AGENT_VERSION) logger.set_prefix("Daemon") - threading.current_thread().setName("Daemon") + threading.current_thread().name = "Daemon" child_args = None \ if self.conf_file_path is None \ else "-configuration-path:{0}".format(self.conf_file_path) @@ -171,7 +175,7 @@ def run_exthandlers(self, debug=False): Run the update and extension handler """ logger.set_prefix("ExtHandler") - threading.current_thread().setName("ExtHandler") + threading.current_thread().name = "ExtHandler" # # Agents < 2.2.53 used to echo the log to the console. 
Since the extension handler could have been started by @@ -206,42 +210,57 @@ def collect_logs(self, is_full_mode): # Check the cgroups unit log_collector_monitor = None - cgroups_api = SystemdCgroupsApi() - cpu_cgroup_path, memory_cgroup_path = cgroups_api.get_process_cgroup_paths("self") + tracked_controllers = [] if CollectLogsHandler.is_enabled_monitor_cgroups_check(): - cpu_slice_matches = (cgroupconfigurator.LOGCOLLECTOR_SLICE in cpu_cgroup_path) - memory_slice_matches = (cgroupconfigurator.LOGCOLLECTOR_SLICE in memory_cgroup_path) + try: + cgroup_api = get_cgroup_api() + except InvalidCgroupMountpointException as e: + log_cgroup_warning("The agent does not support cgroups if the default systemd mountpoint is not being used: {0}".format(ustr(e)), send_event=True) + sys.exit(logcollector.INVALID_CGROUPS_ERRCODE) + except CGroupsException as e: + log_cgroup_warning("Unable to determine which cgroup version to use: {0}".format(ustr(e)), send_event=True) + sys.exit(logcollector.INVALID_CGROUPS_ERRCODE) - if not cpu_slice_matches or not memory_slice_matches: - logger.info("The Log Collector process is not in the proper cgroups:") - if not cpu_slice_matches: - logger.info("\tunexpected cpu slice") - if not memory_slice_matches: - logger.info("\tunexpected memory slice") + log_collector_cgroup = cgroup_api.get_process_cgroup(process_id="self", cgroup_name=AGENT_LOG_COLLECTOR) + tracked_controllers = log_collector_cgroup.get_controllers() + if len(tracked_controllers) != len(log_collector_cgroup.get_supported_controller_names()): + log_cgroup_warning("At least one required controller is missing. 
The following controllers are required for the log collector to run: {0}".format(log_collector_cgroup.get_supported_controller_names())) sys.exit(logcollector.INVALID_CGROUPS_ERRCODE) - def initialize_cgroups_tracking(cpu_cgroup_path, memory_cgroup_path): - cpu_cgroup = CpuCgroup(AGENT_LOG_COLLECTOR, cpu_cgroup_path) - msg = "Started tracking cpu cgroup {0}".format(cpu_cgroup) - logger.info(msg) - cpu_cgroup.initialize_cpu_usage() - memory_cgroup = MemoryCgroup(AGENT_LOG_COLLECTOR, memory_cgroup_path) - msg = "Started tracking memory cgroup {0}".format(memory_cgroup) - logger.info(msg) - return [cpu_cgroup, memory_cgroup] + if not log_collector_cgroup.check_in_expected_slice(cgroupconfigurator.LOGCOLLECTOR_SLICE): + log_cgroup_warning("The Log Collector process is not in the proper cgroups", send_event=False) + sys.exit(logcollector.INVALID_CGROUPS_ERRCODE) try: log_collector = LogCollector(is_full_mode) - # Running log collector resource(CPU, Memory) monitoring only if agent starts the log collector. + # Running log collector resource monitoring only if agent starts the log collector. # If Log collector start by any other means, then it will not be monitored. if CollectLogsHandler.is_enabled_monitor_cgroups_check(): - tracked_cgroups = initialize_cgroups_tracking(cpu_cgroup_path, memory_cgroup_path) - log_collector_monitor = get_log_collector_monitor_handler(tracked_cgroups) + for controller in tracked_controllers: + if isinstance(controller, _CpuController): + controller.initialize_cpu_usage() + break + log_collector_monitor = get_log_collector_monitor_handler(tracked_controllers) log_collector_monitor.run() - archive = log_collector.collect_logs_and_get_archive() + + archive, total_uncompressed_size = log_collector.collect_logs_and_get_archive() logger.info("Log collection successfully completed. 
Archive can be found at {0} " "and detailed log output can be found at {1}".format(archive, OUTPUT_RESULTS_FILE_PATH)) + + if log_collector_monitor is not None: + log_collector_monitor.stop() + try: + metrics_summary = log_collector_monitor.get_max_recorded_metrics() + metrics_summary['Total Uncompressed File Size (B)'] = total_uncompressed_size + msg = json.dumps(metrics_summary) + logger.info(msg) + event.add_event(op=event.WALAEventOperation.LogCollection, message=msg, log_event=False) + except Exception as e: + msg = "An error occurred while reporting log collector resource usage summary: {0}".format(ustr(e)) + logger.warn(msg) + event.add_event(op=event.WALAEventOperation.LogCollection, is_success=False, message=msg, log_event=False) + except Exception as e: logger.error("Log collection completed unsuccessfully. Error: {0}".format(ustr(e))) logger.info("Detailed log output can be found at {0}".format(OUTPUT_RESULTS_FILE_PATH)) @@ -328,7 +347,7 @@ def parse_args(sys_args): if arg == "": # Don't parse an empty parameter continue - m = re.match("^(?:[-/]*)configuration-path:([\w/\.\-_]+)", arg) # pylint: disable=W1401 + m = re.match(r"^(?:[-/]*)configuration-path:([\w/\.\-_]+)", arg) if not m is None: conf_file_path = m.group(1) if not os.path.exists(conf_file_path): diff --git a/azurelinuxagent/common/agent_supported_feature.py b/azurelinuxagent/common/agent_supported_feature.py index 694c636391..f22a72ea67 100644 --- a/azurelinuxagent/common/agent_supported_feature.py +++ b/azurelinuxagent/common/agent_supported_feature.py @@ -77,14 +77,15 @@ def __init__(self): class _GAVersioningGovernanceFeature(AgentSupportedFeature): """ CRP would drive the RSM update if agent reports that it does support RSM upgrades with this flag otherwise CRP fallback to largest version. - Agent doesn't report supported feature flag if auto update is disabled or old version of agent running that doesn't understand GA versioning. 
+ Agent doesn't report supported feature flag if auto update is disabled or old version of agent running that doesn't understand GA versioning + or if explicitly support for versioning is disabled in agent Note: Especially Windows need this flag to report to CRP that GA doesn't support the updates. So linux adopted same flag to have a common solution. """ __NAME = SupportedFeatureNames.GAVersioningGovernance __VERSION = "1.0" - __SUPPORTED = conf.get_auto_update_to_latest_version() + __SUPPORTED = conf.get_auto_update_to_latest_version() and conf.get_enable_ga_versioning() def __init__(self): super(_GAVersioningGovernanceFeature, self).__init__(name=self.__NAME, diff --git a/azurelinuxagent/common/conf.py b/azurelinuxagent/common/conf.py index 6662285317..b5eec73ce2 100644 --- a/azurelinuxagent/common/conf.py +++ b/azurelinuxagent/common/conf.py @@ -35,7 +35,7 @@ class ConfigurationProvider(object): """ def __init__(self): - self.values = dict() + self.values = {} def load(self, content): if not content: @@ -146,7 +146,8 @@ def load_conf_from_file(conf_file_path, conf=__conf__): "Debug.CgroupDisableOnQuotaCheckFailure": True, "Debug.EnableAgentMemoryUsageCheck": False, "Debug.EnableFastTrack": True, - "Debug.EnableGAVersioning": True + "Debug.EnableGAVersioning": True, + "Debug.EnableCgroupV2ResourceLimiting": False } @@ -200,7 +201,8 @@ def load_conf_from_file(conf_file_path, conf=__conf__): "Debug.EtpCollectionPeriod": 300, "Debug.AutoUpdateHotfixFrequency": 14400, "Debug.AutoUpdateNormalFrequency": 86400, - "Debug.FirewallRulesLogPeriod": 86400 + "Debug.FirewallRulesLogPeriod": 86400, + "Debug.LogCollectorInitialDelay": 5 * 60 } @@ -670,7 +672,7 @@ def get_enable_ga_versioning(conf=__conf__): If True, the agent looks for rsm updates(checking requested version in GS) otherwise it will fall back to self-update and finds the highest version from PIR. NOTE: This option is experimental and may be removed in later versions of the Agent. 
""" - return conf.get_switch("Debug.EnableGAVersioning", False) + return conf.get_switch("Debug.EnableGAVersioning", True) def get_firewall_rules_log_period(conf=__conf__): @@ -680,3 +682,20 @@ def get_firewall_rules_log_period(conf=__conf__): NOTE: This option is experimental and may be removed in later versions of the Agent. """ return conf.get_int("Debug.FirewallRulesLogPeriod", 86400) + + +def get_enable_cgroup_v2_resource_limiting(conf=__conf__): + """ + If True, the agent will enable resource monitoring and enforcement for the log collector on machines using cgroup v2. + NOTE: This option is experimental and may be removed in later versions of the Agent. + """ + return conf.get_switch("Debug.EnableCgroupV2ResourceLimiting", False) + + +def get_log_collector_initial_delay(conf=__conf__): + """ + Determine the initial delay at service start before the first periodic log collection. + + NOTE: This option is experimental and may be removed in later versions of the Agent. + """ + return conf.get_int("Debug.LogCollectorInitialDelay", 5 * 60) diff --git a/azurelinuxagent/common/event.py b/azurelinuxagent/common/event.py index 41238f90b6..7e2b10c991 100644 --- a/azurelinuxagent/common/event.py +++ b/azurelinuxagent/common/event.py @@ -283,7 +283,7 @@ def _encode_message(op, message): def _log_event(name, op, message, duration, is_success=True): - global _EVENT_MSG # pylint: disable=W0603 + global _EVENT_MSG # pylint: disable=W0602, W0603 if not is_success: logger.error(_EVENT_MSG, name, op, message, duration) @@ -429,7 +429,7 @@ def initialize_vminfo_common_parameters(self, protocol): logger.warn("Failed to get VM info from goal state; will be missing from telemetry: {0}", ustr(e)) try: - imds_client = get_imds_client(protocol.get_endpoint()) + imds_client = get_imds_client() imds_info = imds_client.get_compute() parameters[CommonTelemetryEventSchema.Location].value = imds_info.location parameters[CommonTelemetryEventSchema.SubscriptionId].value = 
imds_info.subscriptionId @@ -605,7 +605,7 @@ def add_common_event_parameters(self, event, event_timestamp): TelemetryEventParam(CommonTelemetryEventSchema.OpcodeName, event_timestamp.strftime(logger.Logger.LogTimeFormatInUTC)), TelemetryEventParam(CommonTelemetryEventSchema.EventTid, threading.current_thread().ident), TelemetryEventParam(CommonTelemetryEventSchema.EventPid, os.getpid()), - TelemetryEventParam(CommonTelemetryEventSchema.TaskName, threading.current_thread().getName())] + TelemetryEventParam(CommonTelemetryEventSchema.TaskName, threading.current_thread().name)] if event.eventId == TELEMETRY_EVENT_EVENT_ID and event.providerId == TELEMETRY_EVENT_PROVIDER_ID: # Currently only the GuestAgentExtensionEvents has these columns, the other tables dont have them so skipping diff --git a/azurelinuxagent/common/future.py b/azurelinuxagent/common/future.py index be28ba9d88..bb914775ab 100644 --- a/azurelinuxagent/common/future.py +++ b/azurelinuxagent/common/future.py @@ -61,7 +61,6 @@ range = xrange int = long - if sys.version_info[1] >= 7: from collections import OrderedDict # For Py 2.7+ else: diff --git a/azurelinuxagent/common/logger.py b/azurelinuxagent/common/logger.py index 3d0dc617d3..3506a649ad 100644 --- a/azurelinuxagent/common/logger.py +++ b/azurelinuxagent/common/logger.py @@ -19,7 +19,7 @@ """ import sys from datetime import datetime, timedelta -from threading import currentThread +from threading import current_thread from azurelinuxagent.common.future import ustr @@ -137,7 +137,7 @@ def write_log(log_appender): # pylint: disable=W0612 msg = msg_format time = datetime.utcnow().strftime(Logger.LogTimeFormatInUTC) level_str = LogLevel.STRINGS[level] - thread_name = currentThread().getName() + thread_name = current_thread().name if self.prefix is not None: log_item = u"{0} {1} {2} {3} {4}\n".format(time, level_str, thread_name, self.prefix, msg) else: diff --git a/azurelinuxagent/common/osutil/default.py b/azurelinuxagent/common/osutil/default.py 
index f763d5f5cb..0a0fd0e1cd 100644 --- a/azurelinuxagent/common/osutil/default.py +++ b/azurelinuxagent/common/osutil/default.py @@ -16,6 +16,7 @@ # Requires Python 2.6+ and Openssl 1.0+ # +import array import base64 import datetime import errno @@ -26,15 +27,26 @@ import os import platform import pwd +import random import re import shutil import socket +import string import struct import sys import time from pwd import getpwall -import array +from azurelinuxagent.common.exception import OSUtilError +# 'crypt' was removed in Python 3.13; use legacycrypt instead +if sys.version_info[0] == 3 and sys.version_info[1] >= 13 or sys.version_info[0] > 3: + try: + from legacycrypt import crypt + except ImportError: + def crypt(password, salt): + raise OSUtilError("Please install the legacycrypt Python module to use this feature.") +else: + from crypt import crypt # pylint: disable=deprecated-module from azurelinuxagent.common import conf from azurelinuxagent.common import logger @@ -42,7 +54,6 @@ from azurelinuxagent.common.utils import shellutil from azurelinuxagent.common.utils import textutil -from azurelinuxagent.common.exception import OSUtilError from azurelinuxagent.common.future import ustr, array_to_bytes from azurelinuxagent.common.utils.cryptutil import CryptUtil from azurelinuxagent.common.utils.flexible_version import FlexibleVersion @@ -58,7 +69,7 @@ if needed. 
""" -_IPTABLES_VERSION_PATTERN = re.compile("^[^\d\.]*([\d\.]+).*$") # pylint: disable=W1401 +_IPTABLES_VERSION_PATTERN = re.compile(r"^[^\d\.]*([\d\.]+).*$") _IPTABLES_LOCKING_VERSION = FlexibleVersion('1.4.21') @@ -106,7 +117,7 @@ def get_firewall_delete_conntrack_drop_command(wait, destination): "--ctstate", "INVALID,NEW", "-j", "DROP"]) -PACKET_PATTERN = "^\s*(\d+)\s+(\d+)\s+DROP\s+.*{0}[^\d]*$" # pylint: disable=W1401 +PACKET_PATTERN = r"^\s*(\d+)\s+(\d+)\s+DROP\s+.*{0}[^\d]*$" ALL_CPUS_REGEX = re.compile('^cpu .*') ALL_MEMS_REGEX = re.compile('^Mem.*') @@ -123,7 +134,7 @@ def get_firewall_delete_conntrack_drop_command(wait, destination): IOCTL_SIOCGIFHWADDR = 0x8927 IFNAMSIZ = 16 -IP_COMMAND_OUTPUT = re.compile('^\d+:\s+(\w+):\s+(.*)$') # pylint: disable=W1401 +IP_COMMAND_OUTPUT = re.compile(r'^\d+:\s+(\w+):\s+(.*)$') STORAGE_DEVICE_PATH = '/sys/bus/vmbus/devices/' GEN2_DEVICE_ID = 'f8b3781a-1e82-4818-a1c3-63d806ec15bb' @@ -433,11 +444,21 @@ def chpasswd(self, username, password, crypt_id=6, salt_len=10): if self.is_sys_user(username): raise OSUtilError(("User {0} is a system user, " "will not set password.").format(username)) - passwd_hash = textutil.gen_password_hash(password, crypt_id, salt_len) + passwd_hash = DefaultOSUtil.gen_password_hash(password, crypt_id, salt_len) self._run_command_raising_OSUtilError(["usermod", "-p", passwd_hash, username], err_msg="Failed to set password for {0}".format(username)) + @staticmethod + def gen_password_hash(password, crypt_id, salt_len): + collection = string.ascii_letters + string.digits + salt = ''.join(random.choice(collection) for _ in range(salt_len)) + salt = "${0}${1}".format(crypt_id, salt) + if sys.version_info[0] == 2: + # if python 2.*, encode to type 'str' to prevent Unicode Encode Error from crypt.crypt + password = password.encode('utf-8') + return crypt(password, salt) + def get_users(self): return getpwall() @@ -1138,7 +1159,7 @@ def route_add(self, net, mask, gateway): # pylint: disable=W0613 Add 
specified route """ try: - cmd = ["ip", "route", "add", net, "via", gateway] + cmd = ["ip", "route", "add", str(net), "via", gateway] return shellutil.run_command(cmd) except CommandError: return "" diff --git a/azurelinuxagent/common/osutil/factory.py b/azurelinuxagent/common/osutil/factory.py index e2f15afb56..fd66fbb0e9 100644 --- a/azurelinuxagent/common/osutil/factory.py +++ b/azurelinuxagent/common/osutil/factory.py @@ -16,10 +16,9 @@ # -from distutils.version import LooseVersion as Version # pylint: disable=no-name-in-module, import-error - import azurelinuxagent.common.logger as logger from azurelinuxagent.common.version import DISTRO_NAME, DISTRO_CODE_NAME, DISTRO_VERSION, DISTRO_FULL_NAME +from azurelinuxagent.common.utils.distro_version import DistroVersion from .alpine import AlpineOSUtil from .arch import ArchUtil from .bigip import BigIpOSUtil @@ -66,14 +65,14 @@ def _get_osutil(distro_name, distro_code_name, distro_version, distro_full_name) return ClearLinuxUtil() if distro_name == "ubuntu": - ubuntu_version = Version(distro_version) - if ubuntu_version in [Version("12.04"), Version("12.10")]: + ubuntu_version = DistroVersion(distro_version) + if ubuntu_version in [DistroVersion("12.04"), DistroVersion("12.10")]: return Ubuntu12OSUtil() - if ubuntu_version in [Version("14.04"), Version("14.10")]: + if ubuntu_version in [DistroVersion("14.04"), DistroVersion("14.10")]: return Ubuntu14OSUtil() - if ubuntu_version in [Version('16.04'), Version('16.10'), Version('17.04')]: + if ubuntu_version in [DistroVersion('16.04'), DistroVersion('16.10'), DistroVersion('17.04')]: return Ubuntu16OSUtil() - if Version('18.04') <= ubuntu_version <= Version('24.04'): + if DistroVersion('18.04') <= ubuntu_version <= DistroVersion('24.04'): return Ubuntu18OSUtil() if distro_full_name == "Snappy Ubuntu Core": return UbuntuSnappyOSUtil() @@ -89,16 +88,16 @@ def _get_osutil(distro_name, distro_code_name, distro_version, distro_full_name) if distro_name in ("flatcar", 
"coreos") or distro_code_name in ("flatcar", "coreos"): return CoreOSUtil() - if distro_name in ("suse", "sle_hpc", "sles", "opensuse"): + if distro_name in ("suse", "sle-micro", "sle_hpc", "sles", "opensuse"): if distro_full_name == 'SUSE Linux Enterprise Server' \ - and Version(distro_version) < Version('12') \ - or distro_full_name == 'openSUSE' and Version(distro_version) < Version('13.2'): + and DistroVersion(distro_version) < DistroVersion('12') \ + or distro_full_name == 'openSUSE' and DistroVersion(distro_version) < DistroVersion('13.2'): return SUSE11OSUtil() return SUSEOSUtil() if distro_name == "debian": - if "sid" in distro_version or Version(distro_version) > Version("7"): + if "sid" in distro_version or DistroVersion(distro_version) > DistroVersion("7"): return DebianOSModernUtil() return DebianOSBaseUtil() @@ -109,16 +108,15 @@ def _get_osutil(distro_name, distro_code_name, distro_version, distro_full_name) # to distinguish between debian and devuan. The new distro.linux_distribution module # is able to distinguish between the two. 
- if distro_name == "devuan" and Version(distro_version) >= Version("4"): + if distro_name == "devuan" and DistroVersion(distro_version) >= DistroVersion("4"): return DevuanOSUtil() - if distro_name in ("redhat", "rhel", "centos", "oracle", "almalinux", "cloudlinux", "rocky"): - if Version(distro_version) < Version("7"): + if DistroVersion(distro_version) < DistroVersion("7"): return Redhat6xOSUtil() - if Version(distro_version) >= Version("8.6"): + if DistroVersion(distro_version) >= DistroVersion("8.6"): return RedhatOSModernUtil() return RedhatOSUtil() @@ -144,7 +142,7 @@ def _get_osutil(distro_name, distro_code_name, distro_version, distro_full_name) if distro_name == "iosxe": return IosxeOSUtil() - if distro_name == "mariner": + if distro_name in ["mariner", "azurelinux"]: return MarinerOSUtil() if distro_name == "nsbsd": diff --git a/azurelinuxagent/common/osutil/freebsd.py b/azurelinuxagent/common/osutil/freebsd.py index f8ee6db81b..1fcfa91677 100644 --- a/azurelinuxagent/common/osutil/freebsd.py +++ b/azurelinuxagent/common/osutil/freebsd.py @@ -77,7 +77,7 @@ def chpasswd(self, username, password, crypt_id=6, salt_len=10): if self.is_sys_user(username): raise OSUtilError(("User {0} is a system user, " "will not set password.").format(username)) - passwd_hash = textutil.gen_password_hash(password, crypt_id, salt_len) + passwd_hash = DefaultOSUtil.gen_password_hash(password, crypt_id, salt_len) self._run_command_raising_OSUtilError(['pw', 'usermod', username, '-H', '0'], cmd_input=passwd_hash, err_msg="Failed to set password for {0}".format(username)) @@ -150,7 +150,7 @@ def _get_netstat_rn_ipv4_routes(): route_header_line = output_lines.index("Internet:") + 1 # Parse the file structure and left justify the routes route_start_line = route_header_line + 1 - route_line_length = max([len(line) for line in output_lines[route_header_line:]]) + route_line_length = max(len(line) for line in output_lines[route_header_line:]) netstat_route_list = 
[line.ljust(route_line_length) for line in output_lines[route_start_line:]] # Parse the headers _route_headers = output_lines[route_header_line].split() @@ -551,7 +551,7 @@ def device_for_ide_port(self, port_id): err, output = shellutil.run_get_output(cmd_search_blkvsc) if err == 0: output = output.rstrip() - cmd_search_dev = "camcontrol devlist | grep {0} | awk -F \( '{{print $2}}'|sed -e 's/.*(//'| sed -e 's/).*//'".format(output) # pylint: disable=W1401 + cmd_search_dev = "camcontrol devlist | grep {0} | awk -F \\( '{{print $2}}'|sed -e 's/.*(//'| sed -e 's/).*//'".format(output) err, output = shellutil.run_get_output(cmd_search_dev) if err == 0: for possible in output.rstrip().split(','): @@ -562,7 +562,7 @@ def device_for_ide_port(self, port_id): err, output = shellutil.run_get_output(cmd_search_storvsc) if err == 0: output = output.rstrip() - cmd_search_dev = "camcontrol devlist | grep {0} | awk -F \( '{{print $2}}'|sed -e 's/.*(//'| sed -e 's/).*//'".format(output) # pylint: disable=W1401 + cmd_search_dev = "camcontrol devlist | grep {0} | awk -F \\( '{{print $2}}'|sed -e 's/.*(//'| sed -e 's/).*//'".format(output) err, output = shellutil.run_get_output(cmd_search_dev) if err == 0: for possible in output.rstrip().split(','): diff --git a/azurelinuxagent/common/osutil/gaia.py b/azurelinuxagent/common/osutil/gaia.py index 849d5d1fa1..e11f482db5 100644 --- a/azurelinuxagent/common/osutil/gaia.py +++ b/azurelinuxagent/common/osutil/gaia.py @@ -29,7 +29,6 @@ from azurelinuxagent.common.utils.cryptutil import CryptUtil import azurelinuxagent.common.utils.fileutil as fileutil import azurelinuxagent.common.utils.shellutil as shellutil -import azurelinuxagent.common.utils.textutil as textutil class GaiaOSUtil(DefaultOSUtil): @@ -64,7 +63,7 @@ def useradd(self, username, expiration=None, comment=None): def chpasswd(self, username, password, crypt_id=6, salt_len=10): logger.info('chpasswd') - passwd_hash = textutil.gen_password_hash(password, crypt_id, salt_len) + 
passwd_hash = DefaultOSUtil.gen_password_hash(password, crypt_id, salt_len) ret, out = self._run_clish( 'set user admin password-hash ' + passwd_hash) if ret != 0: @@ -179,7 +178,7 @@ def _address_to_string(self, addr): return socket.inet_ntoa(struct.pack("!I", addr)) def _get_prefix(self, mask): - return str(sum([bin(int(x)).count('1') for x in mask.split('.')])) + return str(sum(bin(int(x)).count('1') for x in mask.split('.'))) def route_add(self, net, mask, gateway): logger.info('route_add {0} {1} {2}', net, mask, gateway) diff --git a/azurelinuxagent/common/osutil/openwrt.py b/azurelinuxagent/common/osutil/openwrt.py index d99f0321c5..81e352eb2e 100644 --- a/azurelinuxagent/common/osutil/openwrt.py +++ b/azurelinuxagent/common/osutil/openwrt.py @@ -25,15 +25,16 @@ from azurelinuxagent.common.osutil.default import DefaultOSUtil from azurelinuxagent.common.utils.networkutil import NetworkInterfaceCard -class OpenWRTOSUtil(DefaultOSUtil): +class OpenWRTOSUtil(DefaultOSUtil): def __init__(self): super(OpenWRTOSUtil, self).__init__() self.agent_conf_file_path = '/etc/waagent.conf' self.dhclient_name = 'udhcpc' - self.ip_command_output = re.compile('^\d+:\s+(\w+):\s+(.*)$') # pylint: disable=W1401 self.jit_enabled = True - + + _ip_command_output = re.compile(r'^\d+:\s+(\w+):\s+(.*)$') + def eject_dvd(self, chk_err=True): logger.warn('eject is not supported on OpenWRT') @@ -79,18 +80,18 @@ def get_nic_state(self, as_string=False): return {} for entry in output.splitlines(): - result = self.ip_command_output.match(entry) + result = OpenWRTOSUtil._ip_command_output.match(entry) if result: name = result.group(1) state[name] = NetworkInterfaceCard(name, result.group(2)) - self._update_nic_state(state, "ip -o -f inet address", NetworkInterfaceCard.add_ipv4, "an IPv4 address") self._update_nic_state(state, "ip -o -f inet6 address", NetworkInterfaceCard.add_ipv6, "an IPv6 address") return state - def _update_nic_state(self, state, ip_command, handler, description): + 
@staticmethod + def _update_nic_state(state, ip_command, handler, description): """ Update the state of NICs based on the output of a specified ip subcommand. @@ -104,7 +105,7 @@ def _update_nic_state(self, state, ip_command, handler, description): return for entry in output.splitlines(): - result = self.ip_command_output.match(entry) + result = OpenWRTOSUtil._ip_command_output.match(entry) if result: interface_name = result.group(1) if interface_name in state: diff --git a/azurelinuxagent/common/protocol/goal_state.py b/azurelinuxagent/common/protocol/goal_state.py index 2eb89c1ebb..607710e3f3 100644 --- a/azurelinuxagent/common/protocol/goal_state.py +++ b/azurelinuxagent/common/protocol/goal_state.py @@ -212,7 +212,7 @@ def update(self, silent=False): except GoalStateInconsistentError as e: message = "Detected an inconsistency in the goal state: {0}".format(ustr(e)) self.logger.warn(message) - add_event(op=WALAEventOperation.GoalState, is_success=False, message=message) + add_event(op=WALAEventOperation.GoalState, is_success=False, log_event=False, message=message) self._update(force_update=True) @@ -503,7 +503,7 @@ def _fetch_full_wire_server_goal_state(self, incarnation, xml_doc): if GoalStateProperties.RemoteAccessInfo & self._goal_state_properties: remote_access_uri = findtext(container, "RemoteAccessInfo") if remote_access_uri is not None: - xml_text = self._wire_client.fetch_config(remote_access_uri, self._wire_client.get_header_for_cert()) + xml_text = self._wire_client.fetch_config(remote_access_uri, self._wire_client.get_header_for_remote_access()) remote_access = RemoteAccess(xml_text) if self._save_to_history: self._history.save_remote_access(xml_text) diff --git a/azurelinuxagent/common/protocol/healthservice.py b/azurelinuxagent/common/protocol/healthservice.py index 3abe7299b9..e227cb9279 100644 --- a/azurelinuxagent/common/protocol/healthservice.py +++ b/azurelinuxagent/common/protocol/healthservice.py @@ -73,7 +73,7 @@ def __init__(self, 
endpoint): self.api = HealthService.API self.version = HealthService.VERSION self.source = HealthService.OBSERVER_NAME - self.observations = list() + self.observations = [] @property def as_json(self): diff --git a/azurelinuxagent/common/protocol/hostplugin.py b/azurelinuxagent/common/protocol/hostplugin.py index 0aaff2184d..cdc0219ae2 100644 --- a/azurelinuxagent/common/protocol/hostplugin.py +++ b/azurelinuxagent/common/protocol/hostplugin.py @@ -20,6 +20,7 @@ import datetime import json import os.path +import threading import uuid from azurelinuxagent.common import logger, conf @@ -423,19 +424,24 @@ def _get_fast_track_state_file(): # This file keeps the timestamp of the most recent goal state if it was retrieved via Fast Track return os.path.join(conf.get_lib_dir(), "fast_track.json") + # Multiple threads create instances of HostPluginProtocol; we use this lock to protect access to the state file for Fast Track + _fast_track_state_lock = threading.RLock() + @staticmethod def _save_fast_track_state(timestamp): try: - with open(HostPluginProtocol._get_fast_track_state_file(), "w") as file_: - json.dump({"timestamp": timestamp}, file_) + with HostPluginProtocol._fast_track_state_lock: + with open(HostPluginProtocol._get_fast_track_state_file(), "w") as file_: + json.dump({"timestamp": timestamp}, file_) except Exception as e: logger.warn("Error updating the Fast Track state ({0}): {1}", HostPluginProtocol._get_fast_track_state_file(), ustr(e)) @staticmethod def clear_fast_track_state(): try: - if os.path.exists(HostPluginProtocol._get_fast_track_state_file()): - os.remove(HostPluginProtocol._get_fast_track_state_file()) + with HostPluginProtocol._fast_track_state_lock: + if os.path.exists(HostPluginProtocol._get_fast_track_state_file()): + os.remove(HostPluginProtocol._get_fast_track_state_file()) except Exception as e: logger.warn("Error clearing the current state for Fast Track ({0}): {1}", HostPluginProtocol._get_fast_track_state_file(), ustr(e)) @@ -446,16 
+452,17 @@ def get_fast_track_timestamp(): Returns the timestamp of the most recent FastTrack goal state retrieved by fetch_vm_settings(), or None if the most recent goal state was Fabric or fetch_vm_settings() has not been invoked. """ - if not os.path.exists(HostPluginProtocol._get_fast_track_state_file()): - return timeutil.create_timestamp(datetime.datetime.min) + with HostPluginProtocol._fast_track_state_lock: + if not os.path.exists(HostPluginProtocol._get_fast_track_state_file()): + return timeutil.create_timestamp(datetime.datetime.min) - try: - with open(HostPluginProtocol._get_fast_track_state_file(), "r") as file_: - return json.load(file_)["timestamp"] - except Exception as e: - logger.warn("Can't retrieve the timestamp for the most recent Fast Track goal state ({0}), will assume the current time. Error: {1}", - HostPluginProtocol._get_fast_track_state_file(), ustr(e)) - return timeutil.create_timestamp(datetime.datetime.utcnow()) + try: + with open(HostPluginProtocol._get_fast_track_state_file(), "r") as file_: + return json.load(file_)["timestamp"] + except Exception as e: + logger.warn("Can't retrieve the timestamp for the most recent Fast Track goal state ({0}), will assume the current time. Error: {1}", + HostPluginProtocol._get_fast_track_state_file(), ustr(e)) + return timeutil.create_timestamp(datetime.datetime.utcnow()) def fetch_vm_settings(self, force_update=False): """ diff --git a/azurelinuxagent/common/protocol/imds.py b/azurelinuxagent/common/protocol/imds.py index 5b9e206a13..fba88e0eee 100644 --- a/azurelinuxagent/common/protocol/imds.py +++ b/azurelinuxagent/common/protocol/imds.py @@ -27,8 +27,8 @@ IMDS_INTERNAL_SERVER_ERROR = 3 -def get_imds_client(wireserver_endpoint): - return ImdsClient(wireserver_endpoint) +def get_imds_client(): + return ImdsClient() # A *slightly* future proof list of endorsed distros. 
@@ -256,7 +256,7 @@ def image_origin(self): class ImdsClient(object): - def __init__(self, wireserver_endpoint, version=APIVERSION): + def __init__(self, version=APIVERSION): self._api_version = version self._headers = { 'User-Agent': restutil.HTTP_USER_AGENT, @@ -268,7 +268,6 @@ def __init__(self, wireserver_endpoint, version=APIVERSION): } self._regex_ioerror = re.compile(r".*HTTP Failed. GET http://[^ ]+ -- IOError .*") self._regex_throttled = re.compile(r".*HTTP Retry. GET http://[^ ]+ -- Status Code 429 .*") - self._wireserver_endpoint = wireserver_endpoint def _get_metadata_url(self, endpoint, resource_path): return BASE_METADATA_URI.format(endpoint, resource_path, self._api_version) @@ -326,14 +325,12 @@ def get_metadata(self, resource_path, is_health): endpoint = IMDS_ENDPOINT status, resp = self._get_metadata_from_endpoint(endpoint, resource_path, headers) - if status == IMDS_CONNECTION_ERROR: - endpoint = self._wireserver_endpoint - status, resp = self._get_metadata_from_endpoint(endpoint, resource_path, headers) if status == IMDS_RESPONSE_SUCCESS: return MetadataResult(True, False, resp) elif status == IMDS_INTERNAL_SERVER_ERROR: return MetadataResult(False, True, resp) + # else it's a client-side error, e.g. 
IMDS_CONNECTION_ERROR return MetadataResult(False, False, resp) def get_compute(self): diff --git a/azurelinuxagent/common/protocol/wire.py b/azurelinuxagent/common/protocol/wire.py index c93624cb1b..751dd2afb8 100644 --- a/azurelinuxagent/common/protocol/wire.py +++ b/azurelinuxagent/common/protocol/wire.py @@ -38,7 +38,7 @@ ResourceGoneError, ExtensionDownloadError, InvalidContainerError, ProtocolError, HttpError, ExtensionErrorCodes from azurelinuxagent.common.future import httpclient, bytebuffer, ustr from azurelinuxagent.common.protocol.goal_state import GoalState, TRANSPORT_CERT_FILE_NAME, TRANSPORT_PRV_FILE_NAME, \ - GoalStateProperties + GoalStateProperties, GoalStateInconsistentError from azurelinuxagent.common.protocol.hostplugin import HostPluginProtocol from azurelinuxagent.common.protocol.restapi import DataContract, ProvisionStatus, VMInfo, VMStatus from azurelinuxagent.common.telemetryevent import GuestAgentExtensionEventsSchema @@ -86,7 +86,22 @@ def detect(self, init_goal_state=True, save_to_history=False): # Initialize the goal state, including all the inner properties if init_goal_state: logger.info('Initializing goal state during protocol detection') - self.client.reset_goal_state(save_to_history=save_to_history) + # + # TODO: Currently protocol detection retrieves the entire goal state. This is not needed; in particular, retrieving the Extensions goal state + # is not needed. However, the goal state is cached in self.client._goal_state and other components, including the Extension Handler, + # depend on this cached value. This has been a long-standing issue that causes multiple problems. Before removing the cached goal state, + # though, a careful review of these dependencies is needed. + # + # One of the problems of fetching the full goal state is that issues while retrieving it can block protocol detection and make the + # Agent go into a retry loop that can last 1 full hour. 
One particular error, GoalStateInconsistentError, can arise if the certificates + # needed by extensions are missing from the goal state; for example, if a FastTrack goal state is out of sync with the corresponding + # Fabric goal state that contains the certificates, or if decryption of the certificates fais (and hence, the certificate list is + # empty). The try/except below handles only this one particular problem. + # + try: + self.client.reset_goal_state(save_to_history=save_to_history) + except GoalStateInconsistentError as error: + logger.warn("{0}", ustr(error)) def update_host_plugin_from_goal_state(self): self.client.update_host_plugin_from_goal_state() @@ -682,8 +697,8 @@ def _try_expand_zip_package(package_type, target_file, target_directory): if os.path.exists(target_directory): try: shutil.rmtree(target_directory) - except Exception as exception: - logger.warn("Cannot delete {0}: {1}", target_directory, ustr(exception)) + except Exception as rmtree_exception: + logger.warn("Cannot delete {0}: {1}", target_directory, ustr(rmtree_exception)) raise finally: try: @@ -886,11 +901,11 @@ def _call_hostplugin_with_container_check(self, host_func): message=msg, log_event=True) return ret - except (ResourceGoneError, InvalidContainerError) as error: + except (ResourceGoneError, InvalidContainerError) as host_error: msg = "[PERIODIC] Request failed using the host plugin channel after goal state refresh. " \ "ContainerId changed from {0} to {1}, role config file changed from {2} to {3}. 
" \ "Exception type: {4}.".format(old_container_id, new_container_id, old_role_config_name, - new_role_config_name, type(error).__name__) + new_role_config_name, type(host_error).__name__) add_periodic(delta=logger.EVERY_SIX_HOURS, name=AGENT_NAME, version=CURRENT_VERSION, @@ -1126,6 +1141,12 @@ def get_header_for_xml_content(self): } def get_header_for_cert(self): + return self._get_header_for_encrypted_request("DES_EDE3_CBC") + + def get_header_for_remote_access(self): + return self._get_header_for_encrypted_request("AES128_CBC") + + def _get_header_for_encrypted_request(self, cypher): trans_cert_file = os.path.join(conf.get_lib_dir(), TRANSPORT_CERT_FILE_NAME) try: content = fileutil.read_file(trans_cert_file) @@ -1136,7 +1157,7 @@ def get_header_for_cert(self): return { "x-ms-agent-name": "WALinuxAgent", "x-ms-version": PROTOCOL_VERSION, - "x-ms-cipher-name": "DES_EDE3_CBC", + "x-ms-cipher-name": cypher, "x-ms-guest-agent-public-x509-cert": cert } diff --git a/azurelinuxagent/common/singletonperthread.py b/azurelinuxagent/common/singletonperthread.py index 0d9139012e..c7bcda803d 100644 --- a/azurelinuxagent/common/singletonperthread.py +++ b/azurelinuxagent/common/singletonperthread.py @@ -1,4 +1,4 @@ -from threading import Lock, currentThread +from threading import Lock, current_thread class _SingletonPerThreadMetaClass(type): @@ -8,7 +8,8 @@ class _SingletonPerThreadMetaClass(type): def __call__(cls, *args, **kwargs): with cls._lock: - obj_name = "%s__%s" % (cls.__name__, currentThread().getName()) # Object Name = className__threadName + # Object Name = className__threadName + obj_name = "%s__%s" % (cls.__name__, current_thread().name) if obj_name not in cls._instances: cls._instances[obj_name] = super(_SingletonPerThreadMetaClass, cls).__call__(*args, **kwargs) return cls._instances[obj_name] diff --git a/azurelinuxagent/common/utils/cryptutil.py b/azurelinuxagent/common/utils/cryptutil.py index b7c9422747..bed829ae67 100644 --- 
a/azurelinuxagent/common/utils/cryptutil.py +++ b/azurelinuxagent/common/utils/cryptutil.py @@ -132,7 +132,7 @@ def asn1_to_ssh(self, pubkey): keydata_base64 = base64.b64encode(bytebuffer(keydata)) return ustr(b"ssh-rsa " + keydata_base64 + b"\n", encoding='utf-8') - except ImportError as e: + except ImportError: raise CryptError("Failed to load pyasn1.codec.der") def num_to_bytes(self, num): diff --git a/azurelinuxagent/common/utils/distro_version.py b/azurelinuxagent/common/utils/distro_version.py new file mode 100644 index 0000000000..8a447f6b21 --- /dev/null +++ b/azurelinuxagent/common/utils/distro_version.py @@ -0,0 +1,115 @@ +# Microsoft Azure Linux Agent +# +# Copyright 2020 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# Requires Python 2.6+ and Openssl 1.0+ +# + +""" +""" + +import re + + +class DistroVersion(object): + """ + Distro versions (as exposed by azurelinuxagent.common.version.DISTRO_VERSION) can be very arbitrary: + + 9.2.0 + 0.0.0.0_99496 + 10.0_RC2 + 1.4-rolling-202402090309 + 2015.11-git + 2023 + 2023.02.1 + 2.1-systemd-rc1 + 2308a + 3.11.2-dev20240212t1512utc-autotag + 3.11.2-rc.1 + 3.1.22-1.8 + 8.1.3-p1-24838 + 8.1.3-p8-khilan.unadkat-08415223c9a99546b566df0dbc683ffa378cfd77 + 9.13.1P8X1 + 9.13.1RC1 + 9.2.0-beta1-25971 + a + ArrayOS + bookworm/sid + Clawhammer__9.14.0 + FFFF + h + JNPR-11.0-20200922.4042921_build + lighthouse-23.10.0 + Lighthouse__9.13.1 + linux-os-31700 + Mightysquirrel__9.15.0 + n/a + NAME="SLES" + ngfw-6.10.13.26655.fips.2 + r11427-9ce6aa9d8d + SonicOSX 7.1.1-7047-R3003-HF24239 + unstable + vsbc-x86_pi3-6.10.3 + vsbc-x86_pi3-6.12.2pre02 + + The DistroVersion allows to compare these versions following an strategy similar to the now deprecated distutils.LooseVersion: + versions consist of a series of sequences of numbers, alphabetic characters, or any other characters, optionally separated dots + (the dots themselves are stripped out). When comparing versions the numeric components are compared numerically, while the + other components are compared lexicographically. + + NOTE: For entities with simpler version schemes (e.g. extensions and the Agent), use FlexibleVersion. + + """ + def __init__(self, version): + self._version = version + self._fragments = [ + int(x) if DistroVersion._number_re.match(x) else x + for x in DistroVersion._fragment_re.split(self._version) if x != '' and x != '.' 
+ ] + + _fragment_re = re.compile(r'(\d+|[a-z]+|\.)', re.IGNORECASE) + + _number_re = re.compile(r'\d+') + + def __str__(self): + return self._version + + def __repr__(self): + return str(self) + + def __eq__(self, other): + return self._compare(other) == 0 + + def __lt__(self, other): + return self._compare(other) < 0 + + def __le__(self, other): + return self._compare(other) <= 0 + + def __gt__(self, other): + return self._compare(other) > 0 + + def __ge__(self, other): + return self._compare(other) >= 0 + + def _compare(self, other): + if isinstance(other, str): + other = DistroVersion(other) + + if self._fragments < other._fragments: + return -1 + if self._fragments > other._fragments: + return 1 + return 0 diff --git a/azurelinuxagent/common/utils/flexible_version.py b/azurelinuxagent/common/utils/flexible_version.py index c616da1ca3..40fd5306c4 100644 --- a/azurelinuxagent/common/utils/flexible_version.py +++ b/azurelinuxagent/common/utils/flexible_version.py @@ -17,13 +17,16 @@ # Requires Python 2.6+ and Openssl 1.0+ # -from distutils import version # pylint: disable=no-name-in-module import re -class FlexibleVersion(version.Version): +class FlexibleVersion(object): """ - A more flexible implementation of distutils.version.StrictVersion + A more flexible implementation of distutils.version.StrictVersion. + + NOTE: Use this class for generic version comparisons, e.g. extension and Agent + versions. Distro versions can be very arbitrary and should be handled + using the DistroVersion class. The implementation allows to specify: - an arbitrary number of version numbers: @@ -41,8 +44,6 @@ class FlexibleVersion(version.Version): """ def __init__(self, vstring=None, sep='.', prerel_tags=('alpha', 'beta', 'rc')): - version.Version.__init__(self) - if sep is None: sep = '.' 
if prerel_tags is None: @@ -195,7 +196,7 @@ def _compile_pattern(self): if self.prerel_tags: tags = '|'.join(re.escape(tag) for tag in self.prerel_tags) self.prerel_tags_set = dict(zip(self.prerel_tags, range(len(self.prerel_tags)))) - release_re = '(?:{prerel_sep}(?P<{tn}>{tags})(?P<{nn}>\d*))?'.format( # pylint: disable=W1401 + release_re = r'(?:{prerel_sep}(?P<{tn}>{tags})(?P<{nn}>\d*))?'.format( prerel_sep=self._re_prerel_sep, tags=tags, tn=self._nn_prerel_tag, diff --git a/azurelinuxagent/common/utils/textutil.py b/azurelinuxagent/common/utils/textutil.py index 1ff7a7e912..4a0f9a7541 100644 --- a/azurelinuxagent/common/utils/textutil.py +++ b/azurelinuxagent/common/utils/textutil.py @@ -17,11 +17,8 @@ # Requires Python 2.6+ and Openssl 1.0+ import base64 -import crypt import hashlib -import random import re -import string import struct import sys import traceback @@ -287,16 +284,6 @@ def remove_bom(c): return c -def gen_password_hash(password, crypt_id, salt_len): - collection = string.ascii_letters + string.digits - salt = ''.join(random.choice(collection) for _ in range(salt_len)) - salt = "${0}${1}".format(crypt_id, salt) - if sys.version_info[0] == 2: - # if python 2.*, encode to type 'str' to prevent Unicode Encode Error from crypt.crypt - password = password.encode('utf-8') - return crypt.crypt(password, salt) - - def get_bytes_from_pem(pem_str): base64_bytes = "" for line in pem_str.split('\n'): diff --git a/azurelinuxagent/common/version.py b/azurelinuxagent/common/version.py index d9c088b4a5..9700236c0d 100644 --- a/azurelinuxagent/common/version.py +++ b/azurelinuxagent/common/version.py @@ -62,8 +62,8 @@ def get_f5_platform(): the version and product information is contained in the /VERSION file. 
""" result = [None, None, None, None] - f5_version = re.compile("^Version: (\d+\.\d+\.\d+)") # pylint: disable=W1401 - f5_product = re.compile("^Product: ([\w-]+)") # pylint: disable=W1401 + f5_version = re.compile(r"^Version: (\d+\.\d+\.\d+)") + f5_product = re.compile(r"^Product: ([\w-]+)") with open('/VERSION', 'r') as fh: content = fh.readlines() @@ -105,15 +105,15 @@ def get_checkpoint_platform(): def get_distro(): if 'FreeBSD' in platform.system(): - release = re.sub('\-.*\Z', '', ustr(platform.release())) # pylint: disable=W1401 + release = re.sub(r'\-.*\Z', '', ustr(platform.release())) osinfo = ['freebsd', release, '', 'freebsd'] elif 'OpenBSD' in platform.system(): - release = re.sub('\-.*\Z', '', ustr(platform.release())) # pylint: disable=W1401 + release = re.sub(r'\-.*\Z', '', ustr(platform.release())) osinfo = ['openbsd', release, '', 'openbsd'] elif 'Linux' in platform.system(): osinfo = get_linux_distribution(0, 'alpine') elif 'NS-BSD' in platform.system(): - release = re.sub('\-.*\Z', '', ustr(platform.release())) # pylint: disable=W1401 + release = re.sub(r'\-.*\Z', '', ustr(platform.release())) osinfo = ['nsbsd', release, '', 'nsbsd'] else: try: @@ -209,7 +209,7 @@ def has_logrotate(): # # When doing a release, be sure to use the actual agent version. Current agent version: 2.4.0.0 # -AGENT_VERSION = '2.11.1.12' +AGENT_VERSION = '2.12.0.2' AGENT_LONG_VERSION = "{0}-{1}".format(AGENT_NAME, AGENT_VERSION) AGENT_DESCRIPTION = """ The Azure Linux Agent supports the provisioning and running of Linux @@ -222,13 +222,13 @@ def has_logrotate(): AGENT_PATTERN = "{0}-(.*)".format(AGENT_NAME) AGENT_NAME_PATTERN = re.compile(AGENT_PATTERN) -AGENT_PKG_PATTERN = re.compile(AGENT_PATTERN+"\.zip") # pylint: disable=W1401 +AGENT_PKG_PATTERN = re.compile(AGENT_PATTERN+r"\.zip") AGENT_DIR_PATTERN = re.compile(".*/{0}".format(AGENT_PATTERN)) # The execution mode of the VM - IAAS or PAAS. Linux VMs are only executed in IAAS mode. 
AGENT_EXECUTION_MODE = "IAAS" -EXT_HANDLER_PATTERN = b".*/WALinuxAgent-(\d+.\d+.\d+[.\d+]*).*-run-exthandlers" # pylint: disable=W1401 +EXT_HANDLER_PATTERN = br".*/WALinuxAgent-(\d+.\d+.\d+[.\d+]*).*-run-exthandlers" EXT_HANDLER_REGEX = re.compile(EXT_HANDLER_PATTERN) __distro__ = get_distro() diff --git a/azurelinuxagent/daemon/main.py b/azurelinuxagent/daemon/main.py index 342daf4ac9..3a3923a8ff 100644 --- a/azurelinuxagent/daemon/main.py +++ b/azurelinuxagent/daemon/main.py @@ -160,7 +160,7 @@ def daemon(self, child_args=None): # current values. protocol = self.protocol_util.get_protocol() - goal_state = GoalState(protocol, goal_state_properties=GoalStateProperties.SharedConfig) + goal_state = GoalState(protocol.client, goal_state_properties=GoalStateProperties.SharedConfig) setup_rdma_device(nd_version, goal_state.shared_conf) except Exception as e: diff --git a/azurelinuxagent/ga/agent_update_handler.py b/azurelinuxagent/ga/agent_update_handler.py index 8caec10873..ee6a44f9f7 100644 --- a/azurelinuxagent/ga/agent_update_handler.py +++ b/azurelinuxagent/ga/agent_update_handler.py @@ -29,24 +29,44 @@ from azurelinuxagent.ga.self_update_version_updater import SelfUpdateVersionUpdater +class UpdateMode(object): + """ + Enum for Update modes + """ + RSM = "RSM" + SelfUpdate = "SelfUpdate" + + def get_agent_update_handler(protocol): return AgentUpdateHandler(protocol) +RSM_UPDATE_STATE_FILE = "waagent_rsm_update" +INITIAL_UPDATE_STATE_FILE = "waagent_initial_update" + + class AgentUpdateHandler(object): """ This class handles two type of agent updates. Handler initializes the updater to SelfUpdateVersionUpdater and switch to appropriate updater based on below conditions: - RSM update: This is the update requested by RSM. 
The contract between CRP and agent is we get following properties in the goal state: + RSM update: This update requested by RSM and contract between CRP and agent is we get following properties in the goal state: version: it will have what version to update isVersionFromRSM: True if the version is from RSM deployment. isVMEnabledForRSMUpgrades: True if the VM is enabled for RSM upgrades. - if vm enabled for RSM upgrades, we use RSM update path. But if requested update is not by rsm deployment + if vm enabled for RSM upgrades, we use RSM update path. But if requested update is not by rsm deployment( if isVersionFromRSM:False) we ignore the update. - Self update: We fallback to this if above is condition not met. This update to the largest version available in the manifest + Self update: We fallback to this if above condition not met. This update to the largest version available in the manifest. + Also, we use self-update for initial update due to [1][2] Note: Self-update don't support downgrade. - Handler keeps the rsm state of last update is with RSM or not on every new goal state. Once handler decides which updater to use, then - does following steps: + [1] New vms that are enrolled into RSM, they get isVMEnabledForRSMUpgrades as True and isVersionFromRSM as False in first goal state. As per RSM update flow mentioned above, + we don't apply the update if isVersionFromRSM is false. Consequently, new vms remain on pre-installed agent until RSM drives a new version update. In the meantime, agent may process the extensions with the baked version. + This can potentially lead to issues due to incompatibility. + [2] If current version is N, and we are deploying N+1. We find an issue on N+1 and remove N+1 from PIR. If CRP created the initial goal state for a new vm + before the delete, the version in the goal state would be N+1; If the agent starts processing the goal state after the deleting, it won't find N+1 and update will fail and + the vm will use baked version. 
+ + Handler updates the state if current update mode is changed from last update mode(RSM or Self-Update) on new goal state. Once handler decides which updater to use, then + updater does following steps: 1. Retrieve the agent version from the goal state. 2. Check if we allowed to update for that version. 3. Log the update message. @@ -63,8 +83,8 @@ def __init__(self, protocol): self._daemon_version = self._get_daemon_version_for_update() self._last_attempted_update_error_msg = "" - # restore the state of rsm update. Default to self-update if last update is not with RSM. - if not self._get_is_last_update_with_rsm(): + # Restore the state of rsm update. Default to self-update if last update is not with RSM or if agent doing initial update + if not self._get_is_last_update_with_rsm() or self._is_initial_update(): self._updater = SelfUpdateVersionUpdater(self._gs_id) else: self._updater = RSMVersionUpdater(self._gs_id, self._daemon_version) @@ -78,14 +98,39 @@ def _get_daemon_version_for_update(): # use the min version as 2.2.53 as we started setting the daemon version starting 2.2.53. 
return FlexibleVersion("2.2.53") + @staticmethod + def _get_initial_update_state_file(): + """ + This file keeps if initial update is attempted or not + """ + return os.path.join(conf.get_lib_dir(), INITIAL_UPDATE_STATE_FILE) + + def _save_initial_update_state_file(self): + """ + Save the file if agent attempted initial update + """ + try: + with open(self._get_initial_update_state_file(), "w"): + pass + except Exception as e: + msg = "Error creating the initial update state file ({0}): {1}".format(self._get_initial_update_state_file(), ustr(e)) + logger.warn(msg) + add_event(op=WALAEventOperation.AgentUpgrade, message=msg, log_event=False) + + def _is_initial_update(self): + """ + Returns True if state file doesn't exit as presence of file consider as initial update already attempted + """ + return not os.path.exists(self._get_initial_update_state_file()) + @staticmethod def _get_rsm_update_state_file(): """ This file keeps if last attempted update is rsm or not. """ - return os.path.join(conf.get_lib_dir(), "rsm_update.json") + return os.path.join(conf.get_lib_dir(), RSM_UPDATE_STATE_FILE) - def _save_rsm_update_state(self): + def _save_rsm_update_state_file(self): """ Save the rsm state empty file when we switch to RSM """ @@ -93,9 +138,11 @@ def _save_rsm_update_state(self): with open(self._get_rsm_update_state_file(), "w"): pass except Exception as e: - logger.warn("Error creating the RSM state ({0}): {1}", self._get_rsm_update_state_file(), ustr(e)) + msg = "Error creating the RSM state file ({0}): {1}".format(self._get_rsm_update_state_file(), ustr(e)) + logger.warn(msg) + add_event(op=WALAEventOperation.AgentUpgrade, message=msg, log_event=False) - def _remove_rsm_update_state(self): + def _remove_rsm_update_state_file(self): """ Remove the rsm state file when we switch to self-update """ @@ -103,7 +150,9 @@ def _remove_rsm_update_state(self): if os.path.exists(self._get_rsm_update_state_file()): os.remove(self._get_rsm_update_state_file()) except Exception 
as e: - logger.warn("Error removing the RSM state ({0}): {1}", self._get_rsm_update_state_file(), ustr(e)) + msg = "Error removing the RSM state file ({0}): {1}".format(self._get_rsm_update_state_file(), ustr(e)) + logger.warn(msg) + add_event(op=WALAEventOperation.AgentUpgrade, message=msg, log_event=False) def _get_is_last_update_with_rsm(self): """ @@ -138,6 +187,15 @@ def _get_agent_family_manifest(self, goal_state): family, self._gs_id)) return agent_family_manifests[0] + def get_current_update_mode(self): + """ + Returns current update mode whether RSM or Self-Update + """ + if isinstance(self._updater, RSMVersionUpdater): + return UpdateMode.RSM + else: + return UpdateMode.SelfUpdate + def run(self, goal_state, ext_gs_updated): try: @@ -147,30 +205,36 @@ def run(self, goal_state, ext_gs_updated): # Update the state only on new goal state if ext_gs_updated: + # Reset the last reported update state on new goal state before we attempt update otherwise we keep reporting the last update error if any + self._last_attempted_update_error_msg = "" self._gs_id = goal_state.extensions_goal_state.id self._updater.sync_new_gs_id(self._gs_id) agent_family = self._get_agent_family_manifest(goal_state) - # Updater will return True or False if we need to switch the updater - # If self-updater receives RSM update enabled, it will switch to RSM updater - # If RSM updater receives RSM update disabled, it will switch to self-update - # No change in updater if GS not updated - is_rsm_update_enabled = self._updater.is_rsm_update_enabled(agent_family, ext_gs_updated) + # Always agent uses self-update for initial update regardless vm enrolled into RSM or not + # So ignoring the check for updater switch for the initial goal state/update + if not self._is_initial_update(): - if not is_rsm_update_enabled and isinstance(self._updater, RSMVersionUpdater): - msg = "VM not enabled for RSM updates, switching to self-update mode" - logger.info(msg) - 
add_event(op=WALAEventOperation.AgentUpgrade, message=msg, log_event=False) - self._updater = SelfUpdateVersionUpdater(self._gs_id) - self._remove_rsm_update_state() + # Updater will return True or False if we need to switch the updater + # If self-updater receives RSM update enabled, it will switch to RSM updater + # If RSM updater receives RSM update disabled, it will switch to self-update + # No change in updater if GS not updated + is_rsm_update_enabled = self._updater.is_rsm_update_enabled(agent_family, ext_gs_updated) - if is_rsm_update_enabled and isinstance(self._updater, SelfUpdateVersionUpdater): - msg = "VM enabled for RSM updates, switching to RSM update mode" - logger.info(msg) - add_event(op=WALAEventOperation.AgentUpgrade, message=msg, log_event=False) - self._updater = RSMVersionUpdater(self._gs_id, self._daemon_version) - self._save_rsm_update_state() + if not is_rsm_update_enabled and isinstance(self._updater, RSMVersionUpdater): + msg = "VM not enabled for RSM updates, switching to self-update mode" + logger.info(msg) + add_event(op=WALAEventOperation.AgentUpgrade, message=msg, log_event=False) + self._updater = SelfUpdateVersionUpdater(self._gs_id) + self._remove_rsm_update_state_file() + + if is_rsm_update_enabled and isinstance(self._updater, SelfUpdateVersionUpdater): + msg = "VM enabled for RSM updates, switching to RSM update mode" + logger.info(msg) + add_event(op=WALAEventOperation.AgentUpgrade, message=msg, log_event=False) + self._updater = RSMVersionUpdater(self._gs_id, self._daemon_version) + self._save_rsm_update_state_file() # If updater is changed in previous step, we allow update as it consider as first attempt. 
If not, it checks below condition # RSM checks new goal state; self-update checks manifest download interval @@ -218,6 +282,11 @@ def run(self, goal_state, ext_gs_updated): add_event(op=WALAEventOperation.AgentUpgrade, is_success=False, message=error_msg, log_event=False) self._last_attempted_update_error_msg = error_msg + # save initial update state when agent is doing first update + finally: + if self._is_initial_update(): + self._save_initial_update_state_file() + def get_vmagent_update_status(self): """ This function gets the VMAgent update status as per the last attempted update. diff --git a/azurelinuxagent/ga/cgroup.py b/azurelinuxagent/ga/cgroup.py deleted file mode 100644 index b2bf32fbc1..0000000000 --- a/azurelinuxagent/ga/cgroup.py +++ /dev/null @@ -1,392 +0,0 @@ -# Copyright 2018 Microsoft Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -# Requires Python 2.6+ and Openssl 1.0+ - -import errno -import os -import re -from datetime import timedelta - -from azurelinuxagent.common import logger, conf -from azurelinuxagent.common.exception import CGroupsException -from azurelinuxagent.common.future import ustr -from azurelinuxagent.common.osutil import get_osutil -from azurelinuxagent.common.utils import fileutil - -_REPORT_EVERY_HOUR = timedelta(hours=1) -_DEFAULT_REPORT_PERIOD = timedelta(seconds=conf.get_cgroup_check_period()) - -AGENT_NAME_TELEMETRY = "walinuxagent.service" # Name used for telemetry; it needs to be consistent even if the name of the service changes -AGENT_LOG_COLLECTOR = "azure-walinuxagent-logcollector" - - -class CounterNotFound(Exception): - pass - - -class MetricValue(object): - - """ - Class for defining all the required metric fields to send telemetry. - """ - - def __init__(self, category, counter, instance, value, report_period=_DEFAULT_REPORT_PERIOD): - self._category = category - self._counter = counter - self._instance = instance - self._value = value - self._report_period = report_period - - @property - def category(self): - return self._category - - @property - def counter(self): - return self._counter - - @property - def instance(self): - return self._instance - - @property - def value(self): - return self._value - - @property - def report_period(self): - return self._report_period - - -class MetricsCategory(object): - MEMORY_CATEGORY = "Memory" - CPU_CATEGORY = "CPU" - - -class MetricsCounter(object): - PROCESSOR_PERCENT_TIME = "% Processor Time" - TOTAL_MEM_USAGE = "Total Memory Usage" - MAX_MEM_USAGE = "Max Memory Usage" - THROTTLED_TIME = "Throttled Time" - SWAP_MEM_USAGE = "Swap Memory Usage" - AVAILABLE_MEM = "Available MBytes" - USED_MEM = "Used MBytes" - - -re_user_system_times = re.compile(r'user (\d+)\nsystem (\d+)\n') - - -class CGroup(object): - def __init__(self, name, cgroup_path): - """ - Initialize _data collection for the Memory controller - :param: 
name: Name of the CGroup - :param: cgroup_path: Path of the controller - :return: - """ - self.name = name - self.path = cgroup_path - - def __str__(self): - return "{0} [{1}]".format(self.name, self.path) - - def _get_cgroup_file(self, file_name): - return os.path.join(self.path, file_name) - - def _get_file_contents(self, file_name): - """ - Retrieve the contents to file. - - :param str file_name: Name of file within that metric controller - :return: Entire contents of the file - :rtype: str - """ - parameter_file = self._get_cgroup_file(file_name) - - return fileutil.read_file(parameter_file) - - def _get_parameters(self, parameter_name, first_line_only=False): - """ - Retrieve the values of a parameter from a controller. - Returns a list of values in the file. - - :param first_line_only: return only the first line. - :param str parameter_name: Name of file within that metric controller - :return: The first line of the file, without line terminator - :rtype: [str] - """ - result = [] - try: - values = self._get_file_contents(parameter_name).splitlines() - result = values[0] if first_line_only else values - except IndexError: - parameter_filename = self._get_cgroup_file(parameter_name) - logger.error("File {0} is empty but should not be".format(parameter_filename)) - raise CGroupsException("File {0} is empty but should not be".format(parameter_filename)) - except Exception as e: - if isinstance(e, (IOError, OSError)) and e.errno == errno.ENOENT: # pylint: disable=E1101 - raise e - parameter_filename = self._get_cgroup_file(parameter_name) - raise CGroupsException("Exception while attempting to read {0}".format(parameter_filename), e) - return result - - def is_active(self): - try: - tasks = self._get_parameters("tasks") - if tasks: - return len(tasks) != 0 - except (IOError, OSError) as e: - if e.errno == errno.ENOENT: - # only suppressing file not found exceptions. 
- pass - else: - logger.periodic_warn(logger.EVERY_HALF_HOUR, - 'Could not get list of tasks from "tasks" file in the cgroup: {0}.' - ' Internal error: {1}'.format(self.path, ustr(e))) - except CGroupsException as e: - logger.periodic_warn(logger.EVERY_HALF_HOUR, - 'Could not get list of tasks from "tasks" file in the cgroup: {0}.' - ' Internal error: {1}'.format(self.path, ustr(e))) - return False - - def get_tracked_metrics(self, **_): - """ - Retrieves the current value of the metrics tracked for this cgroup and returns them as an array. - - Note: Agent won't track the metrics if the current cpu ticks less than previous value and returns empty array. - """ - raise NotImplementedError() - - -class CpuCgroup(CGroup): - def __init__(self, name, cgroup_path): - super(CpuCgroup, self).__init__(name, cgroup_path) - - self._osutil = get_osutil() - self._previous_cgroup_cpu = None - self._previous_system_cpu = None - self._current_cgroup_cpu = None - self._current_system_cpu = None - self._previous_throttled_time = None - self._current_throttled_time = None - - def _get_cpu_ticks(self, allow_no_such_file_or_directory_error=False): - """ - Returns the number of USER_HZ of CPU time (user and system) consumed by this cgroup. - - If allow_no_such_file_or_directory_error is set to True and cpuacct.stat does not exist the function - returns 0; this is useful when the function can be called before the cgroup has been created. 
- """ - try: - cpuacct_stat = self._get_file_contents('cpuacct.stat') - except Exception as e: - if not isinstance(e, (IOError, OSError)) or e.errno != errno.ENOENT: # pylint: disable=E1101 - raise CGroupsException("Failed to read cpuacct.stat: {0}".format(ustr(e))) - if not allow_no_such_file_or_directory_error: - raise e - cpuacct_stat = None - - cpu_ticks = 0 - - if cpuacct_stat is not None: - # - # Sample file: - # # cat /sys/fs/cgroup/cpuacct/azure.slice/walinuxagent.service/cpuacct.stat - # user 10190 - # system 3160 - # - match = re_user_system_times.match(cpuacct_stat) - if not match: - raise CGroupsException( - "The contents of {0} are invalid: {1}".format(self._get_cgroup_file('cpuacct.stat'), cpuacct_stat)) - cpu_ticks = int(match.groups()[0]) + int(match.groups()[1]) - - return cpu_ticks - - def get_throttled_time(self): - try: - with open(os.path.join(self.path, 'cpu.stat')) as cpu_stat: - # - # Sample file: - # - # # cat /sys/fs/cgroup/cpuacct/azure.slice/walinuxagent.service/cpu.stat - # nr_periods 51660 - # nr_throttled 19461 - # throttled_time 1529590856339 - # - for line in cpu_stat: - match = re.match(r'throttled_time\s+(\d+)', line) - if match is not None: - return int(match.groups()[0]) - raise Exception("Cannot find throttled_time") - except (IOError, OSError) as e: - if e.errno == errno.ENOENT: - return 0 - raise CGroupsException("Failed to read cpu.stat: {0}".format(ustr(e))) - except Exception as e: - raise CGroupsException("Failed to read cpu.stat: {0}".format(ustr(e))) - - def _cpu_usage_initialized(self): - return self._current_cgroup_cpu is not None and self._current_system_cpu is not None - - def initialize_cpu_usage(self): - """ - Sets the initial values of CPU usage. This function must be invoked before calling get_cpu_usage(). 
- """ - if self._cpu_usage_initialized(): - raise CGroupsException("initialize_cpu_usage() should be invoked only once") - self._current_cgroup_cpu = self._get_cpu_ticks(allow_no_such_file_or_directory_error=True) - self._current_system_cpu = self._osutil.get_total_cpu_ticks_since_boot() - self._current_throttled_time = self.get_throttled_time() - - def get_cpu_usage(self): - """ - Computes the CPU used by the cgroup since the last call to this function. - - The usage is measured as a percentage of utilization of 1 core in the system. For example, - using 1 core all of the time on a 4-core system would be reported as 100%. - - NOTE: initialize_cpu_usage() must be invoked before calling get_cpu_usage() - """ - if not self._cpu_usage_initialized(): - raise CGroupsException("initialize_cpu_usage() must be invoked before the first call to get_cpu_usage()") - - self._previous_cgroup_cpu = self._current_cgroup_cpu - self._previous_system_cpu = self._current_system_cpu - self._current_cgroup_cpu = self._get_cpu_ticks() - self._current_system_cpu = self._osutil.get_total_cpu_ticks_since_boot() - - cgroup_delta = self._current_cgroup_cpu - self._previous_cgroup_cpu - system_delta = max(1, self._current_system_cpu - self._previous_system_cpu) - - return round(100.0 * self._osutil.get_processor_cores() * float(cgroup_delta) / float(system_delta), 3) - - def get_cpu_throttled_time(self, read_previous_throttled_time=True): - """ - Computes the throttled time (in seconds) since the last call to this function. 
- NOTE: initialize_cpu_usage() must be invoked before calling this function - Compute only current throttled time if read_previous_throttled_time set to False - """ - if not read_previous_throttled_time: - return float(self.get_throttled_time() / 1E9) - - if not self._cpu_usage_initialized(): - raise CGroupsException( - "initialize_cpu_usage() must be invoked before the first call to get_throttled_time()") - - self._previous_throttled_time = self._current_throttled_time - self._current_throttled_time = self.get_throttled_time() - - return float(self._current_throttled_time - self._previous_throttled_time) / 1E9 - - def get_tracked_metrics(self, **kwargs): - tracked = [] - cpu_usage = self.get_cpu_usage() - if cpu_usage >= float(0): - tracked.append( - MetricValue(MetricsCategory.CPU_CATEGORY, MetricsCounter.PROCESSOR_PERCENT_TIME, self.name, cpu_usage)) - - if 'track_throttled_time' in kwargs and kwargs['track_throttled_time']: - throttled_time = self.get_cpu_throttled_time() - if cpu_usage >= float(0) and throttled_time >= float(0): - tracked.append( - MetricValue(MetricsCategory.CPU_CATEGORY, MetricsCounter.THROTTLED_TIME, self.name, throttled_time)) - - return tracked - - -class MemoryCgroup(CGroup): - def __init__(self, name, cgroup_path): - super(MemoryCgroup, self).__init__(name, cgroup_path) - - self._counter_not_found_error_count = 0 - - def _get_memory_stat_counter(self, counter_name): - try: - with open(os.path.join(self.path, 'memory.stat')) as memory_stat: - # cat /sys/fs/cgroup/memory/azure.slice/memory.stat - # cache 67178496 - # rss 42340352 - # rss_huge 6291456 - # swap 0 - for line in memory_stat: - re_memory_counter = r'{0}\s+(\d+)'.format(counter_name) - match = re.match(re_memory_counter, line) - if match is not None: - return int(match.groups()[0]) - except (IOError, OSError) as e: - if e.errno == errno.ENOENT: - raise - raise CGroupsException("Failed to read memory.stat: {0}".format(ustr(e))) - except Exception as e: - raise 
CGroupsException("Failed to read memory.stat: {0}".format(ustr(e))) - - raise CounterNotFound("Cannot find counter: {0}".format(counter_name)) - - def get_memory_usage(self): - """ - Collect RSS+CACHE from memory.stat cgroup. - - :return: Memory usage in bytes - :rtype: int - """ - - cache = self._get_memory_stat_counter("cache") - rss = self._get_memory_stat_counter("rss") - return cache + rss - - def try_swap_memory_usage(self): - """ - Collect SWAP from memory.stat cgroup. - - :return: Memory usage in bytes - :rtype: int - Note: stat file is the only place to get the SWAP since other swap related file memory.memsw.usage_in_bytes is for total Memory+SWAP. - """ - try: - return self._get_memory_stat_counter("swap") - except CounterNotFound as e: - if self._counter_not_found_error_count < 1: - logger.periodic_info(logger.EVERY_HALF_HOUR, - '{0} from "memory.stat" file in the cgroup: {1}---[Note: This log for informational purpose only and can be ignored]'.format(ustr(e), self.path)) - self._counter_not_found_error_count += 1 - return 0 - - def get_max_memory_usage(self): - """ - Collect memory.max_usage_in_bytes from the cgroup. 
- - :return: Memory usage in bytes - :rtype: int - """ - usage = 0 - try: - usage = int(self._get_parameters('memory.max_usage_in_bytes', first_line_only=True)) - except Exception as e: - if isinstance(e, (IOError, OSError)) and e.errno == errno.ENOENT: # pylint: disable=E1101 - raise - raise CGroupsException("Exception while attempting to read {0}".format("memory.max_usage_in_bytes"), e) - - return usage - - def get_tracked_metrics(self, **_): - return [ - MetricValue(MetricsCategory.MEMORY_CATEGORY, MetricsCounter.TOTAL_MEM_USAGE, self.name, - self.get_memory_usage()), - MetricValue(MetricsCategory.MEMORY_CATEGORY, MetricsCounter.MAX_MEM_USAGE, self.name, - self.get_max_memory_usage(), _REPORT_EVERY_HOUR), - MetricValue(MetricsCategory.MEMORY_CATEGORY, MetricsCounter.SWAP_MEM_USAGE, self.name, - self.try_swap_memory_usage(), _REPORT_EVERY_HOUR) - ] diff --git a/azurelinuxagent/ga/cgroupapi.py b/azurelinuxagent/ga/cgroupapi.py index 6f4bf4ab34..b030633f23 100644 --- a/azurelinuxagent/ga/cgroupapi.py +++ b/azurelinuxagent/ga/cgroupapi.py @@ -14,7 +14,7 @@ # limitations under the License. 
# # Requires Python 2.6+ and Openssl 1.0+ - +import json import os import re import shutil @@ -23,8 +23,10 @@ import uuid from azurelinuxagent.common import logger -from azurelinuxagent.ga.cgroup import CpuCgroup, MemoryCgroup +from azurelinuxagent.common.event import WALAEventOperation, add_event from azurelinuxagent.ga.cgroupstelemetry import CGroupsTelemetry +from azurelinuxagent.ga.cpucontroller import _CpuController, CpuControllerV1, CpuControllerV2 +from azurelinuxagent.ga.memorycontroller import MemoryControllerV1, MemoryControllerV2 from azurelinuxagent.common.conf import get_agent_pid_file_path from azurelinuxagent.common.exception import CGroupsException, ExtensionErrorCodes, ExtensionError, \ ExtensionOperationError @@ -36,21 +38,26 @@ from azurelinuxagent.common.utils.flexible_version import FlexibleVersion from azurelinuxagent.common.version import get_distro -CGROUPS_FILE_SYSTEM_ROOT = '/sys/fs/cgroup' -CGROUP_CONTROLLERS = ["cpu", "memory"] +CGROUP_FILE_SYSTEM_ROOT = '/sys/fs/cgroup' EXTENSION_SLICE_PREFIX = "azure-vmextensions" -class SystemdRunError(CGroupsException): - """ - Raised when systemd-run fails - """ +def log_cgroup_info(formatted_string, op=WALAEventOperation.CGroupsInfo, send_event=True): + logger.info("[CGI] " + formatted_string) + if send_event: + add_event(op=op, message=formatted_string) - def __init__(self, msg=None): - super(SystemdRunError, self).__init__(msg) +def log_cgroup_warning(formatted_string, op=WALAEventOperation.CGroupsInfo, send_event=True): + logger.info("[CGW] " + formatted_string) # log as INFO for now, in the future it should be logged as WARNING + if send_event: + add_event(op=op, message=formatted_string, is_success=False, log_event=False) -class CGroupsApi(object): + +class CGroupUtil(object): + """ + Cgroup utility methods which are independent of systemd cgroup api. 
+ """ @staticmethod def cgroups_supported(): distro_info = get_distro() @@ -63,18 +70,18 @@ def cgroups_supported(): (distro_name.lower() in ('centos', 'redhat') and 8 <= distro_version.major < 9) @staticmethod - def track_cgroups(extension_cgroups): - try: - for cgroup in extension_cgroups: - CGroupsTelemetry.track_cgroup(cgroup) - except Exception as exception: - logger.warn("Cannot add cgroup '{0}' to tracking list; resource usage will not be tracked. " - "Error: {1}".format(cgroup.path, ustr(exception))) + def get_extension_slice_name(extension_name, old_slice=False): + # The old slice makes it difficult for user to override the limits because they need to place drop-in files on every upgrade if extension slice is different for each version. + # old slice includes .- + # new slice without version . + if not old_slice: + extension_name = extension_name.rsplit("-", 1)[0] + # Since '-' is used as a separator in systemd unit names, we replace it with '_' to prevent side-effects. + return EXTENSION_SLICE_PREFIX + "-" + extension_name.replace('-', '_') + ".slice" @staticmethod - def get_processes_in_cgroup(cgroup_path): - with open(os.path.join(cgroup_path, "cgroup.procs"), "r") as cgroup_procs: - return [int(pid) for pid in cgroup_procs.read().split()] + def get_daemon_pid(): + return int(fileutil.read_file(get_agent_pid_file_path()).strip()) @staticmethod def _foreach_legacy_cgroup(operation): @@ -92,9 +99,9 @@ def _foreach_legacy_cgroup(operation): """ legacy_cgroups = [] for controller in ['cpu', 'memory']: - cgroup = os.path.join(CGROUPS_FILE_SYSTEM_ROOT, controller, "WALinuxAgent", "WALinuxAgent") + cgroup = os.path.join(CGROUP_FILE_SYSTEM_ROOT, controller, "WALinuxAgent", "WALinuxAgent") if os.path.exists(cgroup): - logger.info('Found legacy cgroup {0}', cgroup) + log_cgroup_info('Found legacy cgroup {0}'.format(cgroup), send_event=False) legacy_cgroups.append((controller, cgroup)) try: @@ -103,29 +110,99 @@ def _foreach_legacy_cgroup(operation): if 
os.path.exists(procs_file): procs_file_contents = fileutil.read_file(procs_file).strip() - daemon_pid = CGroupsApi.get_daemon_pid() + daemon_pid = CGroupUtil.get_daemon_pid() if ustr(daemon_pid) in procs_file_contents: operation(controller, daemon_pid) finally: for _, cgroup in legacy_cgroups: - logger.info('Removing {0}', cgroup) + log_cgroup_info('Removing {0}'.format(cgroup), send_event=False) shutil.rmtree(cgroup, ignore_errors=True) return len(legacy_cgroups) @staticmethod - def get_daemon_pid(): - return int(fileutil.read_file(get_agent_pid_file_path()).strip()) + def cleanup_legacy_cgroups(): + """ + Previous versions of the daemon (2.2.31-2.2.40) wrote their PID to /sys/fs/cgroup/{cpu,memory}/WALinuxAgent/WALinuxAgent; + starting from version 2.2.41 we track the agent service in walinuxagent.service instead of WALinuxAgent/WALinuxAgent. If + we find that any of the legacy groups include the PID of the daemon then we need to disable data collection for this + instance (under systemd, moving PIDs across the cgroup file system can produce unpredictable results) + """ + return CGroupUtil._foreach_legacy_cgroup(lambda *_: None) + + +class SystemdRunError(CGroupsException): + """ + Raised when systemd-run fails + """ + + def __init__(self, msg=None): + super(SystemdRunError, self).__init__(msg) -class SystemdCgroupsApi(CGroupsApi): +class InvalidCgroupMountpointException(CGroupsException): """ - Cgroups interface via systemd + Raised when the cgroup mountpoint is invalid. """ + def __init__(self, msg=None): + super(InvalidCgroupMountpointException, self).__init__(msg) + + +def get_cgroup_api(): + """ + Determines which version of Cgroup should be used for resource enforcement and monitoring by the Agent and returns + the corresponding Api. + + Uses 'stat -f --format=%T /sys/fs/cgroup' to get the cgroup hierarchy in use. + If the result is 'cgroup2fs', cgroup v2 is being used. + If the result is 'tmpfs', cgroup v1 or a hybrid mode is being used. 
+ If the result of 'stat -f --format=%T /sys/fs/cgroup/unified' is 'cgroup2fs', then hybrid mode is being used. + + Raises exception if cgroup filesystem mountpoint is not '/sys/fs/cgroup', or an unknown mode is detected. Also + raises exception if hybrid mode is detected and there are controllers available to be enabled in the unified + hierarchy (the agent does not support cgroups if there are controllers simultaneously attached to v1 and v2 + hierarchies). + """ + if not os.path.exists(CGROUP_FILE_SYSTEM_ROOT): + v1_mount_point = shellutil.run_command(['findmnt', '-t', 'cgroup', '--noheadings']) + v2_mount_point = shellutil.run_command(['findmnt', '-t', 'cgroup2', '--noheadings']) + raise InvalidCgroupMountpointException("Expected cgroup filesystem to be mounted at '{0}', but it is not.\n v1 mount point: \n{1}\n v2 mount point: \n{2}".format(CGROUP_FILE_SYSTEM_ROOT, v1_mount_point, v2_mount_point)) + + root_hierarchy_mode = shellutil.run_command(["stat", "-f", "--format=%T", CGROUP_FILE_SYSTEM_ROOT]).rstrip() + + if root_hierarchy_mode == "cgroup2fs": + log_cgroup_info("Using cgroup v2 for resource enforcement and monitoring") + return SystemdCgroupApiv2() + + elif root_hierarchy_mode == "tmpfs": + # Check if a hybrid mode is being used + unified_hierarchy_path = os.path.join(CGROUP_FILE_SYSTEM_ROOT, "unified") + if os.path.exists(unified_hierarchy_path) and shellutil.run_command(["stat", "-f", "--format=%T", unified_hierarchy_path]).rstrip() == "cgroup2fs": + # Hybrid mode is being used. Check if any controllers are available to be enabled in the unified hierarchy. 
+ available_unified_controllers_file = os.path.join(unified_hierarchy_path, "cgroup.controllers") + if os.path.exists(available_unified_controllers_file): + available_unified_controllers = fileutil.read_file(available_unified_controllers_file).rstrip() + if available_unified_controllers != "": + raise CGroupsException("Detected hybrid cgroup mode, but there are controllers available to be enabled in unified hierarchy: {0}".format(available_unified_controllers)) + + cgroup_api_v1 = SystemdCgroupApiv1() + # Previously the agent supported users mounting cgroup v1 controllers in locations other than the systemd + # default ('/sys/fs/cgroup'). The agent no longer supports this scenario. If any agent supported controller is + # mounted in a location other than the systemd default, raise Exception. + if not cgroup_api_v1.are_mountpoints_systemd_created(): + raise InvalidCgroupMountpointException("Expected cgroup controllers to be mounted at '{0}', but at least one is not. v1 mount points: \n{1}".format(CGROUP_FILE_SYSTEM_ROOT, json.dumps(cgroup_api_v1.get_controller_mountpoints()))) + log_cgroup_info("Using cgroup v1 for resource enforcement and monitoring") + return cgroup_api_v1 + + raise CGroupsException("{0} has an unexpected file type: {1}".format(CGROUP_FILE_SYSTEM_ROOT, root_hierarchy_mode)) + + +class _SystemdCgroupApi(object): + """ + Cgroup interface via systemd. Contains common api implementations between cgroup v1 and v2. 
+ """ def __init__(self): - self._cgroup_mountpoints = None - self._agent_unit_name = None self._systemd_run_commands = [] self._systemd_run_commands_lock = threading.RLock() @@ -136,137 +213,176 @@ def get_systemd_run_commands(self): with self._systemd_run_commands_lock: return self._systemd_run_commands[:] - def get_cgroup_mount_points(self): + def get_unit_cgroup(self, unit_name, cgroup_name): """ - Returns a tuple with the mount points for the cpu and memory controllers; the values can be None - if the corresponding controller is not mounted + Cgroup version specific. Returns a representation of the unit cgroup. + + :param unit_name: The unit to return the cgroup of. + :param cgroup_name: A name to represent the cgroup. Used for logging/tracking purposes. """ - # the output of mount is similar to - # $ mount -t cgroup - # cgroup on /sys/fs/cgroup/systemd type cgroup (rw,nosuid,nodev,noexec,relatime,xattr,name=systemd) - # cgroup on /sys/fs/cgroup/cpu,cpuacct type cgroup (rw,nosuid,nodev,noexec,relatime,cpu,cpuacct) - # cgroup on /sys/fs/cgroup/memory type cgroup (rw,nosuid,nodev,noexec,relatime,memory) - # etc - # - if self._cgroup_mountpoints is None: - cpu = None - memory = None - for line in shellutil.run_command(['mount', '-t', 'cgroup']).splitlines(): - match = re.search(r'on\s+(?P/\S+(memory|cpuacct))\s', line) - if match is not None: - path = match.group('path') - if 'cpuacct' in path: - cpu = path - else: - memory = path - self._cgroup_mountpoints = {'cpu': cpu, 'memory': memory} - - return self._cgroup_mountpoints['cpu'], self._cgroup_mountpoints['memory'] + raise NotImplementedError() - @staticmethod - def get_process_cgroup_relative_paths(process_id): - """ - Returns a tuple with the path of the cpu and memory cgroups for the given process (relative to the mount point of the corresponding - controller). - The 'process_id' can be a numeric PID or the string "self" for the current process. 
- The values returned can be None if the process is not in a cgroup for that controller (e.g. the controller is not mounted). - """ - # The contents of the file are similar to - # # cat /proc/1218/cgroup - # 10:memory:/system.slice/walinuxagent.service - # 3:cpu,cpuacct:/system.slice/walinuxagent.service - # etc - cpu_path = None - memory_path = None - for line in fileutil.read_file("/proc/{0}/cgroup".format(process_id)).splitlines(): - match = re.match(r'\d+:(?P(memory|.*cpuacct.*)):(?P.+)', line) - if match is not None: - controller = match.group('controller') - path = match.group('path').lstrip('/') if match.group('path') != '/' else None - if controller == 'memory': - memory_path = path - else: - cpu_path = path + def get_cgroup_from_relative_path(self, relative_path, cgroup_name): + """ + Cgroup version specific. Returns a representation of the cgroup at the provided relative path. - return cpu_path, memory_path + :param relative_path: The relative path to return the cgroup of. + :param cgroup_name: A name to represent the cgroup. Used for logging/tracking purposes. + """ + raise NotImplementedError() - def get_process_cgroup_paths(self, process_id): + def get_process_cgroup(self, process_id, cgroup_name): """ - Returns a tuple with the path of the cpu and memory cgroups for the given process. The 'process_id' can be a numeric PID or the string "self" for the current process. - The values returned can be None if the process is not in a cgroup for that controller (e.g. the controller is not mounted). + Cgroup version specific. Returns a representation of the process' cgroup. + + :param process_id: A numeric PID to return the cgroup of, or the string "self" to return the cgroup of the current process. + :param cgroup_name: A name to represent the cgroup. Used for logging/tracking purposes. 
""" - cpu_cgroup_relative_path, memory_cgroup_relative_path = self.get_process_cgroup_relative_paths(process_id) + raise NotImplementedError() - cpu_mount_point, memory_mount_point = self.get_cgroup_mount_points() + def log_root_paths(self): + """ + Cgroup version specific. Logs the root paths of the cgroup filesystem/controllers. + """ + raise NotImplementedError() - cpu_cgroup_path = os.path.join(cpu_mount_point, cpu_cgroup_relative_path) \ - if cpu_mount_point is not None and cpu_cgroup_relative_path is not None else None + def start_extension_command(self, extension_name, command, cmd_name, timeout, shell, cwd, env, stdout, stderr, + error_code=ExtensionErrorCodes.PluginUnknownFailure): + """ + Cgroup version specific. Starts extension command. + """ + raise NotImplementedError() + + @staticmethod + def _is_systemd_failure(scope_name, stderr): + stderr.seek(0) + stderr = ustr(stderr.read(TELEMETRY_MESSAGE_MAX_LEN), encoding='utf-8', errors='backslashreplace') + unit_not_found = "Unit {0} not found.".format(scope_name) + return unit_not_found in stderr or scope_name not in stderr - memory_cgroup_path = os.path.join(memory_mount_point, memory_cgroup_relative_path) \ - if memory_mount_point is not None and memory_cgroup_relative_path is not None else None - return cpu_cgroup_path, memory_cgroup_path +class SystemdCgroupApiv1(_SystemdCgroupApi): + """ + Cgroup v1 interface via systemd + """ + def __init__(self): + super(SystemdCgroupApiv1, self).__init__() + self._cgroup_mountpoints = self._get_controller_mountpoints() - def get_unit_cgroup_paths(self, unit_name): + @staticmethod + def _get_controller_mountpoints(): """ - Returns a tuple with the path of the cpu and memory cgroups for the given unit. - The values returned can be None if the controller is not mounted. - Ex: ControlGroup=/azure.slice/walinuxagent.service - controlgroup_path[1:] = azure.slice/walinuxagent.service + In v1, each controller is mounted at a different path. Use findmnt to get each path. 
+ + the output of findmnt is similar to + $ findmnt -t cgroup --noheadings + /sys/fs/cgroup/systemd cgroup cgroup rw,nosuid,nodev,noexec,relatime,xattr,name=systemd + /sys/fs/cgroup/memory cgroup cgroup rw,nosuid,nodev,noexec,relatime,memory + /sys/fs/cgroup/cpu,cpuacct cgroup cgroup rw,nosuid,nodev,noexec,relatime,cpu,cpuacct + etc + + Returns a dictionary of the controller-path mappings. The dictionary only includes the controllers which are + supported by the agent. """ - controlgroup_path = systemd.get_unit_property(unit_name, "ControlGroup") - cpu_mount_point, memory_mount_point = self.get_cgroup_mount_points() + mount_points = {} + for line in shellutil.run_command(['findmnt', '-t', 'cgroup', '--noheadings']).splitlines(): + # In v2, we match only the systemd default mountpoint ('/sys/fs/cgroup'). In v1, we match any path. This + # is because the agent previously supported users mounting controllers at locations other than the systemd + # default in v1. + match = re.search(r'(?P\S+\/(?P\S+))\s+cgroup', line) + if match is not None: + path = match.group('path') + controller = match.group('controller') + if controller is not None and path is not None and controller in CgroupV1.get_supported_controller_names(): + mount_points[controller] = path + return mount_points - cpu_cgroup_path = os.path.join(cpu_mount_point, controlgroup_path[1:]) \ - if cpu_mount_point is not None else None + def get_controller_mountpoints(self): + """ + Returns a dictionary of controller-mountpoint mappings. + """ + return self._cgroup_mountpoints - memory_cgroup_path = os.path.join(memory_mount_point, controlgroup_path[1:]) \ - if memory_mount_point is not None else None + def are_mountpoints_systemd_created(self): + """ + Systemd mounts each controller at '/sys/fs/cgroup/'. Returns True if all mounted controllers which + are supported by the agent have mountpoints which match this pattern, False otherwise. 
- return cpu_cgroup_path, memory_cgroup_path + The agent does not support cgroup usage if the default root systemd mountpoint (/sys/fs/cgroup) is not used. + This method is used to check if any users are using non-systemd mountpoints. If they are, the agent drop-in + files will be cleaned up in cgroupconfigurator. + """ + for controller, mount_point in self._cgroup_mountpoints.items(): + if mount_point != os.path.join(CGROUP_FILE_SYSTEM_ROOT, controller): + return False + return True @staticmethod - def get_cgroup2_controllers(): + def _get_process_relative_controller_paths(process_id): """ - Returns a tuple with the mount point for the cgroups v2 controllers, and the currently mounted controllers; - either value can be None if cgroups v2 or its controllers are not mounted + Returns the relative paths of the cgroup for the given process as a dict of controller-path mappings. The result + only includes controllers which are supported. + The contents of the /proc/{process_id}/cgroup file are similar to + # cat /proc/1218/cgroup + 10:memory:/system.slice/walinuxagent.service + 3:cpu,cpuacct:/system.slice/walinuxagent.service + etc + + :param process_id: A numeric PID to return the relative paths of, or the string "self" to return the relative paths of the current process. 
"""
-        # the output of mount is similar to
-        #     $ mount -t cgroup2
-        #     cgroup2 on /sys/fs/cgroup/unified type cgroup2 (rw,nosuid,nodev,noexec,relatime,nsdelegate)
-        #
-        for line in shellutil.run_command(['mount', '-t', 'cgroup2']).splitlines():
-            match = re.search(r'on\s+(?P/\S+)\s', line)
+        controller_relative_paths = {}
+        for line in fileutil.read_file("/proc/{0}/cgroup".format(process_id)).splitlines():
+            match = re.match(r'\d+:(?P.+):(?P.+)', line)
             if match is not None:
-                mount_point = match.group('path')
-                controllers = None
-                controllers_file = os.path.join(mount_point, 'cgroup.controllers')
-                if os.path.exists(controllers_file):
-                    controllers = fileutil.read_file(controllers_file)
-                return mount_point, controllers
-        return None, None
+                controller = match.group('controller')
+                path = match.group('path').lstrip('/') if match.group('path') != '/' else None
+                if path is not None and controller in CgroupV1.get_supported_controller_names():
+                    controller_relative_paths[controller] = path
 
-    @staticmethod
-    def _is_systemd_failure(scope_name, stderr):
-        stderr.seek(0)
-        stderr = ustr(stderr.read(TELEMETRY_MESSAGE_MAX_LEN), encoding='utf-8', errors='backslashreplace')
-        unit_not_found = "Unit {0} not found.".format(scope_name)
-        return unit_not_found in stderr or scope_name not in stderr
+        return controller_relative_paths
 
-    @staticmethod
-    def get_extension_slice_name(extension_name, old_slice=False):
-        # The old slice makes it difficult for user to override the limits because they need to place drop-in files on every upgrade if extension slice is different for each version.
-        # old slice includes .-
-        # new slice without version .
-        if not old_slice:
-            extension_name = extension_name.rsplit("-", 1)[0]
-        # Since '-' is used as a separator in systemd unit names, we replace it with '_' to prevent side-effects.
- return EXTENSION_SLICE_PREFIX + "-" + extension_name.replace('-', '_') + ".slice" + def get_unit_cgroup(self, unit_name, cgroup_name): + unit_cgroup_relative_path = systemd.get_unit_property(unit_name, "ControlGroup") + unit_controller_paths = {} + + for controller, mountpoint in self._cgroup_mountpoints.items(): + unit_controller_paths[controller] = os.path.join(mountpoint, unit_cgroup_relative_path[1:]) + + return CgroupV1(cgroup_name=cgroup_name, controller_mountpoints=self._cgroup_mountpoints, + controller_paths=unit_controller_paths) + + def get_cgroup_from_relative_path(self, relative_path, cgroup_name): + controller_paths = {} + for controller, mountpoint in self._cgroup_mountpoints.items(): + controller_paths[controller] = os.path.join(mountpoint, relative_path) + + return CgroupV1(cgroup_name=cgroup_name, controller_mountpoints=self._cgroup_mountpoints, + controller_paths=controller_paths) + + def get_process_cgroup(self, process_id, cgroup_name): + relative_controller_paths = self._get_process_relative_controller_paths(process_id) + process_controller_paths = {} + + for controller, mountpoint in self._cgroup_mountpoints.items(): + relative_controller_path = relative_controller_paths.get(controller) + if relative_controller_path is not None: + process_controller_paths[controller] = os.path.join(mountpoint, relative_controller_path) + + return CgroupV1(cgroup_name=cgroup_name, controller_mountpoints=self._cgroup_mountpoints, + controller_paths=process_controller_paths) + + def log_root_paths(self): + for controller in CgroupV1.get_supported_controller_names(): + mount_point = self._cgroup_mountpoints.get(controller) + if mount_point is None: + log_cgroup_info("The {0} controller is not mounted".format(controller)) + else: + log_cgroup_info("The {0} controller is mounted at {1}".format(controller, mount_point)) def start_extension_command(self, extension_name, command, cmd_name, timeout, shell, cwd, env, stdout, stderr, 
error_code=ExtensionErrorCodes.PluginUnknownFailure): scope = "{0}_{1}".format(cmd_name, uuid.uuid4()) - extension_slice_name = self.get_extension_slice_name(extension_name) + extension_slice_name = CGroupUtil.get_extension_slice_name(extension_name) with self._systemd_run_commands_lock: process = subprocess.Popen( # pylint: disable=W1509 # Some distros like ubuntu20 by default cpu and memory accounting enabled. Thus create nested cgroups under the extension slice @@ -285,39 +401,28 @@ def start_extension_command(self, extension_name, command, cmd_name, timeout, sh scope_name = scope + '.scope' - logger.info("Started extension in unit '{0}'", scope_name) + log_cgroup_info("Started extension in unit '{0}'".format(scope_name), send_event=False) - cpu_cgroup = None + cpu_controller = None try: cgroup_relative_path = os.path.join('azure.slice/azure-vmextensions.slice', extension_slice_name) - - cpu_cgroup_mountpoint, memory_cgroup_mountpoint = self.get_cgroup_mount_points() - - if cpu_cgroup_mountpoint is None: - logger.info("The CPU controller is not mounted; will not track resource usage") - else: - cpu_cgroup_path = os.path.join(cpu_cgroup_mountpoint, cgroup_relative_path) - cpu_cgroup = CpuCgroup(extension_name, cpu_cgroup_path) - CGroupsTelemetry.track_cgroup(cpu_cgroup) - - if memory_cgroup_mountpoint is None: - logger.info("The Memory controller is not mounted; will not track resource usage") - else: - memory_cgroup_path = os.path.join(memory_cgroup_mountpoint, cgroup_relative_path) - memory_cgroup = MemoryCgroup(extension_name, memory_cgroup_path) - CGroupsTelemetry.track_cgroup(memory_cgroup) + cgroup = self.get_cgroup_from_relative_path(cgroup_relative_path, extension_name) + for controller in cgroup.get_controllers(): + if isinstance(controller, _CpuController): + cpu_controller = controller + CGroupsTelemetry.track_cgroup_controller(controller) except IOError as e: if e.errno == 2: # 'No such file or directory' - logger.info("The extension command already 
completed; will not track resource usage") - logger.info("Failed to start tracking resource usage for the extension: {0}", ustr(e)) + log_cgroup_info("The extension command already completed; will not track resource usage", send_event=False) + log_cgroup_info("Failed to start tracking resource usage for the extension: {0}".format(ustr(e)), send_event=False) except Exception as e: - logger.info("Failed to start tracking resource usage for the extension: {0}", ustr(e)) + log_cgroup_info("Failed to start tracking resource usage for the extension: {0}".format(ustr(e)), send_event=False) # Wait for process completion or timeout try: return handle_process_completion(process=process, command=command, timeout=timeout, stdout=stdout, - stderr=stderr, error_code=error_code, cpu_cgroup=cpu_cgroup) + stderr=stderr, error_code=error_code, cpu_controller=cpu_controller) except ExtensionError as e: # The extension didn't terminate successfully. Determine whether it was due to systemd errors or # extension errors. @@ -342,11 +447,306 @@ def start_extension_command(self, extension_name, command, cmd_name, timeout, sh with self._systemd_run_commands_lock: self._systemd_run_commands.remove(process.pid) - def cleanup_legacy_cgroups(self): + +class SystemdCgroupApiv2(_SystemdCgroupApi): + """ + Cgroup v2 interface via systemd + """ + def __init__(self): + super(SystemdCgroupApiv2, self).__init__() + self._root_cgroup_path = self._get_root_cgroup_path() + self._controllers_enabled_at_root = self._get_controllers_enabled_at_root(self._root_cgroup_path) if self._root_cgroup_path != "" else [] + + @staticmethod + def _get_root_cgroup_path(): """ - Previous versions of the daemon (2.2.31-2.2.40) wrote their PID to /sys/fs/cgroup/{cpu,memory}/WALinuxAgent/WALinuxAgent; - starting from version 2.2.41 we track the agent service in walinuxagent.service instead of WALinuxAgent/WALinuxAgent. 
If - we find that any of the legacy groups include the PID of the daemon then we need to disable data collection for this - instance (under systemd, moving PIDs across the cgroup file system can produce unpredictable results) + In v2, there is a unified mount point shared by all controllers. Use findmnt to get the unified mount point. + + The output of findmnt is similar to + $ findmnt -t cgroup2 --noheadings + /sys/fs/cgroup cgroup2 cgroup2 rw,nosuid,nodev,noexec,relatime,nsdelegate,memory_recursiveprot + + Returns empty string if the root cgroup cannot be determined from the output above. + """ + # + for line in shellutil.run_command(['findmnt', '-t', 'cgroup2', '--noheadings']).splitlines(): + # Systemd mounts the cgroup filesystem at '/sys/fs/cgroup'. The agent does not support cgroups if the + # filesystem is mounted elsewhere, so search specifically for '/sys/fs/cgroup' in the findmnt output. + match = re.search(r'(?P<path>\/sys\/fs\/cgroup)\s+cgroup2', line) + if match is not None: + root_cgroup_path = match.group('path') + if root_cgroup_path is not None: + return root_cgroup_path + return "" + + def get_root_cgroup_path(self): + """ + Returns the unified cgroup mountpoint. + """ + return self._root_cgroup_path + + @staticmethod + def _get_controllers_enabled_at_root(root_cgroup_path): + """ + Returns a list of the controllers enabled at the root cgroup. The cgroup.subtree_control file at the root shows + a space separated list of the controllers which are enabled to control resource distribution from the root + cgroup to its children. If a controller is listed here, then that controller is available to enable in children + cgroups. Returns only the enabled controllers which are supported by the agent. 
+ + $ cat /sys/fs/cgroup/cgroup.subtree_control + cpuset cpu io memory hugetlb pids rdma misc + """ + enabled_controllers_file = os.path.join(root_cgroup_path, 'cgroup.subtree_control') + if os.path.exists(enabled_controllers_file): + controllers_enabled_at_root = fileutil.read_file(enabled_controllers_file).rstrip().split() + return list(set(controllers_enabled_at_root) & set(CgroupV2.get_supported_controller_names())) + return [] + + @staticmethod + def _get_process_relative_cgroup_path(process_id): + """ + Returns the relative path of the cgroup for the given process. + The contents of the /proc/{process_id}/cgroup file are similar to + # cat /proc/1218/cgroup + 0::/azure.slice/walinuxagent.service + + :param process_id: A numeric PID to return the relative path of, or the string "self" to return the relative path of the current process. + """ + relative_path = "" + for line in fileutil.read_file("/proc/{0}/cgroup".format(process_id)).splitlines(): + match = re.match(r'0::(?P<path>\S+)', line) + if match is not None: + relative_path = match.group('path').lstrip('/') if match.group('path') != '/' else "" + + return relative_path + + def get_unit_cgroup(self, unit_name, cgroup_name): + unit_cgroup_relative_path = systemd.get_unit_property(unit_name, "ControlGroup") + unit_cgroup_path = "" + + if self._root_cgroup_path != "": + unit_cgroup_path = os.path.join(self._root_cgroup_path, unit_cgroup_relative_path[1:]) + + return CgroupV2(cgroup_name=cgroup_name, root_cgroup_path=self._root_cgroup_path, cgroup_path=unit_cgroup_path, enabled_controllers=self._controllers_enabled_at_root) + + def get_cgroup_from_relative_path(self, relative_path, cgroup_name): + cgroup_path = "" + if self._root_cgroup_path != "": + cgroup_path = os.path.join(self._root_cgroup_path, relative_path) + + return CgroupV2(cgroup_name=cgroup_name, root_cgroup_path=self._root_cgroup_path, cgroup_path=cgroup_path, enabled_controllers=self._controllers_enabled_at_root) + + def get_process_cgroup(self, 
process_id, cgroup_name): + relative_path = self._get_process_relative_cgroup_path(process_id) + cgroup_path = "" + + if self._root_cgroup_path != "": + cgroup_path = os.path.join(self._root_cgroup_path, relative_path) + + return CgroupV2(cgroup_name=cgroup_name, root_cgroup_path=self._root_cgroup_path, cgroup_path=cgroup_path, enabled_controllers=self._controllers_enabled_at_root) + + def log_root_paths(self): + log_cgroup_info("The root cgroup path is {0}".format(self._root_cgroup_path)) + for controller in CgroupV2.get_supported_controller_names(): + if controller in self._controllers_enabled_at_root: + log_cgroup_info("The {0} controller is enabled at the root cgroup".format(controller)) + else: + log_cgroup_info("The {0} controller is not enabled at the root cgroup".format(controller)) + + def start_extension_command(self, extension_name, command, cmd_name, timeout, shell, cwd, env, stdout, stderr, + error_code=ExtensionErrorCodes.PluginUnknownFailure): + raise NotImplementedError() + + +class Cgroup(object): + MEMORY_CONTROLLER = "memory" + + def __init__(self, cgroup_name): + self._cgroup_name = cgroup_name + + @staticmethod + def get_supported_controller_names(): + """ + Cgroup version specific. Returns a list of the controllers which the agent supports as strings. + """ + raise NotImplementedError() + + def check_in_expected_slice(self, expected_slice): + """ + Cgroup version specific. Returns True if the cgroup is in the expected slice, False otherwise. + + :param expected_slice: The slice the cgroup is expected to be in. + """ + raise NotImplementedError() + + def get_controllers(self, expected_relative_path=None): + """ + Cgroup version specific. Returns a list of the agent supported controllers which are mounted/enabled for the cgroup. + + :param expected_relative_path: The expected relative path of the cgroup. If provided, only controllers mounted + at this expected path will be returned. 
+ """ + raise NotImplementedError() + + def get_processes(self): + """ + Cgroup version specific. Returns a list of all the process ids in the cgroup. + """ + raise NotImplementedError() + + +class CgroupV1(Cgroup): + CPU_CONTROLLER = "cpu,cpuacct" + + def __init__(self, cgroup_name, controller_mountpoints, controller_paths): + """ + :param cgroup_name: The name of the cgroup. Used for logging/tracking purposes. + :param controller_mountpoints: A dictionary of controller-mountpoint mappings for each agent supported controller which is mounted. + :param controller_paths: A dictionary of controller-path mappings for each agent supported controller which is mounted. The path represents the absolute path of the controller. + """ + super(CgroupV1, self).__init__(cgroup_name=cgroup_name) + self._controller_mountpoints = controller_mountpoints + self._controller_paths = controller_paths + + @staticmethod + def get_supported_controller_names(): + return [CgroupV1.CPU_CONTROLLER, CgroupV1.MEMORY_CONTROLLER] + + def check_in_expected_slice(self, expected_slice): + in_expected_slice = True + for controller, path in self._controller_paths.items(): + if expected_slice not in path: + log_cgroup_warning("The {0} controller for the {1} cgroup is not mounted in the expected slice. Expected slice: {2}. Actual controller path: {3}".format(controller, self._cgroup_name, expected_slice, path), send_event=False) + in_expected_slice = False + + return in_expected_slice + + def get_controllers(self, expected_relative_path=None): + controllers = [] + + for supported_controller_name in self.get_supported_controller_names(): + controller = None + controller_path = self._controller_paths.get(supported_controller_name) + controller_mountpoint = self._controller_mountpoints.get(supported_controller_name) + + if controller_mountpoint is None: + # Do not send telemetry here. 
We already have telemetry for unmounted controllers in cgroup init + log_cgroup_warning("{0} controller is not mounted; will not track".format(supported_controller_name), send_event=False) + continue + + if controller_path is None: + log_cgroup_warning("{0} is not mounted for the {1} cgroup; will not track".format(supported_controller_name, self._cgroup_name)) + continue + + if expected_relative_path is not None: + expected_path = os.path.join(controller_mountpoint, expected_relative_path) + if controller_path != expected_path: + log_cgroup_warning("The {0} controller is not mounted at the expected path for the {1} cgroup; will not track. Actual cgroup path:[{2}] Expected:[{3}]".format(supported_controller_name, self._cgroup_name, controller_path, expected_path)) + continue + + if supported_controller_name == self.CPU_CONTROLLER: + controller = CpuControllerV1(self._cgroup_name, controller_path) + elif supported_controller_name == self.MEMORY_CONTROLLER: + controller = MemoryControllerV1(self._cgroup_name, controller_path) + + if controller is not None: + msg = "{0} controller for cgroup: {1}".format(supported_controller_name, controller) + log_cgroup_info(msg) + controllers.append(controller) + + return controllers + + def get_controller_procs_path(self, controller): + controller_path = self._controller_paths.get(controller) + if controller_path is not None and controller_path != "": + return os.path.join(controller_path, "cgroup.procs") + return "" + + def get_processes(self): + pids = set() + for controller in self._controller_paths.keys(): + procs_path = self.get_controller_procs_path(controller) + if os.path.exists(procs_path): + with open(procs_path, "r") as cgroup_procs: + for pid in cgroup_procs.read().split(): + pids.add(int(pid)) + return list(pids) + + +class CgroupV2(Cgroup): + CPU_CONTROLLER = "cpu" + + def __init__(self, cgroup_name, root_cgroup_path, cgroup_path, enabled_controllers): """ - return CGroupsApi._foreach_legacy_cgroup(lambda *_: None) + 
:param cgroup_name: The name of the cgroup. Used for logging/tracking purposes. + :param root_cgroup_path: A string representing the root cgroup path. String can be empty. + :param cgroup_path: A string representing the absolute cgroup path. String can be empty. + :param enabled_controllers: A list of strings representing the agent supported controllers enabled at the root cgroup. + """ + super(CgroupV2, self).__init__(cgroup_name) + self._root_cgroup_path = root_cgroup_path + self._cgroup_path = cgroup_path + self._enabled_controllers = enabled_controllers + + @staticmethod + def get_supported_controller_names(): + return [CgroupV2.CPU_CONTROLLER, CgroupV2.MEMORY_CONTROLLER] + + def check_in_expected_slice(self, expected_slice): + if expected_slice not in self._cgroup_path: + log_cgroup_warning("The {0} cgroup is not in the expected slice. Expected slice: {1}. Actual cgroup path: {2}".format(self._cgroup_name, expected_slice, self._cgroup_path), send_event=False) + return False + + return True + + def get_controllers(self, expected_relative_path=None): + controllers = [] + + for supported_controller_name in self.get_supported_controller_names(): + controller = None + + if supported_controller_name not in self._enabled_controllers: + # Do not send telemetry here. We already have telemetry for disabled controllers in cgroup init + log_cgroup_warning("{0} controller is not enabled; will not track".format(supported_controller_name), + send_event=False) + continue + + if self._cgroup_path == "": + log_cgroup_warning("Cgroup path for {0} cannot be determined; will not track".format(self._cgroup_name)) + continue + + if expected_relative_path is not None: + expected_path = os.path.join(self._root_cgroup_path, expected_relative_path) + if self._cgroup_path != expected_path: + log_cgroup_warning( + "The {0} cgroup is not mounted at the expected path; will not track. 
Actual cgroup path:[{1}] Expected:[{2}]".format( + self._cgroup_name, self._cgroup_path, expected_path)) + continue + + if supported_controller_name == self.CPU_CONTROLLER: + controller = CpuControllerV2(self._cgroup_name, self._cgroup_path) + elif supported_controller_name == self.MEMORY_CONTROLLER: + controller = MemoryControllerV2(self._cgroup_name, self._cgroup_path) + + if controller is not None: + msg = "{0} controller for cgroup: {1}".format(supported_controller_name, controller) + log_cgroup_info(msg) + controllers.append(controller) + + return controllers + + def get_procs_path(self): + if self._cgroup_path != "": + return os.path.join(self._cgroup_path, "cgroup.procs") + return "" + + def get_processes(self): + pids = set() + procs_path = self.get_procs_path() + if os.path.exists(procs_path): + with open(procs_path, "r") as cgroup_procs: + for pid in cgroup_procs.read().split(): + pids.add(int(pid)) + return list(pids) + + diff --git a/azurelinuxagent/ga/cgroupconfigurator.py b/azurelinuxagent/ga/cgroupconfigurator.py index e52fc15d0d..22634bb64c 100644 --- a/azurelinuxagent/ga/cgroupconfigurator.py +++ b/azurelinuxagent/ga/cgroupconfigurator.py @@ -23,12 +23,15 @@ from azurelinuxagent.common import conf from azurelinuxagent.common import logger -from azurelinuxagent.ga.cgroup import CpuCgroup, AGENT_NAME_TELEMETRY, MetricsCounter, MemoryCgroup -from azurelinuxagent.ga.cgroupapi import CGroupsApi, SystemdCgroupsApi, SystemdRunError, EXTENSION_SLICE_PREFIX +from azurelinuxagent.ga.cgroupcontroller import AGENT_NAME_TELEMETRY, MetricsCounter +from azurelinuxagent.ga.cgroupapi import SystemdRunError, EXTENSION_SLICE_PREFIX, CGroupUtil, SystemdCgroupApiv2, \ + log_cgroup_info, log_cgroup_warning, get_cgroup_api, InvalidCgroupMountpointException from azurelinuxagent.ga.cgroupstelemetry import CGroupsTelemetry +from azurelinuxagent.ga.cpucontroller import _CpuController +from azurelinuxagent.ga.memorycontroller import _MemoryController from 
azurelinuxagent.common.exception import ExtensionErrorCodes, CGroupsException, AgentMemoryExceededException from azurelinuxagent.common.future import ustr -from azurelinuxagent.common.osutil import get_osutil, systemd +from azurelinuxagent.common.osutil import systemd from azurelinuxagent.common.version import get_distro from azurelinuxagent.common.utils import shellutil, fileutil from azurelinuxagent.ga.extensionprocessutil import handle_process_completion @@ -65,18 +68,11 @@ LOGCOLLECTOR_SLICE = "azure-walinuxagent-logcollector.slice" # More info on resource limits properties in systemd here: # https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/7/html/resource_management_guide/sec-modifying_control_groups -_LOGCOLLECTOR_SLICE_CONTENTS_FMT = """ -[Unit] -Description=Slice for Azure VM Agent Periodic Log Collector -DefaultDependencies=no -Before=slices.target -[Slice] -CPUAccounting=yes -CPUQuota={cpu_quota} -MemoryAccounting=yes -""" -_LOGCOLLECTOR_CPU_QUOTA = "5%" -LOGCOLLECTOR_MEMORY_LIMIT = 30 * 1024 ** 2 # 30Mb +LOGCOLLECTOR_CPU_QUOTA_FOR_V1_AND_V2 = "5%" +LOGCOLLECTOR_MEMORY_THROTTLE_LIMIT_FOR_V2 = "170M" +LOGCOLLECTOR_MAX_THROTTLED_EVENTS_FOR_V2 = 10 +LOGCOLLECTOR_ANON_MEMORY_LIMIT_FOR_V1_AND_V2 = 25 * 1024 ** 2 # 25Mb +LOGCOLLECTOR_CACHE_MEMORY_LIMIT_FOR_V1_AND_V2 = 155 * 1024 ** 2 # 155Mb _AGENT_DROP_IN_FILE_SLICE = "10-Slice.conf" _AGENT_DROP_IN_FILE_SLICE_CONTENTS = """ @@ -114,18 +110,6 @@ class DisableCgroups(object): EXTENSIONS = "extensions" -def _log_cgroup_info(format_string, *args): - message = format_string.format(*args) - logger.info("[CGI] " + message) - add_event(op=WALAEventOperation.CGroupsInfo, message=message) - - -def _log_cgroup_warning(format_string, *args): - message = format_string.format(*args) - logger.info("[CGW] " + message) # log as INFO for now, in the future it should be logged as WARNING - add_event(op=WALAEventOperation.CGroupsInfo, message=message, is_success=False, log_event=False) - - class 
CGroupConfigurator(object): """ This class implements the high-level operations on CGroups (e.g. initialization, creation, etc) @@ -141,185 +125,117 @@ def __init__(self): self._agent_cgroups_enabled = False self._extensions_cgroups_enabled = False self._cgroups_api = None - self._agent_cpu_cgroup_path = None - self._agent_memory_cgroup_path = None - self._agent_memory_cgroup = None + self._agent_cgroup = None + self._agent_memory_metrics = None self._check_cgroups_lock = threading.RLock() # Protect the check_cgroups which is called from Monitor thread and main loop. def initialize(self): try: if self._initialized: return - # This check is to reset the quotas if agent goes from cgroup supported to unsupported distros later in time. - if not CGroupsApi.cgroups_supported(): - agent_drop_in_path = systemd.get_agent_drop_in_path() - try: - if os.path.exists(agent_drop_in_path) and os.path.isdir(agent_drop_in_path): - files_to_cleanup = [] - agent_drop_in_file_slice = os.path.join(agent_drop_in_path, _AGENT_DROP_IN_FILE_SLICE) - agent_drop_in_file_cpu_accounting = os.path.join(agent_drop_in_path, - _DROP_IN_FILE_CPU_ACCOUNTING) - agent_drop_in_file_memory_accounting = os.path.join(agent_drop_in_path, - _DROP_IN_FILE_MEMORY_ACCOUNTING) - agent_drop_in_file_cpu_quota = os.path.join(agent_drop_in_path, _DROP_IN_FILE_CPU_QUOTA) - files_to_cleanup.extend([agent_drop_in_file_slice, agent_drop_in_file_cpu_accounting, - agent_drop_in_file_memory_accounting, agent_drop_in_file_cpu_quota]) - self.__cleanup_all_files(files_to_cleanup) - self.__reload_systemd_config() - logger.info("Agent reset the quotas if distro: {0} goes from supported to unsupported list", get_distro()) - except Exception as err: - logger.warn("Unable to delete Agent drop-in files while resetting the quotas: {0}".format(err)) - # check whether cgroup monitoring is supported on the current distro - self._cgroups_supported = CGroupsApi.cgroups_supported() + self._cgroups_supported = 
CGroupUtil.cgroups_supported() if not self._cgroups_supported: - logger.info("Cgroup monitoring is not supported on {0}", get_distro()) + log_cgroup_info("Cgroup monitoring is not supported on {0}".format(get_distro()), send_event=True) + # If a distro is not supported, attempt to clean up any existing drop in files in case it was + # previously supported. It is necessary to cleanup in this scenario in case the OS hits any bugs on + # the kernel related to cgroups. + log_cgroup_info("Agent will reset the quotas in case distro: {0} went from supported to unsupported".format(get_distro()), send_event=False) + self._reset_agent_cgroup_setup() return # check that systemd is detected correctly - self._cgroups_api = SystemdCgroupsApi() if not systemd.is_systemd(): - _log_cgroup_warning("systemd was not detected on {0}", get_distro()) + log_cgroup_warning("systemd was not detected on {0}".format(get_distro())) return - _log_cgroup_info("systemd version: {0}", systemd.get_version()) - - # This is temporarily disabled while we analyze telemetry. Likely it will be removed. - # self.__collect_azure_unit_telemetry() - # self.__collect_agent_unit_files_telemetry() + log_cgroup_info("systemd version: {0}".format(systemd.get_version())) if not self.__check_no_legacy_cgroups(): return + # Determine which version of the Cgroup Api should be used. If the correct version can't be determined, + # do not enable resource monitoring/enforcement. + try: + self._cgroups_api = get_cgroup_api() + except InvalidCgroupMountpointException as e: + # Systemd mounts the cgroup file system at '/sys/fs/cgroup'. Previously, the agent supported cgroup + # usage if a user mounted the cgroup filesystem elsewhere. The agent no longer supports that + # scenario. Cleanup any existing drop in files in case the agent previously supported cgroups on + # this machine. 
+ log_cgroup_warning("The agent does not support cgroups if the default systemd mountpoint is not being used: {0}".format(ustr(e)), send_event=True) + log_cgroup_info("Agent will reset the quotas in case cgroup usage went from enabled to disabled") + self._reset_agent_cgroup_setup() + return + except CGroupsException as e: + log_cgroup_warning("Unable to determine which cgroup version to use: {0}".format(ustr(e)), send_event=True) + return + + # TODO: Move this and systemd system check to cgroups_supported logic above + if self.using_cgroup_v2(): + log_cgroup_info("Agent and extensions resource monitoring is not currently supported on cgroup v2") + return + + # We check the agent unit 'Slice' property before setting up azure.slice. This check is done first + # because the agent's Slice unit property will be 'azure.slice' if the slice drop-in file exists, even + # though systemd has not moved the agent to azure.slice yet. Systemd will only move the agent to + # azure.slice after a service restart. agent_unit_name = systemd.get_agent_unit_name() agent_slice = systemd.get_unit_property(agent_unit_name, "Slice") if agent_slice not in (AZURE_SLICE, "system.slice"): - _log_cgroup_warning("The agent is within an unexpected slice: {0}", agent_slice) + log_cgroup_warning("The agent is within an unexpected slice: {0}".format(agent_slice)) return + # Notes about slice setup: + # On first agent update (for machines where daemon version did not already create azure.slice), the + # agent creates azure.slice and the agent unit Slice drop-in file, but systemd does not move the agent + # unit to azure.slice until service restart. It is ok to enable cgroup usage in this case if agent is + # running in system.slice. 
+ self.__setup_azure_slice() - cpu_controller_root, memory_controller_root = self.__get_cgroup_controllers() - self._agent_cpu_cgroup_path, self._agent_memory_cgroup_path = self.__get_agent_cgroups(agent_slice, - cpu_controller_root, - memory_controller_root) + # Log mount points/root paths for cgroup controllers + self._cgroups_api.log_root_paths() - if self._agent_cpu_cgroup_path is not None or self._agent_memory_cgroup_path is not None: - self.enable() + # Get agent cgroup + self._agent_cgroup = self._cgroups_api.get_process_cgroup(process_id="self", cgroup_name=AGENT_NAME_TELEMETRY) - if self._agent_cpu_cgroup_path is not None: - _log_cgroup_info("Agent CPU cgroup: {0}", self._agent_cpu_cgroup_path) - self.__set_cpu_quota(conf.get_agent_cpu_quota()) - CGroupsTelemetry.track_cgroup(CpuCgroup(AGENT_NAME_TELEMETRY, self._agent_cpu_cgroup_path)) + if conf.get_cgroup_disable_on_process_check_failure() and self._check_fails_if_processes_found_in_agent_cgroup_before_enable(agent_slice): + reason = "Found unexpected processes in the agent cgroup before agent enable cgroups." 
+ self.disable(reason, DisableCgroups.ALL) + return - if self._agent_memory_cgroup_path is not None: - _log_cgroup_info("Agent Memory cgroup: {0}", self._agent_memory_cgroup_path) - self._agent_memory_cgroup = MemoryCgroup(AGENT_NAME_TELEMETRY, self._agent_memory_cgroup_path) - CGroupsTelemetry.track_cgroup(self._agent_memory_cgroup) + # Get controllers to track + agent_controllers = self._agent_cgroup.get_controllers(expected_relative_path=os.path.join(agent_slice, systemd.get_agent_unit_name())) + if len(agent_controllers) > 0: + self.enable() - _log_cgroup_info('Agent cgroups enabled: {0}', self._agent_cgroups_enabled) + for controller in agent_controllers: + for prop in controller.get_unit_properties(): + log_cgroup_info('Agent {0} unit property value: {1}'.format(prop, systemd.get_unit_property(systemd.get_agent_unit_name(), prop))) + if isinstance(controller, _CpuController): + self.__set_cpu_quota(conf.get_agent_cpu_quota()) + elif isinstance(controller, _MemoryController): + self._agent_memory_metrics = controller + CGroupsTelemetry.track_cgroup_controller(controller) except Exception as exception: - _log_cgroup_warning("Error initializing cgroups: {0}", ustr(exception)) + log_cgroup_warning("Error initializing cgroups: {0}".format(ustr(exception))) finally: + log_cgroup_info('Agent cgroups enabled: {0}'.format(self._agent_cgroups_enabled)) self._initialized = True - @staticmethod - def __collect_azure_unit_telemetry(): - azure_units = [] - - try: - units = shellutil.run_command(['systemctl', 'list-units', 'azure*', '-all']) - for line in units.split('\n'): - match = re.match(r'\s?(azure[^\s]*)\s?', line, re.IGNORECASE) - if match is not None: - azure_units.append((match.group(1), line)) - except shellutil.CommandError as command_error: - _log_cgroup_warning("Failed to list systemd units: {0}", ustr(command_error)) - - for unit_name, unit_description in azure_units: - unit_slice = "Unknown" - try: - unit_slice = systemd.get_unit_property(unit_name, "Slice") 
- except Exception as exception: - _log_cgroup_warning("Failed to query Slice for {0}: {1}", unit_name, ustr(exception)) - - _log_cgroup_info("Found an Azure unit under slice {0}: {1}", unit_slice, unit_description) - - if len(azure_units) == 0: - try: - cgroups = shellutil.run_command('systemd-cgls') - for line in cgroups.split('\n'): - if re.match(r'[^\x00-\xff]+azure\.slice\s*', line, re.UNICODE): - logger.info(ustr("Found a cgroup for azure.slice\n{0}").format(cgroups)) - # Don't add the output of systemd-cgls to the telemetry, since currently it does not support Unicode - add_event(op=WALAEventOperation.CGroupsInfo, message="Found a cgroup for azure.slice") - except shellutil.CommandError as command_error: - _log_cgroup_warning("Failed to list systemd units: {0}", ustr(command_error)) - - @staticmethod - def __collect_agent_unit_files_telemetry(): - agent_unit_files = [] - agent_service_name = get_osutil().get_service_name() - try: - fragment_path = systemd.get_unit_property(agent_service_name, "FragmentPath") - if fragment_path != systemd.get_agent_unit_file(): - agent_unit_files.append(fragment_path) - except Exception as exception: - _log_cgroup_warning("Failed to query the agent's FragmentPath: {0}", ustr(exception)) - - try: - drop_in_paths = systemd.get_unit_property(agent_service_name, "DropInPaths") - for path in drop_in_paths.split(): - agent_unit_files.append(path) - except Exception as exception: - _log_cgroup_warning("Failed to query the agent's DropInPaths: {0}", ustr(exception)) - - for unit_file in agent_unit_files: - try: - with open(unit_file, "r") as file_object: - _log_cgroup_info("Found a custom unit file for the agent: {0}\n{1}", unit_file, - file_object.read()) - except Exception as exception: - _log_cgroup_warning("Can't read {0}: {1}", unit_file, ustr(exception)) - def __check_no_legacy_cgroups(self): """ Older versions of the daemon (2.2.31-2.2.40) wrote their PID to /sys/fs/cgroup/{cpu,memory}/WALinuxAgent/WALinuxAgent. 
When running under systemd this could produce invalid resource usage data. Cgroups should not be enabled under this condition. """ - legacy_cgroups = self._cgroups_api.cleanup_legacy_cgroups() + legacy_cgroups = CGroupUtil.cleanup_legacy_cgroups() if legacy_cgroups > 0: - _log_cgroup_warning("The daemon's PID was added to a legacy cgroup; will not monitor resource usage.") + log_cgroup_warning("The daemon's PID was added to a legacy cgroup; will not monitor resource usage.") return False return True - def __get_cgroup_controllers(self): - # - # check v1 controllers - # - cpu_controller_root, memory_controller_root = self._cgroups_api.get_cgroup_mount_points() - - if cpu_controller_root is not None: - logger.info("The CPU cgroup controller is mounted at {0}", cpu_controller_root) - else: - _log_cgroup_warning("The CPU cgroup controller is not mounted") - - if memory_controller_root is not None: - logger.info("The memory cgroup controller is mounted at {0}", memory_controller_root) - else: - _log_cgroup_warning("The memory cgroup controller is not mounted") - - # - # check v2 controllers - # - cgroup2_mount_point, cgroup2_controllers = self._cgroups_api.get_cgroup2_controllers() - if cgroup2_mount_point is not None: - _log_cgroup_info("cgroups v2 mounted at {0}. Controllers: [{1}]", cgroup2_mount_point, - cgroup2_controllers) - - return cpu_controller_root, memory_controller_root - @staticmethod def __setup_azure_slice(): """ @@ -368,9 +284,8 @@ def __setup_azure_slice(): if not os.path.exists(vmextensions_slice): files_to_create.append((vmextensions_slice, _VMEXTENSIONS_SLICE_CONTENTS)) - # Update log collector slice contents - slice_contents = _LOGCOLLECTOR_SLICE_CONTENTS_FMT.format(cpu_quota=_LOGCOLLECTOR_CPU_QUOTA) - files_to_create.append((logcollector_slice, slice_contents)) + # New agent will setup limits for scope instead slice, so removing existing logcollector slice. 
+ CGroupConfigurator._Impl.__cleanup_unit_file(logcollector_slice) if fileutil.findre_in_file(agent_unit_file, r"Slice=") is not None: CGroupConfigurator._Impl.__cleanup_unit_file(agent_drop_in_file_slice) @@ -397,39 +312,59 @@ def __setup_azure_slice(): for path, contents in files_to_create: CGroupConfigurator._Impl.__create_unit_file(path, contents) except Exception as exception: - _log_cgroup_warning("Failed to create unit files for the azure slice: {0}", ustr(exception)) + log_cgroup_warning("Failed to create unit files for the azure slice: {0}".format(ustr(exception))) for unit_file in files_to_create: CGroupConfigurator._Impl.__cleanup_unit_file(unit_file) return CGroupConfigurator._Impl.__reload_systemd_config() + def _reset_agent_cgroup_setup(self): + try: + agent_drop_in_path = systemd.get_agent_drop_in_path() + if os.path.exists(agent_drop_in_path) and os.path.isdir(agent_drop_in_path): + files_to_cleanup = [] + agent_drop_in_file_slice = os.path.join(agent_drop_in_path, _AGENT_DROP_IN_FILE_SLICE) + agent_drop_in_file_cpu_accounting = os.path.join(agent_drop_in_path, + _DROP_IN_FILE_CPU_ACCOUNTING) + agent_drop_in_file_memory_accounting = os.path.join(agent_drop_in_path, + _DROP_IN_FILE_MEMORY_ACCOUNTING) + agent_drop_in_file_cpu_quota = os.path.join(agent_drop_in_path, _DROP_IN_FILE_CPU_QUOTA) + files_to_cleanup.extend([agent_drop_in_file_slice, agent_drop_in_file_cpu_accounting, + agent_drop_in_file_memory_accounting, agent_drop_in_file_cpu_quota]) + self.__cleanup_all_files(files_to_cleanup) + self.__reload_systemd_config() + except Exception as err: + logger.warn("Unable to delete Agent drop-in files while resetting the quotas: {0}".format(err)) + @staticmethod def __reload_systemd_config(): # reload the systemd configuration; the new slices will be used once the agent's service restarts try: - logger.info("Executing systemctl daemon-reload...") + log_cgroup_info("Executing systemctl daemon-reload...", send_event=False) 
shellutil.run_command(["systemctl", "daemon-reload"]) except Exception as exception: - _log_cgroup_warning("daemon-reload failed (create azure slice): {0}", ustr(exception)) + log_cgroup_warning("daemon-reload failed (create azure slice): {0}".format(ustr(exception))) + # W0238: Unused private member `_Impl.__create_unit_file(path, contents)` (unused-private-member) @staticmethod - def __create_unit_file(path, contents): + def __create_unit_file(path, contents): # pylint: disable=unused-private-member parent, _ = os.path.split(path) if not os.path.exists(parent): fileutil.mkdir(parent, mode=0o755) exists = os.path.exists(path) fileutil.write_file(path, contents) - _log_cgroup_info("{0} {1}", "Updated" if exists else "Created", path) + log_cgroup_info("{0} {1}".format("Updated" if exists else "Created", path)) + # W0238: Unused private member `_Impl.__cleanup_unit_file(path)` (unused-private-member) @staticmethod - def __cleanup_unit_file(path): + def __cleanup_unit_file(path): # pylint: disable=unused-private-member if os.path.exists(path): try: os.remove(path) - _log_cgroup_info("Removed {0}", path) + log_cgroup_info("Removed {0}".format(path)) except Exception as exception: - _log_cgroup_warning("Failed to remove {0}: {1}", path, ustr(exception)) + log_cgroup_warning("Failed to remove {0}: {1}".format(path, ustr(exception))) @staticmethod def __cleanup_all_files(files_to_cleanup): @@ -437,9 +372,9 @@ def __cleanup_all_files(files_to_cleanup): if os.path.exists(path): try: os.remove(path) - _log_cgroup_info("Removed {0}", path) + log_cgroup_info("Removed {0}".format(path)) except Exception as exception: - _log_cgroup_warning("Failed to remove {0}: {1}", path, ustr(exception)) + log_cgroup_warning("Failed to remove {0}: {1}".format(path, ustr(exception))) @staticmethod def __create_all_files(files_to_create): @@ -448,20 +383,20 @@ def __create_all_files(files_to_create): for path, contents in files_to_create: CGroupConfigurator._Impl.__create_unit_file(path, 
contents) except Exception as exception: - _log_cgroup_warning("Failed to create unit files : {0}", ustr(exception)) + log_cgroup_warning("Failed to create unit files : {0}".format(ustr(exception))) for unit_file in files_to_create: CGroupConfigurator._Impl.__cleanup_unit_file(unit_file) return def is_extension_resource_limits_setup_completed(self, extension_name, cpu_quota=None): unit_file_install_path = systemd.get_unit_file_install_path() - old_extension_slice_path = os.path.join(unit_file_install_path, SystemdCgroupsApi.get_extension_slice_name(extension_name, old_slice=True)) + old_extension_slice_path = os.path.join(unit_file_install_path, CGroupUtil.get_extension_slice_name(extension_name, old_slice=True)) # clean up the old slice from the disk if os.path.exists(old_extension_slice_path): CGroupConfigurator._Impl.__cleanup_unit_file(old_extension_slice_path) extension_slice_path = os.path.join(unit_file_install_path, - SystemdCgroupsApi.get_extension_slice_name(extension_name)) + CGroupUtil.get_extension_slice_name(extension_name)) cpu_quota = str( cpu_quota) + "%" if cpu_quota is not None else "" # setting an empty value resets to the default (infinity) slice_contents = _EXTENSION_SLICE_CONTENTS.format(extension_name=extension_name, @@ -472,51 +407,6 @@ def is_extension_resource_limits_setup_completed(self, extension_name, cpu_quota return True return False - def __get_agent_cgroups(self, agent_slice, cpu_controller_root, memory_controller_root): - agent_unit_name = systemd.get_agent_unit_name() - - expected_relative_path = os.path.join(agent_slice, agent_unit_name) - cpu_cgroup_relative_path, memory_cgroup_relative_path = self._cgroups_api.get_process_cgroup_relative_paths( - "self") - - if cpu_cgroup_relative_path is None: - _log_cgroup_warning("The agent's process is not within a CPU cgroup") - else: - if cpu_cgroup_relative_path == expected_relative_path: - _log_cgroup_info('CPUAccounting: {0}', systemd.get_unit_property(agent_unit_name, 
"CPUAccounting")) - _log_cgroup_info('CPUQuota: {0}', systemd.get_unit_property(agent_unit_name, "CPUQuotaPerSecUSec")) - else: - _log_cgroup_warning( - "The Agent is not in the expected CPU cgroup; will not enable monitoring. Cgroup:[{0}] Expected:[{1}]", - cpu_cgroup_relative_path, - expected_relative_path) - cpu_cgroup_relative_path = None # Set the path to None to prevent monitoring - - if memory_cgroup_relative_path is None: - _log_cgroup_warning("The agent's process is not within a memory cgroup") - else: - if memory_cgroup_relative_path == expected_relative_path: - memory_accounting = systemd.get_unit_property(agent_unit_name, "MemoryAccounting") - _log_cgroup_info('MemoryAccounting: {0}', memory_accounting) - else: - _log_cgroup_info( - "The Agent is not in the expected memory cgroup; will not enable monitoring. CGroup:[{0}] Expected:[{1}]", - memory_cgroup_relative_path, - expected_relative_path) - memory_cgroup_relative_path = None # Set the path to None to prevent monitoring - - if cpu_controller_root is not None and cpu_cgroup_relative_path is not None: - agent_cpu_cgroup_path = os.path.join(cpu_controller_root, cpu_cgroup_relative_path) - else: - agent_cpu_cgroup_path = None - - if memory_controller_root is not None and memory_cgroup_relative_path is not None: - agent_memory_cgroup_path = os.path.join(memory_controller_root, memory_cgroup_relative_path) - else: - agent_memory_cgroup_path = None - - return agent_cpu_cgroup_path, agent_memory_cgroup_path - def supported(self): return self._cgroups_supported @@ -529,6 +419,9 @@ def agent_enabled(self): def extensions_enabled(self): return self._extensions_cgroups_enabled + def using_cgroup_v2(self): + return isinstance(self._cgroups_api, SystemdCgroupApiv2) + def enable(self): if not self.supported(): raise CGroupsException( @@ -542,7 +435,7 @@ def disable(self, reason, disable_cgroups): self.__reset_agent_cpu_quota() extension_services = self.get_extension_services_list() for extension in 
extension_services: - logger.info("Resetting extension : {0} and it's services: {1} CPUQuota".format(extension, extension_services[extension])) + log_cgroup_info("Resetting extension : {0} and it's services: {1} CPUQuota".format(extension, extension_services[extension]), send_event=False) self.__reset_extension_cpu_quota(extension_name=extension) self.__reset_extension_services_cpu_quota(extension_services[extension]) self.__reload_systemd_config() @@ -553,11 +446,13 @@ def disable(self, reason, disable_cgroups): elif disable_cgroups == DisableCgroups.AGENT: # disable agent self._agent_cgroups_enabled = False self.__reset_agent_cpu_quota() - CGroupsTelemetry.stop_tracking(CpuCgroup(AGENT_NAME_TELEMETRY, self._agent_cpu_cgroup_path)) + agent_controllers = self._agent_cgroup.get_controllers() + for controller in agent_controllers: + if isinstance(controller, _CpuController): + CGroupsTelemetry.stop_tracking(controller) + break - message = "[CGW] Disabling resource usage monitoring. Reason: {0}".format(reason) - logger.info(message) # log as INFO for now, in the future it should be logged as WARNING - add_event(op=WALAEventOperation.CGroupsDisabled, message=message, is_success=False, log_event=False) + log_cgroup_warning("Disabling resource usage monitoring. Reason: {0}".format(reason), op=WALAEventOperation.CGroupsDisabled) @staticmethod def __set_cpu_quota(quota): @@ -568,7 +463,7 @@ def __set_cpu_quota(quota): over this setting. """ quota_percentage = "{0}%".format(quota) - _log_cgroup_info("Ensuring the agent's CPUQuota is {0}", quota_percentage) + log_cgroup_info("Ensuring the agent's CPUQuota is {0}".format(quota_percentage)) if CGroupConfigurator._Impl.__try_set_cpu_quota(quota_percentage): CGroupsTelemetry.set_track_throttled_time(True) @@ -580,13 +475,13 @@ def __reset_agent_cpu_quota(): NOTE: This resets the quota on the agent's default dropin file; any local overrides on the VM will take precedence over this setting. 
""" - logger.info("Resetting agent's CPUQuota") + log_cgroup_info("Resetting agent's CPUQuota", send_event=False) if CGroupConfigurator._Impl.__try_set_cpu_quota(''): # setting an empty value resets to the default (infinity) - _log_cgroup_info('CPUQuota: {0}', - systemd.get_unit_property(systemd.get_agent_unit_name(), "CPUQuotaPerSecUSec")) + log_cgroup_info('CPUQuota: {0}'.format(systemd.get_unit_property(systemd.get_agent_unit_name(), "CPUQuotaPerSecUSec"))) + # W0238: Unused private member `_Impl.__try_set_cpu_quota(quota)` (unused-private-member) @staticmethod - def __try_set_cpu_quota(quota): + def __try_set_cpu_quota(quota): # pylint: disable=unused-private-member try: drop_in_file = os.path.join(systemd.get_agent_drop_in_path(), _DROP_IN_FILE_CPU_QUOTA) contents = _DROP_IN_FILE_CPU_QUOTA_CONTENTS_FORMAT.format(quota) @@ -596,16 +491,36 @@ def __try_set_cpu_quota(quota): return True # no need to update the file; return here to avoid doing a daemon-reload CGroupConfigurator._Impl.__create_unit_file(drop_in_file, contents) except Exception as exception: - _log_cgroup_warning('Failed to set CPUQuota: {0}', ustr(exception)) + log_cgroup_warning('Failed to set CPUQuota: {0}'.format(ustr(exception))) return False try: - logger.info("Executing systemctl daemon-reload...") + log_cgroup_info("Executing systemctl daemon-reload...", send_event=False) shellutil.run_command(["systemctl", "daemon-reload"]) except Exception as exception: - _log_cgroup_warning("daemon-reload failed (set quota): {0}", ustr(exception)) + log_cgroup_warning("daemon-reload failed (set quota): {0}".format(ustr(exception))) return False return True + def _check_fails_if_processes_found_in_agent_cgroup_before_enable(self, agent_slice): + """ + This check ensures that before we enable the agent's cgroups, there are no unexpected processes in the agent's cgroup already. 
+ + The issue we observed that long running extension processes may be in agent cgroups if agent goes this cycle enabled(1)->disabled(2)->enabled(3). + 1. Agent cgroups enabled in some version + 2. Disabled agent cgroups due to check_cgroups regular check. Once we disable the cgroups we don't run the extensions in it's own slice, so they will be in agent cgroups. + 3. When ext_hanlder restart and enable the cgroups again, already running processes from step 2 still be in agent cgroups. This may cause the extensions run with agent limit. + """ + if agent_slice != AZURE_SLICE: + return False + try: + log_cgroup_info("Checking for unexpected processes in the agent's cgroup before enabling cgroups") + self._check_processes_in_agent_cgroup() + except CGroupsException as exception: + log_cgroup_warning(ustr(exception)) + return True + + return False + def check_cgroups(self, cgroup_metrics): self._check_cgroups_lock.acquire() try: @@ -651,6 +566,7 @@ def _check_processes_in_agent_cgroup(self): """ unexpected = [] agent_cgroup_proc_names = [] + try: daemon = os.getppid() extension_handler = os.getpid() @@ -658,12 +574,12 @@ def _check_processes_in_agent_cgroup(self): agent_commands.update(shellutil.get_running_commands()) systemd_run_commands = set() systemd_run_commands.update(self._cgroups_api.get_systemd_run_commands()) - agent_cgroup = CGroupsApi.get_processes_in_cgroup(self._agent_cpu_cgroup_path) + agent_cgroup_proccesses = self._agent_cgroup.get_processes() # get the running commands again in case new commands started or completed while we were fetching the processes in the cgroup; agent_commands.update(shellutil.get_running_commands()) systemd_run_commands.update(self._cgroups_api.get_systemd_run_commands()) - for process in agent_cgroup: + for process in agent_cgroup_proccesses: agent_cgroup_proc_names.append(self.__format_process(process)) # Note that the agent uses systemd-run to start extensions; systemd-run belongs to the agent cgroup, though the extensions 
don't. if process in (daemon, extension_handler) or process in systemd_run_commands: @@ -686,12 +602,28 @@ def _check_processes_in_agent_cgroup(self): if len(unexpected) >= 5: # collect just a small sample break except Exception as exception: - _log_cgroup_warning("Error checking the processes in the agent's cgroup: {0}".format(ustr(exception))) + log_cgroup_warning("Error checking the processes in the agent's cgroup: {0}".format(ustr(exception))) if len(unexpected) > 0: self._report_agent_cgroups_procs(agent_cgroup_proc_names, unexpected) raise CGroupsException("The agent's cgroup includes unexpected processes: {0}".format(unexpected)) + def get_logcollector_unit_properties(self): + """ + Returns the systemd unit properties for the log collector process. + + Each property should be explicitly set (even if already included in the log collector slice) for the log + collector process to run in the transient scope directory with the expected accounting and limits. + """ + logcollector_properties = ["--property=CPUAccounting=yes", "--property=MemoryAccounting=yes", "--property=CPUQuota={0}".format(LOGCOLLECTOR_CPU_QUOTA_FOR_V1_AND_V2)] + if not self.using_cgroup_v2(): + return logcollector_properties + # Memory throttling limit is used when running log collector on v2 machines using the 'MemoryHigh' property. + # We do not use a systemd property to enforce memory on V1 because it invokes the OOM killer if the limit + # is exceeded. 
+ logcollector_properties.append("--property=MemoryHigh={0}".format(LOGCOLLECTOR_MEMORY_THROTTLE_LIMIT_FOR_V2)) + return logcollector_properties + @staticmethod def _get_command(pid): try: @@ -787,8 +719,8 @@ def _check_agent_throttled_time(cgroup_metrics): raise CGroupsException("The agent has been throttled for {0} seconds".format(metric.value)) def check_agent_memory_usage(self): - if self.enabled() and self._agent_memory_cgroup: - metrics = self._agent_memory_cgroup.get_tracked_metrics() + if self.enabled() and self._agent_memory_metrics is not None: + metrics = self._agent_memory_metrics.get_tracked_metrics() current_usage = 0 for metric in metrics: if metric.counter == MetricsCounter.TOTAL_MEM_USAGE: @@ -814,62 +746,40 @@ def _get_parent(pid): return 0 def start_tracking_unit_cgroups(self, unit_name): - """ - TODO: Start tracking Memory Cgroups - """ try: - cpu_cgroup_path, memory_cgroup_path = self._cgroups_api.get_unit_cgroup_paths(unit_name) - - if cpu_cgroup_path is None: - logger.info("The CPU controller is not mounted; will not track resource usage") - else: - CGroupsTelemetry.track_cgroup(CpuCgroup(unit_name, cpu_cgroup_path)) + cgroup = self._cgroups_api.get_unit_cgroup(unit_name, unit_name) + controllers = cgroup.get_controllers() - if memory_cgroup_path is None: - logger.info("The Memory controller is not mounted; will not track resource usage") - else: - CGroupsTelemetry.track_cgroup(MemoryCgroup(unit_name, memory_cgroup_path)) + for controller in controllers: + CGroupsTelemetry.track_cgroup_controller(controller) except Exception as exception: - logger.info("Failed to start tracking resource usage for the extension: {0}", ustr(exception)) + log_cgroup_info("Failed to start tracking resource usage for the extension: {0}".format(ustr(exception)), send_event=False) def stop_tracking_unit_cgroups(self, unit_name): - """ - TODO: remove Memory cgroups from tracked list. 
- """ try: - cpu_cgroup_path, memory_cgroup_path = self._cgroups_api.get_unit_cgroup_paths(unit_name) + cgroup = self._cgroups_api.get_unit_cgroup(unit_name, unit_name) + controllers = cgroup.get_controllers() - if cpu_cgroup_path is not None: - CGroupsTelemetry.stop_tracking(CpuCgroup(unit_name, cpu_cgroup_path)) - - if memory_cgroup_path is not None: - CGroupsTelemetry.stop_tracking(MemoryCgroup(unit_name, memory_cgroup_path)) + for controller in controllers: + CGroupsTelemetry.stop_tracking(controller) except Exception as exception: - logger.info("Failed to stop tracking resource usage for the extension service: {0}", ustr(exception)) + log_cgroup_info("Failed to stop tracking resource usage for the extension service: {0}".format(ustr(exception)), send_event=False) def stop_tracking_extension_cgroups(self, extension_name): - """ - TODO: remove extension Memory cgroups from tracked list - """ try: - extension_slice_name = SystemdCgroupsApi.get_extension_slice_name(extension_name) - cgroup_relative_path = os.path.join(_AZURE_VMEXTENSIONS_SLICE, - extension_slice_name) - - cpu_cgroup_mountpoint, memory_cgroup_mountpoint = self._cgroups_api.get_cgroup_mount_points() - cpu_cgroup_path = os.path.join(cpu_cgroup_mountpoint, cgroup_relative_path) - memory_cgroup_path = os.path.join(memory_cgroup_mountpoint, cgroup_relative_path) - - if cpu_cgroup_path is not None: - CGroupsTelemetry.stop_tracking(CpuCgroup(extension_name, cpu_cgroup_path)) + extension_slice_name = CGroupUtil.get_extension_slice_name(extension_name) + cgroup_relative_path = os.path.join(_AZURE_VMEXTENSIONS_SLICE, extension_slice_name) - if memory_cgroup_path is not None: - CGroupsTelemetry.stop_tracking(MemoryCgroup(extension_name, memory_cgroup_path)) + cgroup = self._cgroups_api.get_cgroup_from_relative_path(relative_path=cgroup_relative_path, + cgroup_name=extension_name) + controllers = cgroup.get_controllers() + for controller in controllers: + CGroupsTelemetry.stop_tracking(controller) except 
Exception as exception: - logger.info("Failed to stop tracking resource usage for the extension service: {0}", ustr(exception)) + log_cgroup_info("Failed to stop tracking resource usage for the extension service: {0}".format(ustr(exception)), send_event=False) def start_extension_command(self, extension_name, command, cmd_name, timeout, shell, cwd, env, stdout, stderr, error_code=ExtensionErrorCodes.PluginUnknownFailure): @@ -923,19 +833,19 @@ def setup_extension_slice(self, extension_name, cpu_quota): if self.enabled(): unit_file_install_path = systemd.get_unit_file_install_path() extension_slice_path = os.path.join(unit_file_install_path, - SystemdCgroupsApi.get_extension_slice_name(extension_name)) + CGroupUtil.get_extension_slice_name(extension_name)) try: cpu_quota = str(cpu_quota) + "%" if cpu_quota is not None else "" # setting an empty value resets to the default (infinity) if cpu_quota == "": - _log_cgroup_info("CPUQuota not set for {0}", extension_name) + log_cgroup_info("CPUQuota not set for {0}".format(extension_name)) else: - _log_cgroup_info("Ensuring the {0}'s CPUQuota is {1}", extension_name, cpu_quota) + log_cgroup_info("Ensuring the {0}'s CPUQuota is {1}".format(extension_name, cpu_quota)) slice_contents = _EXTENSION_SLICE_CONTENTS.format(extension_name=extension_name, cpu_quota=cpu_quota) CGroupConfigurator._Impl.__create_unit_file(extension_slice_path, slice_contents) except Exception as exception: - _log_cgroup_warning("Failed to set the extension {0} slice and quotas: {1}", extension_name, - ustr(exception)) + log_cgroup_warning("Failed to set the extension {0} slice and quotas: {1}".format(extension_name, + ustr(exception))) CGroupConfigurator._Impl.__cleanup_unit_file(extension_slice_path) def remove_extension_slice(self, extension_name): @@ -945,7 +855,7 @@ def remove_extension_slice(self, extension_name): """ if self.enabled(): unit_file_install_path = systemd.get_unit_file_install_path() - extension_slice_name = 
SystemdCgroupsApi.get_extension_slice_name(extension_name) + extension_slice_name = CGroupUtil.get_extension_slice_name(extension_name) extension_slice_path = os.path.join(unit_file_install_path, extension_slice_name) if os.path.exists(extension_slice_path): self.stop_tracking_extension_cgroups(extension_name) @@ -976,7 +886,7 @@ def set_extension_services_cpu_memory_quota(self, services_list): cpu_quota = service.get('cpuQuotaPercentage', None) if cpu_quota is not None: cpu_quota = str(cpu_quota) + "%" - _log_cgroup_info("Ensuring the {0}'s CPUQuota is {1}", service_name, cpu_quota) + log_cgroup_info("Ensuring the {0}'s CPUQuota is {1}".format(service_name, cpu_quota)) drop_in_file_cpu_quota = os.path.join(drop_in_path, _DROP_IN_FILE_CPU_QUOTA) cpu_quota_contents = _DROP_IN_FILE_CPU_QUOTA_CONTENTS_FORMAT.format(cpu_quota) files_to_create.append((drop_in_file_cpu_quota, cpu_quota_contents)) @@ -1010,7 +920,7 @@ def __reset_extension_services_cpu_quota(self, services_list): files_to_create.append((drop_in_file_cpu_quota, cpu_quota_contents)) self.__create_all_files(files_to_create) except Exception as exception: - _log_cgroup_warning('Failed to reset CPUQuota for {0} : {1}', service_name, ustr(exception)) + log_cgroup_warning('Failed to reset CPUQuota for {0} : {1}'.format(service_name, ustr(exception))) def remove_extension_services_drop_in_files(self, services_list): """ @@ -1035,7 +945,7 @@ def remove_extension_services_drop_in_files(self, services_list): files_to_cleanup.append(drop_in_file_cpu_quota) CGroupConfigurator._Impl.__cleanup_all_files(files_to_cleanup) - _log_cgroup_info("Drop in files removed for {0}".format(service_name)) + log_cgroup_info("Drop in files removed for {0}".format(service_name)) def stop_tracking_extension_services_cgroups(self, services_list): """ @@ -1076,10 +986,10 @@ def get_extension_services_list(): services = resource_limits.get('services') if resource_limits else None extensions_services[extensions_name] = services except 
(IOError, OSError) as e: - _log_cgroup_warning( + log_cgroup_warning( 'Failed to load manifest file ({0}): {1}'.format(manifest_path, e.strerror)) except ValueError: - _log_cgroup_warning('Malformed manifest file ({0}).'.format(manifest_path)) + log_cgroup_warning('Malformed manifest file ({0}).'.format(manifest_path)) return extensions_services # unique instance for the singleton diff --git a/azurelinuxagent/ga/cgroupcontroller.py b/azurelinuxagent/ga/cgroupcontroller.py new file mode 100644 index 0000000000..a530553b21 --- /dev/null +++ b/azurelinuxagent/ga/cgroupcontroller.py @@ -0,0 +1,175 @@ +# Copyright 2018 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Requires Python 2.6+ and Openssl 1.0+ + +import errno +import os +from datetime import timedelta + +from azurelinuxagent.common import logger, conf +from azurelinuxagent.common.exception import CGroupsException +from azurelinuxagent.common.future import ustr +from azurelinuxagent.common.utils import fileutil + +_REPORT_EVERY_HOUR = timedelta(hours=1) +_DEFAULT_REPORT_PERIOD = timedelta(seconds=conf.get_cgroup_check_period()) + +AGENT_NAME_TELEMETRY = "walinuxagent.service" # Name used for telemetry; it needs to be consistent even if the name of the service changes +AGENT_LOG_COLLECTOR = "azure-walinuxagent-logcollector" + + +class CounterNotFound(Exception): + pass + + +class MetricValue(object): + """ + Class for defining all the required metric fields to send telemetry. 
+ """ + + def __init__(self, category, counter, instance, value, report_period=_DEFAULT_REPORT_PERIOD): + self._category = category + self._counter = counter + self._instance = instance + self._value = value + self._report_period = report_period + + @property + def category(self): + return self._category + + @property + def counter(self): + return self._counter + + @property + def instance(self): + return self._instance + + @property + def value(self): + return self._value + + @property + def report_period(self): + return self._report_period + + +class MetricsCategory(object): + MEMORY_CATEGORY = "Memory" + CPU_CATEGORY = "CPU" + + +class MetricsCounter(object): + PROCESSOR_PERCENT_TIME = "% Processor Time" + THROTTLED_TIME = "Throttled Time (s)" + TOTAL_MEM_USAGE = "Total Memory Usage (B)" + ANON_MEM_USAGE = "Anon Memory Usage (B)" + CACHE_MEM_USAGE = "Cache Memory Usage (B)" + MAX_MEM_USAGE = "Max Memory Usage (B)" + SWAP_MEM_USAGE = "Swap Memory Usage (B)" + MEM_THROTTLED = "Total Memory Throttled Events" + AVAILABLE_MEM = "Available Memory (MB)" + USED_MEM = "Used Memory (MB)" + + +class _CgroupController(object): + def __init__(self, name, cgroup_path): + """ + Initialize _data collection for the controller + :param: name: Name of the CGroup + :param: cgroup_path: Path of the controller + :return: + """ + self.name = name + self.path = cgroup_path + + def __str__(self): + return "{0} [{1}]".format(self.name, self.path) + + def _get_cgroup_file(self, file_name): + return os.path.join(self.path, file_name) + + def _get_file_contents(self, file_name): + """ + Retrieve the contents of file. + + :param str file_name: Name of file within that metric controller + :return: Entire contents of the file + :rtype: str + """ + parameter_file = self._get_cgroup_file(file_name) + + return fileutil.read_file(parameter_file) + + def _get_parameters(self, parameter_name, first_line_only=False): + """ + Retrieve the values of a parameter from a controller. 
+ Returns a list of values in the file. + + :param first_line_only: return only the first line. + :param str parameter_name: Name of file within that metric controller + :return: The first line of the file, without line terminator + :rtype: [str] + """ + result = [] + try: + values = self._get_file_contents(parameter_name).splitlines() + result = values[0] if first_line_only else values + except IndexError: + parameter_filename = self._get_cgroup_file(parameter_name) + logger.error("File {0} is empty but should not be".format(parameter_filename)) + raise CGroupsException("File {0} is empty but should not be".format(parameter_filename)) + except Exception as e: + if isinstance(e, (IOError, OSError)) and e.errno == errno.ENOENT: # pylint: disable=E1101 + raise e + parameter_filename = self._get_cgroup_file(parameter_name) + raise CGroupsException("Exception while attempting to read {0}".format(parameter_filename), e) + return result + + def is_active(self): + """ + Returns True if any processes belong to the cgroup. In v1, cgroup.procs returns a list of the thread group IDs + belong to the cgroup. In v2, cgroup.procs returns a list of the process IDs belonging to the cgroup. + """ + try: + procs = self._get_parameters("cgroup.procs") + if procs: + return len(procs) != 0 + except (IOError, OSError) as e: + if e.errno == errno.ENOENT: + # only suppressing file not found exceptions. + pass + else: + logger.periodic_warn(logger.EVERY_HALF_HOUR, + 'Could not get list of procs from "cgroup.procs" file in the cgroup: {0}.' + ' Internal error: {1}'.format(self.path, ustr(e))) + except CGroupsException as e: + logger.periodic_warn(logger.EVERY_HALF_HOUR, + 'Could not get list of procs from "cgroup.procs" file in the cgroup: {0}.' + ' Internal error: {1}'.format(self.path, ustr(e))) + return False + + def get_tracked_metrics(self, **_): + """ + Retrieves the current value of the metrics tracked for this controller/cgroup and returns them as an array. 
+ """ + raise NotImplementedError() + + def get_unit_properties(self): + """ + Returns a list of the unit properties to collect for the controller. + """ + raise NotImplementedError() diff --git a/azurelinuxagent/ga/cgroupstelemetry.py b/azurelinuxagent/ga/cgroupstelemetry.py index 5943b45ade..412f75f4f0 100644 --- a/azurelinuxagent/ga/cgroupstelemetry.py +++ b/azurelinuxagent/ga/cgroupstelemetry.py @@ -17,7 +17,7 @@ import threading from azurelinuxagent.common import logger -from azurelinuxagent.ga.cgroup import CpuCgroup +from azurelinuxagent.ga.cpucontroller import _CpuController from azurelinuxagent.common.future import ustr @@ -37,18 +37,18 @@ def get_track_throttled_time(): return CGroupsTelemetry._track_throttled_time @staticmethod - def track_cgroup(cgroup): + def track_cgroup_controller(cgroup_controller): """ - Adds the given item to the dictionary of tracked cgroups + Adds the given item to the dictionary of tracked cgroup controllers """ - if isinstance(cgroup, CpuCgroup): + if isinstance(cgroup_controller, _CpuController): # set the current cpu usage - cgroup.initialize_cpu_usage() + cgroup_controller.initialize_cpu_usage() with CGroupsTelemetry._rlock: - if not CGroupsTelemetry.is_tracked(cgroup.path): - CGroupsTelemetry._tracked[cgroup.path] = cgroup - logger.info("Started tracking cgroup {0}", cgroup) + if not CGroupsTelemetry.is_tracked(cgroup_controller.path): + CGroupsTelemetry._tracked[cgroup_controller.path] = cgroup_controller + logger.info("Started tracking cgroup {0}", cgroup_controller) @staticmethod def is_tracked(path): @@ -75,11 +75,11 @@ def stop_tracking(cgroup): @staticmethod def poll_all_tracked(): metrics = [] - inactive_cgroups = [] + inactive_controllers = [] with CGroupsTelemetry._rlock: - for cgroup in CGroupsTelemetry._tracked.values(): + for controller in CGroupsTelemetry._tracked.values(): try: - metrics.extend(cgroup.get_tracked_metrics(track_throttled_time=CGroupsTelemetry._track_throttled_time)) + 
metrics.extend(controller.get_tracked_metrics(track_throttled_time=CGroupsTelemetry._track_throttled_time)) except Exception as e: # There can be scenarios when the CGroup has been deleted by the time we are fetching the values # from it. This would raise IOError with file entry not found (ERRNO: 2). We do not want to log @@ -87,11 +87,11 @@ def poll_all_tracked(): # exceptions which could occur, which is why we do a periodic log for all the other errors. if not isinstance(e, (IOError, OSError)) or e.errno != errno.ENOENT: # pylint: disable=E1101 logger.periodic_warn(logger.EVERY_HOUR, '[PERIODIC] Could not collect metrics for cgroup ' - '{0}. Error : {1}'.format(cgroup.name, ustr(e))) - if not cgroup.is_active(): - inactive_cgroups.append(cgroup) - for inactive_cgroup in inactive_cgroups: - CGroupsTelemetry.stop_tracking(inactive_cgroup) + '{0}. Error : {1}'.format(controller.name, ustr(e))) + if not controller.is_active(): + inactive_controllers.append(controller) + for inactive_controller in inactive_controllers: + CGroupsTelemetry.stop_tracking(inactive_controller) return metrics diff --git a/azurelinuxagent/ga/collect_logs.py b/azurelinuxagent/ga/collect_logs.py index 4987d865e9..488691a5aa 100644 --- a/azurelinuxagent/ga/collect_logs.py +++ b/azurelinuxagent/ga/collect_logs.py @@ -25,19 +25,17 @@ import azurelinuxagent.common.conf as conf from azurelinuxagent.common import logger -from azurelinuxagent.ga.cgroup import MetricsCounter -from azurelinuxagent.common.event import elapsed_milliseconds, add_event, WALAEventOperation, report_metric +from azurelinuxagent.ga.cgroupcontroller import MetricsCounter +from azurelinuxagent.common.event import elapsed_milliseconds, add_event, WALAEventOperation from azurelinuxagent.common.future import ustr from azurelinuxagent.ga.interfaces import ThreadHandlerInterface from azurelinuxagent.ga.logcollector import COMPRESSED_ARCHIVE_PATH, GRACEFUL_KILL_ERRCODE -from azurelinuxagent.ga.cgroupconfigurator import 
CGroupConfigurator, LOGCOLLECTOR_MEMORY_LIMIT +from azurelinuxagent.ga.cgroupconfigurator import CGroupConfigurator, LOGCOLLECTOR_ANON_MEMORY_LIMIT_FOR_V1_AND_V2, LOGCOLLECTOR_CACHE_MEMORY_LIMIT_FOR_V1_AND_V2, LOGCOLLECTOR_MAX_THROTTLED_EVENTS_FOR_V2 from azurelinuxagent.common.protocol.util import get_protocol_util from azurelinuxagent.common.utils import shellutil from azurelinuxagent.common.utils.shellutil import CommandError from azurelinuxagent.common.version import PY_VERSION_MAJOR, PY_VERSION_MINOR, AGENT_NAME, CURRENT_VERSION -_INITIAL_LOG_COLLECTION_DELAY = 5 * 60 # Five minutes of delay - def get_collect_logs_handler(): return CollectLogsHandler() @@ -46,18 +44,27 @@ def get_collect_logs_handler(): def is_log_collection_allowed(): # There are three conditions that need to be met in order to allow periodic log collection: # 1) It should be enabled in the configuration. - # 2) The system must be using cgroups to manage services. Needed for resource limiting of the log collection. + # 2) The system must be using cgroups to manage services - needed for resource limiting of the log collection. The + # agent currently fully supports resource limiting for v1, but only supports log collector resource limiting for v2 + # if enabled via configuration. + # This condition is True if either: + # a. cgroup usage in the agent is enabled; OR + # b. the machine is using cgroup v2 and v2 resource limiting is enabled in the configuration. # 3) The python version must be greater than 2.6 in order to support the ZipFile library used when collecting. 
conf_enabled = conf.get_collect_logs() cgroups_enabled = CGroupConfigurator.get_instance().enabled() + cgroup_v2_resource_limiting_enabled = CGroupConfigurator.get_instance().using_cgroup_v2() and conf.get_enable_cgroup_v2_resource_limiting() supported_python = PY_VERSION_MINOR >= 6 if PY_VERSION_MAJOR == 2 else PY_VERSION_MAJOR == 3 - is_allowed = conf_enabled and cgroups_enabled and supported_python + is_allowed = conf_enabled and (cgroups_enabled or cgroup_v2_resource_limiting_enabled) and supported_python msg = "Checking if log collection is allowed at this time [{0}]. All three conditions must be met: " \ - "configuration enabled [{1}], cgroups enabled [{2}], python supported: [{3}]".format(is_allowed, - conf_enabled, - cgroups_enabled, - supported_python) + "1. configuration enabled [{1}], " \ + "2. cgroups v1 enabled [{2}] OR cgroups v2 is in use and v2 resource limiting configuration enabled [{3}], " \ + "3. python supported: [{4}]".format(is_allowed, + conf_enabled, + cgroups_enabled, + cgroup_v2_resource_limiting_enabled, + supported_python) logger.info(msg) add_event( name=AGENT_NAME, @@ -116,8 +123,8 @@ def is_alive(self): def start(self): self.event_thread = threading.Thread(target=self.daemon) - self.event_thread.setDaemon(True) - self.event_thread.setName(self.get_thread_name()) + self.event_thread.daemon = True + self.event_thread.name = self.get_thread_name() self.event_thread.start() def join(self): @@ -144,7 +151,7 @@ def init_protocols(self): def daemon(self): # Delay the first collector on start up to give short lived VMs (that might be dead before the second # collection has a chance to run) an opportunity to do produce meaningful logs to collect. 
- time.sleep(_INITIAL_LOG_COLLECTION_DELAY) + time.sleep(conf.get_log_collector_initial_delay()) try: CollectLogsHandler.enable_monitor_cgroups_check() @@ -171,15 +178,13 @@ def collect_and_send_logs(self): def _collect_logs(self): logger.info("Starting log collection...") - # Invoke the command line tool in the agent to collect logs, with resource limits on CPU. - # Some distros like ubuntu20 by default cpu and memory accounting enabled. Thus create nested cgroups under the logcollector slice - # So disabling CPU and Memory accounting prevents from creating nested cgroups, so that all the counters will be present in logcollector Cgroup - + # Invoke the command line tool in the agent to collect logs. The --scope option starts the process as a systemd + # transient scope unit. The --property option is used to set systemd memory and cpu properties on the scope. systemd_cmd = [ - "systemd-run", "--property=CPUAccounting=no", "--property=MemoryAccounting=no", + "systemd-run", "--unit={0}".format(logcollector.CGROUPS_UNIT), "--slice={0}".format(cgroupconfigurator.LOGCOLLECTOR_SLICE), "--scope" - ] + ] + CGroupConfigurator.get_instance().get_logcollector_unit_properties() # The log tool is invoked from the current agent's egg with the command line option collect_logs_cmd = [sys.executable, "-u", sys.argv[0], "-collect-logs"] @@ -208,8 +213,7 @@ def exec_command(): # pylint has limited (i.e. no) awareness of control flow w.r.t. typing. we disable=no-member # here because we know e must be a CommandError but pylint still considers the case where # e is a different type of exception. 
- err_msg = ustr("Log Collector exited with code {0}").format( - e.returncode) # pylint: disable=no-member + err_msg = ustr("Log Collector exited with code {0}").format(e.returncode) # pylint: disable=no-member if e.returncode == logcollector.INVALID_CGROUPS_ERRCODE: # pylint: disable=no-member logger.info("Disabling periodic log collection until service restart due to process error.") @@ -262,8 +266,8 @@ def _send_logs(self): log_event=False) -def get_log_collector_monitor_handler(cgroups): - return LogCollectorMonitorHandler(cgroups) +def get_log_collector_monitor_handler(controllers): + return LogCollectorMonitorHandler(controllers) class LogCollectorMonitorHandler(ThreadHandlerInterface): @@ -277,12 +281,13 @@ class LogCollectorMonitorHandler(ThreadHandlerInterface): def get_thread_name(): return LogCollectorMonitorHandler._THREAD_NAME - def __init__(self, cgroups): + def __init__(self, controllers): self.event_thread = None self.should_run = True self.period = 2 # Log collector monitor runs every 2 secs. 
- self.cgroups = cgroups - self.__log_metrics = conf.get_cgroup_log_metrics() + self.controllers = controllers + self.max_recorded_metrics = {} + self.__should_log_metrics = conf.get_cgroup_log_metrics() def run(self): self.start() @@ -303,8 +308,8 @@ def is_alive(self): def start(self): self.event_thread = threading.Thread(target=self.daemon) - self.event_thread.setDaemon(True) - self.event_thread.setName(self.get_thread_name()) + self.event_thread.daemon = True + self.event_thread.name = self.get_thread_name() self.event_thread.start() def daemon(self): @@ -312,7 +317,8 @@ def daemon(self): while not self.stopped(): try: metrics = self._poll_resource_usage() - self._send_telemetry(metrics) + if self.__should_log_metrics: + self._log_metrics(metrics) self._verify_memory_limit(metrics) except Exception as e: logger.error("An error occurred in the log collection monitor thread loop; " @@ -324,30 +330,54 @@ def daemon(self): "An error occurred in the MonitorLogCollectorCgroupsHandler thread; will exit the thread.\n{0}", ustr(e)) + def get_max_recorded_metrics(self): + return self.max_recorded_metrics + def _poll_resource_usage(self): metrics = [] - for cgroup in self.cgroups: - metrics.extend(cgroup.get_tracked_metrics(track_throttled_time=True)) + for controller in self.controllers: + metrics.extend(controller.get_tracked_metrics(track_throttled_time=True)) + + for metric in metrics: + current_max = self.max_recorded_metrics.get(metric.counter) + self.max_recorded_metrics[metric.counter] = metric.value if current_max is None else max(current_max, metric.value) + return metrics - def _send_telemetry(self, metrics): + def _log_metrics(self, metrics): for metric in metrics: - report_metric(metric.category, metric.counter, metric.instance, metric.value, log_event=self.__log_metrics) + logger.info("Metric {0}/{1} [{2}] = {3}".format(metric.category, metric.counter, metric.instance, metric.value)) def _verify_memory_limit(self, metrics): - current_usage = 0 + 
current_anon_and_swap_usage = 0 + current_cache_usage = 0 + memory_throttled_events = 0 for metric in metrics: - if metric.counter == MetricsCounter.TOTAL_MEM_USAGE: - current_usage += metric.value + if metric.counter == MetricsCounter.ANON_MEM_USAGE: + current_anon_and_swap_usage += metric.value elif metric.counter == MetricsCounter.SWAP_MEM_USAGE: - current_usage += metric.value - - if current_usage > LOGCOLLECTOR_MEMORY_LIMIT: - msg = "Log collector memory limit {0} bytes exceeded. The max reported usage is {1} bytes.".format(LOGCOLLECTOR_MEMORY_LIMIT, current_usage) + current_anon_and_swap_usage += metric.value + elif metric.counter == MetricsCounter.CACHE_MEM_USAGE: + current_cache_usage = metric.value + elif metric.counter == MetricsCounter.MEM_THROTTLED: + memory_throttled_events = metric.value + + mem_limit_exceeded = False + if current_anon_and_swap_usage > LOGCOLLECTOR_ANON_MEMORY_LIMIT_FOR_V1_AND_V2: + mem_limit_exceeded = True + msg = "Log collector anon + swap memory limit {0} bytes exceeded. The reported usage is {1} bytes.".format(LOGCOLLECTOR_ANON_MEMORY_LIMIT_FOR_V1_AND_V2, current_anon_and_swap_usage) logger.info(msg) - add_event( - name=AGENT_NAME, - version=CURRENT_VERSION, - op=WALAEventOperation.LogCollection, - message=msg) + add_event(name=AGENT_NAME, version=CURRENT_VERSION, op=WALAEventOperation.LogCollection, message=msg) + if current_cache_usage > LOGCOLLECTOR_CACHE_MEMORY_LIMIT_FOR_V1_AND_V2: + mem_limit_exceeded = True + msg = "Log collector cache memory limit {0} bytes exceeded. The reported usage is {1} bytes.".format(LOGCOLLECTOR_CACHE_MEMORY_LIMIT_FOR_V1_AND_V2, current_cache_usage) + logger.info(msg) + add_event(name=AGENT_NAME, version=CURRENT_VERSION, op=WALAEventOperation.LogCollection, message=msg) + if memory_throttled_events > LOGCOLLECTOR_MAX_THROTTLED_EVENTS_FOR_V2: + mem_limit_exceeded = True + msg = "Log collector memory throttled events limit {0} exceeded. 
The reported number of throttled events is {1}.".format(LOGCOLLECTOR_MAX_THROTTLED_EVENTS_FOR_V2, memory_throttled_events) + logger.info(msg) + add_event(name=AGENT_NAME, version=CURRENT_VERSION, op=WALAEventOperation.LogCollection, message=msg) + + if mem_limit_exceeded: os._exit(GRACEFUL_KILL_ERRCODE) diff --git a/azurelinuxagent/ga/collect_telemetry_events.py b/azurelinuxagent/ga/collect_telemetry_events.py index e0144a6399..05f18c60d4 100644 --- a/azurelinuxagent/ga/collect_telemetry_events.py +++ b/azurelinuxagent/ga/collect_telemetry_events.py @@ -499,7 +499,7 @@ def _trim_legacy_extension_event_parameters(event): :param event: Extension event to trim. :return: Trimmed extension event; containing only extension-specific parameters. """ - params_to_keep = dict().fromkeys([ + params_to_keep = dict.fromkeys([ GuestAgentExtensionEventsSchema.Name, GuestAgentExtensionEventsSchema.Version, GuestAgentExtensionEventsSchema.Operation, @@ -542,8 +542,8 @@ def is_alive(self): def start(self): self.thread = threading.Thread(target=self.daemon) - self.thread.setDaemon(True) - self.thread.setName(CollectTelemetryEventsHandler.get_thread_name()) + self.thread.daemon = True + self.thread.name = CollectTelemetryEventsHandler.get_thread_name() self.thread.start() def stop(self): @@ -583,4 +583,4 @@ def daemon(self): @staticmethod def add_common_params_to_telemetry_event(event, event_time): reporter = get_event_logger() - reporter.add_common_event_parameters(event, event_time) \ No newline at end of file + reporter.add_common_event_parameters(event, event_time) diff --git a/azurelinuxagent/ga/cpucontroller.py b/azurelinuxagent/ga/cpucontroller.py new file mode 100644 index 0000000000..b4f56dd150 --- /dev/null +++ b/azurelinuxagent/ga/cpucontroller.py @@ -0,0 +1,293 @@ +# Copyright 2018 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Requires Python 2.6+ and Openssl 1.0+ + +import errno +import os +import re + +from azurelinuxagent.common.exception import CGroupsException +from azurelinuxagent.common.future import ustr +from azurelinuxagent.common.osutil import get_osutil +from azurelinuxagent.common.utils import fileutil +from azurelinuxagent.ga.cgroupcontroller import _CgroupController, MetricValue, MetricsCategory, MetricsCounter + +re_v1_user_system_times = re.compile(r'user (\d+)\nsystem (\d+)\n') +re_v2_usage_time = re.compile(r'[\s\S]*usage_usec (\d+)[\s\S]*') + + +class _CpuController(_CgroupController): + def __init__(self, name, cgroup_path): + super(_CpuController, self).__init__(name, cgroup_path) + + self._osutil = get_osutil() + self._previous_cgroup_cpu = None + self._previous_system_cpu = None + self._current_cgroup_cpu = None + self._current_system_cpu = None + self._previous_throttled_time = None + self._current_throttled_time = None + + def _get_cpu_stat_counter(self, counter_name): + """ + Gets the value for the provided counter in cpu.stat + """ + try: + with open(os.path.join(self.path, 'cpu.stat')) as cpu_stat: + # + # Sample file v1: + # # cat cpu.stat + # nr_periods 51660 + # nr_throttled 19461 + # throttled_time 1529590856339 + # + # Sample file v2 + # # cat cpu.stat + # usage_usec 200161503 + # user_usec 199388368 + # system_usec 773134 + # core_sched.force_idle_usec 0 + # nr_periods 40059 + # nr_throttled 40022 + # throttled_usec 3565247992 + # nr_bursts 0 + # burst_usec 0 + # + for line in cpu_stat: + match = 
re.match(r'{0}\s+(\d+)'.format(counter_name), line) + if match is not None: + return int(match.groups()[0]) + raise Exception("Cannot find {0}".format(counter_name)) + except (IOError, OSError) as e: + if e.errno == errno.ENOENT: + return 0 + raise CGroupsException("Failed to read cpu.stat: {0}".format(ustr(e))) + except Exception as e: + raise CGroupsException("Failed to read cpu.stat: {0}".format(ustr(e))) + + def _cpu_usage_initialized(self): + """ + Returns True if cpu usage has been initialized, False otherwise. + """ + return self._current_cgroup_cpu is not None and self._current_system_cpu is not None + + def initialize_cpu_usage(self): + """ + Sets the initial values of CPU usage. This function must be invoked before calling get_cpu_usage(). + """ + raise NotImplementedError() + + def get_cpu_usage(self): + """ + Computes the CPU used by the cgroup since the last call to this function. + + The usage is measured as a percentage of utilization of 1 core in the system. For example, + using 1 core all of the time on a 4-core system would be reported as 100%. + + NOTE: initialize_cpu_usage() must be invoked before calling get_cpu_usage() + """ + raise NotImplementedError() + + def get_cpu_throttled_time(self, read_previous_throttled_time=True): + """ + Computes the throttled time (in seconds) since the last call to this function. + NOTE: initialize_cpu_usage() must be invoked before calling this function + Compute only current throttled time if read_previous_throttled_time set to False + """ + raise NotImplementedError() + + def get_tracked_metrics(self, **kwargs): + # Note: If the current cpu usage is less than the previous usage (metric is negative), then an empty array will + # be returned and the agent won't track the metrics. 
+ tracked = [] + cpu_usage = self.get_cpu_usage() + if cpu_usage >= float(0): + tracked.append(MetricValue(MetricsCategory.CPU_CATEGORY, MetricsCounter.PROCESSOR_PERCENT_TIME, self.name, cpu_usage)) + + if 'track_throttled_time' in kwargs and kwargs['track_throttled_time']: + throttled_time = self.get_cpu_throttled_time() + if cpu_usage >= float(0) and throttled_time >= float(0): + tracked.append(MetricValue(MetricsCategory.CPU_CATEGORY, MetricsCounter.THROTTLED_TIME, self.name, throttled_time)) + + return tracked + + def get_unit_properties(self): + return ["CPUAccounting", "CPUQuotaPerSecUSec"] + + +class CpuControllerV1(_CpuController): + def initialize_cpu_usage(self): + if self._cpu_usage_initialized(): + raise CGroupsException("initialize_cpu_usage() should be invoked only once") + self._current_cgroup_cpu = self._get_cpu_ticks(allow_no_such_file_or_directory_error=True) + self._current_system_cpu = self._osutil.get_total_cpu_ticks_since_boot() + self._current_throttled_time = self._get_cpu_stat_counter(counter_name='throttled_time') + + def _get_cpu_ticks(self, allow_no_such_file_or_directory_error=False): + """ + Returns the number of USER_HZ of CPU time (user and system) consumed by this cgroup. + + If allow_no_such_file_or_directory_error is set to True and cpuacct.stat does not exist the function + returns 0; this is useful when the function can be called before the cgroup has been created. 
+ """ + try: + cpuacct_stat = self._get_file_contents('cpuacct.stat') + except Exception as e: + if not isinstance(e, (IOError, OSError)) or e.errno != errno.ENOENT: # pylint: disable=E1101 + raise CGroupsException("Failed to read cpuacct.stat: {0}".format(ustr(e))) + if not allow_no_such_file_or_directory_error: + raise e + cpuacct_stat = None + + cpu_ticks = 0 + + if cpuacct_stat is not None: + # + # Sample file: + # # cat /sys/fs/cgroup/cpuacct/azure.slice/walinuxagent.service/cpuacct.stat + # user 10190 + # system 3160 + # + match = re_v1_user_system_times.match(cpuacct_stat) + if not match: + raise CGroupsException("The contents of {0} are invalid: {1}".format(self._get_cgroup_file('cpuacct.stat'), cpuacct_stat)) + cpu_ticks = int(match.groups()[0]) + int(match.groups()[1]) + + return cpu_ticks + + def get_cpu_usage(self): + if not self._cpu_usage_initialized(): + raise CGroupsException("initialize_cpu_usage() must be invoked before the first call to get_cpu_usage()") + + self._previous_cgroup_cpu = self._current_cgroup_cpu + self._previous_system_cpu = self._current_system_cpu + self._current_cgroup_cpu = self._get_cpu_ticks() + self._current_system_cpu = self._osutil.get_total_cpu_ticks_since_boot() + + cgroup_delta = self._current_cgroup_cpu - self._previous_cgroup_cpu + system_delta = max(1, self._current_system_cpu - self._previous_system_cpu) + + return round(100.0 * self._osutil.get_processor_cores() * float(cgroup_delta) / float(system_delta), 3) + + def get_cpu_throttled_time(self, read_previous_throttled_time=True): + # Throttled time is reported in nanoseconds in v1 + if not read_previous_throttled_time: + return float(self._get_cpu_stat_counter(counter_name='throttled_time') / 1E9) + + if not self._cpu_usage_initialized(): + raise CGroupsException("initialize_cpu_usage() must be invoked before the first call to get_cpu_throttled_time()") + + self._previous_throttled_time = self._current_throttled_time + self._current_throttled_time = 
self._get_cpu_stat_counter(counter_name='throttled_time') + + return round(float(self._current_throttled_time - self._previous_throttled_time) / 1E9, 3) + + +class CpuControllerV2(_CpuController): + @staticmethod + def get_system_uptime(): + """ + Get the uptime of the system (including time spent in suspend) in seconds. + /proc/uptime contains two numbers (values in seconds): the uptime of the system (including time spent in + suspend) and the amount of time spent in the idle process: + # cat /proc/uptime + 365380.48 722644.81 + + :return: System uptime in seconds + :rtype: float + """ + uptime_contents = fileutil.read_file('/proc/uptime').split() + return float(uptime_contents[0]) + + def _get_system_usage(self): + try: + return self.get_system_uptime() + except (OSError, IOError) as e: + raise CGroupsException("Couldn't read /proc/uptime: {0}".format(ustr(e))) + except Exception as e: + raise CGroupsException("Couldn't parse /proc/uptime: {0}".format(ustr(e))) + + def initialize_cpu_usage(self): + if self._cpu_usage_initialized(): + raise CGroupsException("initialize_cpu_usage() should be invoked only once") + self._current_cgroup_cpu = self._get_cpu_time(allow_no_such_file_or_directory_error=True) + self._current_system_cpu = self._get_system_usage() + self._current_throttled_time = self._get_cpu_stat_counter(counter_name='throttled_usec') + + def _get_cpu_time(self, allow_no_such_file_or_directory_error=False): + """ + Returns the CPU time (user and system) consumed by this cgroup in seconds. + + If allow_no_such_file_or_directory_error is set to True and cpu.stat does not exist the function + returns 0; this is useful when the function can be called before the cgroup has been created. 
+ """ + try: + cpu_stat = self._get_file_contents('cpu.stat') + except Exception as e: + if not isinstance(e, (IOError, OSError)) or e.errno != errno.ENOENT: # pylint: disable=E1101 + raise CGroupsException("Failed to read cpu.stat: {0}".format(ustr(e))) + if not allow_no_such_file_or_directory_error: + raise e + cpu_stat = None + + cpu_time = 0 + + if cpu_stat is not None: + # + # Sample file: + # # cat /sys/fs/cgroup/azure.slice/azure-walinuxagent.slice/azure-walinuxagent-logcollector.slice/collect-logs.scope/cpu.stat + # usage_usec 1990707 + # user_usec 1939858 + # system_usec 50848 + # core_sched.force_idle_usec 0 + # nr_periods 397 + # nr_throttled 397 + # throttled_usec 37994949 + # nr_bursts 0 + # burst_usec 0 + # + match = re_v2_usage_time.match(cpu_stat) + if not match: + raise CGroupsException("The contents of {0} are invalid: {1}".format(self._get_cgroup_file('cpu.stat'), cpu_stat)) + cpu_time = int(match.groups()[0]) / 1E6 + + return cpu_time + + def get_cpu_usage(self): + if not self._cpu_usage_initialized(): + raise CGroupsException("initialize_cpu_usage() must be invoked before the first call to get_cpu_usage()") + + self._previous_cgroup_cpu = self._current_cgroup_cpu + self._previous_system_cpu = self._current_system_cpu + self._current_cgroup_cpu = self._get_cpu_time() + self._current_system_cpu = self._get_system_usage() + + cgroup_delta = self._current_cgroup_cpu - self._previous_cgroup_cpu + system_delta = max(1.0, self._current_system_cpu - self._previous_system_cpu) + + return round(100.0 * float(cgroup_delta) / float(system_delta), 3) + + def get_cpu_throttled_time(self, read_previous_throttled_time=True): + # Throttled time is reported in microseconds in v2 + if not read_previous_throttled_time: + return float(self._get_cpu_stat_counter(counter_name='throttled_usec') / 1E6) + + if not self._cpu_usage_initialized(): + raise CGroupsException("initialize_cpu_usage() must be invoked before the first call to get_cpu_throttled_time()") + + 
self._previous_throttled_time = self._current_throttled_time + self._current_throttled_time = self._get_cpu_stat_counter(counter_name='throttled_usec') + + return round(float(self._current_throttled_time - self._previous_throttled_time) / 1E6, 3) diff --git a/azurelinuxagent/ga/env.py b/azurelinuxagent/ga/env.py index 9e7e7285ef..3d5887e6f0 100644 --- a/azurelinuxagent/ga/env.py +++ b/azurelinuxagent/ga/env.py @@ -35,9 +35,9 @@ from azurelinuxagent.ga.periodic_operation import PeriodicOperation CACHE_PATTERNS = [ - re.compile("^(.*)\.(\d+)\.(agentsManifest)$", re.IGNORECASE), # pylint: disable=W1401 - re.compile("^(.*)\.(\d+)\.(manifest\.xml)$", re.IGNORECASE), # pylint: disable=W1401 - re.compile("^(.*)\.(\d+)\.(xml)$", re.IGNORECASE) # pylint: disable=W1401 + re.compile(r"^(.*)\.(\d+)\.(agentsManifest)$", re.IGNORECASE), + re.compile(r"^(.*)\.(\d+)\.(manifest\.xml)$", re.IGNORECASE), + re.compile(r"^(.*)\.(\d+)\.(xml)$", re.IGNORECASE) ] MAXIMUM_CACHED_FILES = 50 @@ -244,8 +244,8 @@ def is_alive(self): def start(self): self.env_thread = threading.Thread(target=self.daemon) - self.env_thread.setDaemon(True) - self.env_thread.setName(self.get_thread_name()) + self.env_thread.daemon = True + self.env_thread.name = self.get_thread_name() self.env_thread.start() def daemon(self): diff --git a/azurelinuxagent/ga/extensionprocessutil.py b/azurelinuxagent/ga/extensionprocessutil.py index d2b37551ba..8eb65d459b 100644 --- a/azurelinuxagent/ga/extensionprocessutil.py +++ b/azurelinuxagent/ga/extensionprocessutil.py @@ -31,7 +31,7 @@ TELEMETRY_MESSAGE_MAX_LEN = 3200 -def wait_for_process_completion_or_timeout(process, timeout, cpu_cgroup): +def wait_for_process_completion_or_timeout(process, timeout, cpu_controller): """ Utility function that waits for the process to complete within the given time frame. This function will terminate the process if when the given time frame elapses. 
@@ -47,7 +47,7 @@ def wait_for_process_completion_or_timeout(process, timeout, cpu_cgroup): throttled_time = 0 if timeout == 0: - throttled_time = get_cpu_throttled_time(cpu_cgroup) + throttled_time = get_cpu_throttled_time(cpu_controller) os.killpg(os.getpgid(process.pid), signal.SIGKILL) else: # process completed or forked; sleep 1 sec to give the child process (if any) a chance to start @@ -57,7 +57,7 @@ def wait_for_process_completion_or_timeout(process, timeout, cpu_cgroup): return timeout == 0, return_code, throttled_time -def handle_process_completion(process, command, timeout, stdout, stderr, error_code, cpu_cgroup=None): +def handle_process_completion(process, command, timeout, stdout, stderr, error_code, cpu_controller=None): """ Utility function that waits for process completion and retrieves its output (stdout and stderr) if it completed before the timeout period. Otherwise, the process will get killed and an ExtensionError will be raised. @@ -68,15 +68,15 @@ def handle_process_completion(process, command, timeout, stdout, stderr, error_c :param stdout: Must be a file since we seek on it when parsing the subprocess output :param stderr: Must be a file since we seek on it when parsing the subprocess outputs :param error_code: The error code to set if we raise an ExtensionError - :param cpu_cgroup: Reference the cpu cgroup name and path + :param cpu_controller: References the cpu controller for the cgroup :return: """ # Wait for process completion or timeout - timed_out, return_code, throttled_time = wait_for_process_completion_or_timeout(process, timeout, cpu_cgroup) + timed_out, return_code, throttled_time = wait_for_process_completion_or_timeout(process, timeout, cpu_controller) process_output = read_output(stdout, stderr) if timed_out: - if cpu_cgroup is not None: # Report CPUThrottledTime when timeout happens + if cpu_controller is not None: # Report CPUThrottledTime when timeout happens raise ExtensionError("Timeout({0});CPUThrottledTime({1}secs): 
{2}\n{3}".format(timeout, throttled_time, command, process_output), code=ExtensionErrorCodes.PluginHandlerScriptTimedout) @@ -211,14 +211,14 @@ def to_s(captured_stdout, stdout_offset, captured_stderr, stderr_offset): return to_s(stdout, -1*max_len_each, stderr, -1*max_len_each) -def get_cpu_throttled_time(cpu_cgroup): +def get_cpu_throttled_time(cpu_controller): """ return the throttled time for the given cgroup. """ throttled_time = 0 - if cpu_cgroup is not None: + if cpu_controller is not None: try: - throttled_time = cpu_cgroup.get_cpu_throttled_time(read_previous_throttled_time=False) + throttled_time = cpu_controller.get_cpu_throttled_time(read_previous_throttled_time=False) except Exception as e: logger.warn("Failed to get cpu throttled time for the extension: {0}", ustr(e)) diff --git a/azurelinuxagent/ga/exthandlers.py b/azurelinuxagent/ga/exthandlers.py index fe43cfcfd7..d23b1630ce 100644 --- a/azurelinuxagent/ga/exthandlers.py +++ b/azurelinuxagent/ga/exthandlers.py @@ -27,7 +27,6 @@ import tempfile import time import zipfile -from distutils.version import LooseVersion from collections import defaultdict from functools import partial @@ -2258,7 +2257,7 @@ def get_resource_limits(self, extension_name, str_version): This is not effective after nov 30th. 
""" if ExtHandlerInstance.is_azuremonitorlinuxagent(extension_name): - if LooseVersion(str_version) < LooseVersion("1.12"): + if FlexibleVersion(str_version) < FlexibleVersion("1.12"): test_man = { "resourceLimits": { "services": [ diff --git a/azurelinuxagent/ga/logcollector.py b/azurelinuxagent/ga/logcollector.py index e21b1f51f1..dfd5bfaf1e 100644 --- a/azurelinuxagent/ga/logcollector.py +++ b/azurelinuxagent/ga/logcollector.py @@ -304,27 +304,31 @@ def _get_final_list_for_archive(self, priority_file_queue): final_files_to_collect = [] while priority_file_queue: - file_path = heappop(priority_file_queue)[1] # (priority, file_path) - file_size = min(os.path.getsize(file_path), _FILE_SIZE_LIMIT) - - if total_uncompressed_size + file_size > _UNCOMPRESSED_ARCHIVE_SIZE_LIMIT: - _LOGGER.warning("Archive too big, done with adding files.") - break - - if os.path.getsize(file_path) <= _FILE_SIZE_LIMIT: - final_files_to_collect.append(file_path) - _LOGGER.info("Adding file %s, size %s b", file_path, file_size) - else: - truncated_file_path = self._truncate_large_file(file_path) - if truncated_file_path: - _LOGGER.info("Adding truncated file %s, size %s b", truncated_file_path, file_size) - final_files_to_collect.append(truncated_file_path) - - total_uncompressed_size += file_size + try: + file_path = heappop(priority_file_queue)[1] # (priority, file_path) + file_size = min(os.path.getsize(file_path), _FILE_SIZE_LIMIT) + + if total_uncompressed_size + file_size > _UNCOMPRESSED_ARCHIVE_SIZE_LIMIT: + _LOGGER.warning("Archive too big, done with adding files.") + break + + if os.path.getsize(file_path) <= _FILE_SIZE_LIMIT: + final_files_to_collect.append(file_path) + total_uncompressed_size += file_size + _LOGGER.info("Adding file %s, size %s b", file_path, file_size) + else: + truncated_file_path = self._truncate_large_file(file_path) + if truncated_file_path: + _LOGGER.info("Adding truncated file %s, size %s b", truncated_file_path, file_size) + 
final_files_to_collect.append(truncated_file_path) + total_uncompressed_size += file_size + except IOError as e: + if e.errno == 2: # [Errno 2] No such file or directory + _LOGGER.warning("File %s does not exist, skipping collection for this file", file_path) _LOGGER.info("Uncompressed archive size is %s b", total_uncompressed_size) - return final_files_to_collect + return final_files_to_collect, total_uncompressed_size def _create_list_of_files_to_collect(self): # The final list of files to be collected by zip is created in three steps: @@ -334,8 +338,8 @@ def _create_list_of_files_to_collect(self): # the size limit. parsed_file_paths = self._process_manifest_file() prioritized_file_paths = self._get_priority_files_list(parsed_file_paths) - files_to_collect = self._get_final_list_for_archive(prioritized_file_paths) - return files_to_collect + files_to_collect, total_uncompressed_size = self._get_final_list_for_archive(prioritized_file_paths) + return files_to_collect, total_uncompressed_size def collect_logs_and_get_archive(self): """ @@ -343,6 +347,7 @@ def collect_logs_and_get_archive(self): :return: Returns the path of the collected compressed archive """ files_to_collect = [] + total_uncompressed_size = 0 try: # Clear previous run's output and create base directories if they don't exist already. @@ -352,26 +357,37 @@ def collect_logs_and_get_archive(self): _LOGGER.info("Starting log collection at %s", start_time.strftime("%Y-%m-%dT%H:%M:%SZ")) _LOGGER.info("Using log collection mode %s", "full" if self._is_full_mode else "normal") - files_to_collect = self._create_list_of_files_to_collect() + files_to_collect, total_uncompressed_size = self._create_list_of_files_to_collect() _LOGGER.info("### Creating compressed archive ###") compressed_archive = None + def handle_add_file_to_archive_error(error_count, max_errors, file_to_collect, exception): + error_count += 1 + if error_count >= max_errors: + raise Exception("Too many errors, giving up. 
Last error: {0}".format(ustr(exception))) + else: + _LOGGER.warning("Failed to add file %s to the archive: %s", file_to_collect, ustr(exception)) + return error_count + try: compressed_archive = zipfile.ZipFile(COMPRESSED_ARCHIVE_PATH, "w", compression=zipfile.ZIP_DEFLATED) max_errors = 8 error_count = 0 + for file_to_collect in files_to_collect: try: archive_file_name = LogCollector._convert_file_name_to_archive_name(file_to_collect) compressed_archive.write(file_to_collect.encode("utf-8"), arcname=archive_file_name) - except Exception as e: - error_count += 1 - if error_count >= max_errors: - raise Exception("Too many errors, giving up. Last error: {0}".format(ustr(e))) + except IOError as e: + if e.errno == 2: # [Errno 2] No such file or directory + _LOGGER.warning("File %s does not exist, skipping collection for this file", + file_to_collect) else: - _LOGGER.warning("Failed to add file %s to the archive: %s", file_to_collect, ustr(e)) + error_count = handle_add_file_to_archive_error(error_count, max_errors, file_to_collect, e) + except Exception as e: + error_count = handle_add_file_to_archive_error(error_count, max_errors, file_to_collect, e) compressed_archive_size = os.path.getsize(COMPRESSED_ARCHIVE_PATH) _LOGGER.info("Successfully compressed files. 
Compressed archive size is %s b", compressed_archive_size) @@ -387,7 +403,7 @@ def collect_logs_and_get_archive(self): if compressed_archive is not None: compressed_archive.close() - return COMPRESSED_ARCHIVE_PATH + return COMPRESSED_ARCHIVE_PATH, total_uncompressed_size except Exception as e: msg = "Failed to collect logs: {0}".format(ustr(e)) _LOGGER.error(msg) diff --git a/azurelinuxagent/ga/memorycontroller.py b/azurelinuxagent/ga/memorycontroller.py new file mode 100644 index 0000000000..30e7540cf3 --- /dev/null +++ b/azurelinuxagent/ga/memorycontroller.py @@ -0,0 +1,220 @@ +# Copyright 2018 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# Requires Python 2.6+ and Openssl 1.0+ + +import errno +import os +import re + +from azurelinuxagent.common import logger +from azurelinuxagent.common.exception import CGroupsException +from azurelinuxagent.common.future import ustr +from azurelinuxagent.ga.cgroupcontroller import _CgroupController, CounterNotFound, MetricValue, MetricsCategory, \ + MetricsCounter, _REPORT_EVERY_HOUR + + +class _MemoryController(_CgroupController): + def __init__(self, name, cgroup_path): + super(_MemoryController, self).__init__(name, cgroup_path) + self._counter_not_found_error_count = 0 + + def _get_memory_stat_counter(self, counter_name): + """ + Gets the value for the provided counter in memory.stat + """ + try: + with open(os.path.join(self.path, 'memory.stat')) as memory_stat: + # + # Sample file v1: + # # cat memory.stat + # cache 0 + # rss 0 + # rss_huge 0 + # shmem 0 + # mapped_file 0 + # dirty 0 + # writeback 0 + # swap 0 + # ... + # + # Sample file v2 + # # cat memory.stat + # anon 0 + # file 147140608 + # kernel 1421312 + # kernel_stack 0 + # pagetables 0 + # sec_pagetables 0 + # percpu 130752 + # sock 0 + # ... 
+ # + for line in memory_stat: + re_memory_counter = r'{0}\s+(\d+)'.format(counter_name) + match = re.match(re_memory_counter, line) + if match is not None: + return int(match.groups()[0]) + except (IOError, OSError) as e: + if e.errno == errno.ENOENT: + raise + raise CGroupsException("Failed to read memory.stat: {0}".format(ustr(e))) + except Exception as e: + raise CGroupsException("Failed to read memory.stat: {0}".format(ustr(e))) + + raise CounterNotFound("Cannot find counter: {0}".format(counter_name)) + + def get_memory_usage(self): + """ + Collects anon and cache usage for the cgroup and returns as a tuple + Returns anon and cache memory usage for the cgroup as a tuple -> (anon, cache) + + :return: Anon and cache memory usage in bytes + :rtype: tuple[int, int] + """ + raise NotImplementedError() + + def try_swap_memory_usage(self): + """ + Collects swap usage for the cgroup + + :return: Memory usage in bytes + :rtype: int + """ + raise NotImplementedError() + + def get_max_memory_usage(self): + """ + Collect max memory usage for the cgroup. + + :return: Memory usage in bytes + :rtype: int + """ + raise NotImplementedError() + + def get_tracked_metrics(self, **_): + # The log collector monitor tracks anon and cache memory separately. 
+ anon_mem_usage, cache_mem_usage = self.get_memory_usage() + total_mem_usage = anon_mem_usage + cache_mem_usage + return [ + MetricValue(MetricsCategory.MEMORY_CATEGORY, MetricsCounter.TOTAL_MEM_USAGE, self.name, total_mem_usage), + MetricValue(MetricsCategory.MEMORY_CATEGORY, MetricsCounter.ANON_MEM_USAGE, self.name, anon_mem_usage), + MetricValue(MetricsCategory.MEMORY_CATEGORY, MetricsCounter.CACHE_MEM_USAGE, self.name, cache_mem_usage), + MetricValue(MetricsCategory.MEMORY_CATEGORY, MetricsCounter.MAX_MEM_USAGE, self.name, + self.get_max_memory_usage(), _REPORT_EVERY_HOUR), + MetricValue(MetricsCategory.MEMORY_CATEGORY, MetricsCounter.SWAP_MEM_USAGE, self.name, + self.try_swap_memory_usage(), _REPORT_EVERY_HOUR) + ] + + def get_unit_properties(self): + return["MemoryAccounting"] + + +class MemoryControllerV1(_MemoryController): + def get_memory_usage(self): + # In v1, anon memory is reported in the 'rss' counter + return self._get_memory_stat_counter("rss"), self._get_memory_stat_counter("cache") + + def try_swap_memory_usage(self): + # In v1, swap memory should be collected from memory.stat, because memory.memsw.usage_in_bytes reports total Memory+SWAP. 
+        try:
+            return self._get_memory_stat_counter("swap")
+        except CounterNotFound as e:
+            if self._counter_not_found_error_count < 1:
+                logger.periodic_info(logger.EVERY_HALF_HOUR,
+                                     '{0} from "memory.stat" file in the cgroup: {1}---[Note: This log for informational purpose only and can be ignored]'.format(ustr(e), self.path))
+            self._counter_not_found_error_count += 1
+            return 0
+
+    def get_max_memory_usage(self):
+        # In v1, max memory usage is reported in memory.max_usage_in_bytes
+        usage = 0
+        try:
+            usage = int(self._get_parameters('memory.max_usage_in_bytes', first_line_only=True))
+        except Exception as e:
+            if isinstance(e, (IOError, OSError)) and e.errno == errno.ENOENT: # pylint: disable=E1101
+                raise
+            raise CGroupsException("Exception while attempting to read {0}".format("memory.max_usage_in_bytes"), e)
+
+        return usage
+
+
+class MemoryControllerV2(_MemoryController):
+    def get_memory_usage(self):
+        # In v2, cache memory is reported in the 'file' counter
+        return self._get_memory_stat_counter("anon"), self._get_memory_stat_counter("file")
+
+    def get_memory_throttled_events(self):
+        """
+        Returns the number of times processes of the cgroup are throttled and routed to perform memory reclaim because
+ + :return: Number of memory throttling events for the cgroup + :rtype: int + """ + try: + with open(os.path.join(self.path, 'memory.events')) as memory_events: + # + # Sample file: + # # cat memory.events + # low 0 + # high 0 + # max 0 + # oom 0 + # oom_kill 0 + # oom_group_kill 0 + # + for line in memory_events: + match = re.match(r'high\s+(\d+)', line) + if match is not None: + return int(match.groups()[0]) + except (IOError, OSError) as e: + if e.errno == errno.ENOENT: + raise + raise CGroupsException("Failed to read memory.events: {0}".format(ustr(e))) + except Exception as e: + raise CGroupsException("Failed to read memory.events: {0}".format(ustr(e))) + + raise CounterNotFound("Cannot find memory.events counter: high") + + def try_swap_memory_usage(self): + # In v2, swap memory is reported in memory.swap.current + usage = 0 + try: + usage = int(self._get_parameters('memory.swap.current', first_line_only=True)) + except Exception as e: + if isinstance(e, (IOError, OSError)) and e.errno == errno.ENOENT: # pylint: disable=E1101 + raise + raise CGroupsException("Exception while attempting to read {0}".format("memory.swap.current"), e) + + return usage + + def get_max_memory_usage(self): + # In v2, max memory usage is reported in memory.peak + usage = 0 + try: + usage = int(self._get_parameters('memory.peak', first_line_only=True)) + except Exception as e: + if isinstance(e, (IOError, OSError)) and e.errno == errno.ENOENT: # pylint: disable=E1101 + raise + raise CGroupsException("Exception while attempting to read {0}".format("memory.peak"), e) + + return usage + + def get_tracked_metrics(self, **_): + metrics = super(MemoryControllerV2, self).get_tracked_metrics() + throttled_value = MetricValue(MetricsCategory.MEMORY_CATEGORY, MetricsCounter.MEM_THROTTLED, self.name, + self.get_memory_throttled_events()) + metrics.append(throttled_value) + return metrics diff --git a/azurelinuxagent/ga/monitor.py b/azurelinuxagent/ga/monitor.py index 1c123d70e3..c1340ed69a 
100644 --- a/azurelinuxagent/ga/monitor.py +++ b/azurelinuxagent/ga/monitor.py @@ -22,7 +22,7 @@ import azurelinuxagent.common.conf as conf import azurelinuxagent.common.logger as logger import azurelinuxagent.common.utils.networkutil as networkutil -from azurelinuxagent.ga.cgroup import MetricValue, MetricsCategory, MetricsCounter +from azurelinuxagent.ga.cgroupcontroller import MetricValue, MetricsCategory, MetricsCounter from azurelinuxagent.ga.cgroupconfigurator import CGroupConfigurator from azurelinuxagent.ga.cgroupstelemetry import CGroupsTelemetry from azurelinuxagent.common.errorstate import ErrorState @@ -216,10 +216,10 @@ class SendImdsHeartbeat(PeriodicOperation): Periodic operation to report the IDMS's health. The signal is 'Healthy' when we have successfully called and validated a response in the last _IMDS_HEALTH_PERIOD. """ - def __init__(self, protocol_util, health_service): + def __init__(self, health_service): super(SendImdsHeartbeat, self).__init__(SendImdsHeartbeat._IMDS_HEARTBEAT_PERIOD) self.health_service = health_service - self.imds_client = get_imds_client(protocol_util.get_wireserver_endpoint()) + self.imds_client = get_imds_client() self.imds_error_state = ErrorState(min_timedelta=SendImdsHeartbeat._IMDS_HEALTH_PERIOD) _IMDS_HEARTBEAT_PERIOD = datetime.timedelta(minutes=1) @@ -281,8 +281,8 @@ def is_alive(self): def start(self): self.monitor_thread = threading.Thread(target=self.daemon) - self.monitor_thread.setDaemon(True) - self.monitor_thread.setName(self.get_thread_name()) + self.monitor_thread.daemon = True + self.monitor_thread.name = self.get_thread_name() self.monitor_thread.start() def daemon(self): @@ -298,7 +298,7 @@ def daemon(self): PollResourceUsage(), PollSystemWideResourceUsage(), SendHostPluginHeartbeat(protocol, health_service), - SendImdsHeartbeat(protocol_util, health_service) + SendImdsHeartbeat(health_service) ] report_network_configuration_changes = ReportNetworkConfigurationChanges() diff --git 
a/azurelinuxagent/ga/periodic_operation.py b/azurelinuxagent/ga/periodic_operation.py index 35bc6e6810..1414239c86 100644 --- a/azurelinuxagent/ga/periodic_operation.py +++ b/azurelinuxagent/ga/periodic_operation.py @@ -70,7 +70,7 @@ def sleep_until_next_operation(operations): Takes a list of operations, finds the operation that should be executed next (that with the closest next_run_time) and sleeps until it is time to execute that operation. """ - next_operation_time = min([op.next_run_time() for op in operations]) + next_operation_time = min(op.next_run_time() for op in operations) sleep_timedelta = next_operation_time - datetime.datetime.utcnow() # timedelta.total_seconds() is not available on Python 2.6, do the computation manually diff --git a/azurelinuxagent/ga/persist_firewall_rules.py b/azurelinuxagent/ga/persist_firewall_rules.py index 74b878ce57..e7c8373ecb 100644 --- a/azurelinuxagent/ga/persist_firewall_rules.py +++ b/azurelinuxagent/ga/persist_firewall_rules.py @@ -34,7 +34,8 @@ class PersistFirewallRulesHandler(object): # This unit file (Version={version}) was created by the Azure VM Agent. # Do not edit. [Unit] -Description=Setup network rules for WALinuxAgent +Description=Setup network rules for WALinuxAgent +After=local-fs.target Before=network-pre.target Wants=network-pre.target DefaultDependencies=no @@ -69,7 +70,7 @@ class PersistFirewallRulesHandler(object): # The current version of the unit file; Update it whenever the unit file is modified to ensure Agent can dynamically # modify the unit file on VM too - _UNIT_VERSION = "1.3" + _UNIT_VERSION = "1.4" @staticmethod def get_service_file_path(): @@ -184,7 +185,7 @@ def _setup_network_setup_service(self): self.__setup_binary_file() network_service_enabled = self.__verify_network_setup_service_enabled() - if network_service_enabled and not self.__unit_file_version_modified(): + if network_service_enabled and not self.__should_update_unit_file(): logger.info("Service: {0} already enabled. 
No change needed.".format(self._network_setup_service_name)) self.__log_network_setup_service_logs() @@ -198,8 +199,7 @@ def _setup_network_setup_service(self): # Create unit file with default values self.__set_service_unit_file() - # Reload systemd configurations when we setup the service for the first time to avoid systemctl warnings - self.__reload_systemd_conf() + # After modifying the service, systemctl may issue a warning when checking the service, and daemon-reload should not be used to clear the warning, since it can affect other services logger.info("Successfully added and enabled the {0}".format(self._network_setup_service_name)) def __setup_binary_file(self): @@ -296,13 +296,6 @@ def __log_network_setup_service_logs(self): message=msg, log_event=False) - def __reload_systemd_conf(self): - try: - logger.info("Executing systemctl daemon-reload for setting up {0}".format(self._network_setup_service_name)) - shellutil.run_command(["systemctl", "daemon-reload"]) - except Exception as exception: - logger.warn("Unable to reload systemctl configurations: {0}".format(ustr(exception))) - def __get_unit_file_version(self): if not os.path.exists(self.get_service_file_path()): raise OSError("{0} not found".format(self.get_service_file_path())) @@ -314,17 +307,29 @@ def __get_unit_file_version(self): return match.group(1).strip() - def __unit_file_version_modified(self): + def __get_unit_exec_start(self): + if not os.path.exists(self.get_service_file_path()): + raise OSError("{0} not found".format(self.get_service_file_path())) + + match = fileutil.findre_in_file(self.get_service_file_path(), + line_re="ExecStart=(.*)") + if match is None: + raise ValueError("ExecStart tag not found in the unit file") + + return match.group(1).strip() + + def __should_update_unit_file(self): """ - Check if the unit file version changed from the expected version - :return: True if unit file version changed else False + Check if the unit file version changed from the expected version or 
if the exec-start changed from the expected exec-start + :return: True if unit file need update else False """ try: unit_file_version = self.__get_unit_file_version() + unit_exec_start = self.__get_unit_exec_start() except Exception as error: - logger.info("Unable to determine version of unit file: {0}, overwriting unit file".format(ustr(error))) - # Since we can't determine the version, marking the file as modified to overwrite the unit file + logger.info("Unable to read content of unit file: {0}, overwriting unit file".format(ustr(error))) + # Since we can't determine the version or exec start, marking the file as modified to overwrite the unit file return True if unit_file_version != self._UNIT_VERSION: @@ -332,7 +337,14 @@ def __unit_file_version_modified(self): "Unit file version: {0} does not match with expected version: {1}, overwriting unit file".format( unit_file_version, self._UNIT_VERSION)) return True + binary_path = os.path.join(conf.get_lib_dir(), self.BINARY_FILE_NAME) + expected_exec_start = "{0} {1}".format(sys.executable, binary_path) + if unit_exec_start != expected_exec_start: + logger.info( + "Unit file exec-start: {0} does not match with expected exec-start: {1}, overwriting unit file".format( + unit_exec_start, expected_exec_start)) + return True logger.info( - "Unit file version matches with expected version: {0}, not overwriting unit file".format(unit_file_version)) + "Unit file matches with expected version: {0} and exec start: {1}, not overwriting unit file".format(unit_file_version, unit_exec_start)) return False diff --git a/azurelinuxagent/ga/send_telemetry_events.py b/azurelinuxagent/ga/send_telemetry_events.py index 2923a43b13..08d26eef31 100644 --- a/azurelinuxagent/ga/send_telemetry_events.py +++ b/azurelinuxagent/ga/send_telemetry_events.py @@ -70,8 +70,8 @@ def is_alive(self): def start(self): self._thread = threading.Thread(target=self._process_telemetry_thread) - self._thread.setDaemon(True) - 
self._thread.setName(self.get_thread_name()) + self._thread.daemon = True + self._thread.name = self.get_thread_name() self._thread.start() def stop(self): diff --git a/azurelinuxagent/ga/update.py b/azurelinuxagent/ga/update.py index a8d34f7c40..9579fd1445 100644 --- a/azurelinuxagent/ga/update.py +++ b/azurelinuxagent/ga/update.py @@ -31,7 +31,6 @@ from azurelinuxagent.common import conf from azurelinuxagent.common import logger -from azurelinuxagent.common.protocol.imds import get_imds_client from azurelinuxagent.common.utils import fileutil, textutil from azurelinuxagent.common.agent_supported_feature import get_supported_feature_by_name, SupportedFeatureNames, \ get_agent_supported_features_list_for_crp @@ -395,7 +394,7 @@ def run(self, debug=False): self._check_daemon_running(debug) self._check_threads_running(all_thread_handlers) self._process_goal_state(exthandlers_handler, remote_access_handler, agent_update_handler) - self._send_heartbeat_telemetry(protocol) + self._send_heartbeat_telemetry(protocol, agent_update_handler) self._check_agent_memory_usage() time.sleep(self._goal_state_period) @@ -411,7 +410,7 @@ def run(self, debug=False): logger.warn(textutil.format_exception(error)) sys.exit(1) # additional return here because sys.exit is mocked in unit tests - return + return # pylint: disable=unreachable self._shutdown() sys.exit(0) @@ -475,25 +474,6 @@ def _wait_for_cloud_init(self): add_event(op=WALAEventOperation.CloudInit, message=message, is_success=False, log_event=False) self._cloud_init_completed = True # Mark as completed even on error since we will proceed to execute extensions - def _get_vm_size(self, protocol): - """ - Including VMSize is meant to capture the architecture of the VM (i.e. arm64 VMs will - have arm64 included in their vmsize field and amd64 will have no architecture indicated). 
- """ - if self._vm_size is None: - - imds_client = get_imds_client(protocol.get_endpoint()) - - try: - imds_info = imds_client.get_compute() - self._vm_size = imds_info.vmSize - except Exception as e: - err_msg = "Attempts to retrieve VM size information from IMDS are failing: {0}".format(textutil.format_exception(e)) - logger.periodic_warn(logger.EVERY_SIX_HOURS, "[PERIODIC] {0}".format(err_msg)) - return "unknown" - - return self._vm_size - def _get_vm_arch(self): return platform.machine() @@ -896,7 +876,7 @@ def _get_pid_parts(self): pid_file = conf.get_agent_pid_file_path() pid_dir = os.path.dirname(pid_file) pid_name = os.path.basename(pid_file) - pid_re = re.compile("(\d+)_{0}".format(re.escape(pid_name))) # pylint: disable=W1401 + pid_re = re.compile(r"(\d+)_{0}".format(re.escape(pid_name))) return pid_dir, pid_name, pid_re def _get_pid_files(self): @@ -1036,27 +1016,27 @@ def _write_pid_file(self): return pid_files, pid_file - def _send_heartbeat_telemetry(self, protocol): + def _send_heartbeat_telemetry(self, protocol, agent_update_handler): if self._last_telemetry_heartbeat is None: self._last_telemetry_heartbeat = datetime.utcnow() - UpdateHandler.TELEMETRY_HEARTBEAT_PERIOD if datetime.utcnow() >= (self._last_telemetry_heartbeat + UpdateHandler.TELEMETRY_HEARTBEAT_PERIOD): dropped_packets = self.osutil.get_firewall_dropped_packets(protocol.get_endpoint()) - auto_update_enabled = 1 if conf.get_autoupdate_enabled() else 0 + auto_update_enabled = 1 if conf.get_auto_update_to_latest_version() else 0 + update_mode = agent_update_handler.get_current_update_mode() - telemetry_msg = "{0};{1};{2};{3};{4}".format(self._heartbeat_counter, self._heartbeat_id, dropped_packets, - self._heartbeat_update_goal_state_error_count, - auto_update_enabled) - debug_log_msg = "[DEBUG HeartbeatCounter: {0};HeartbeatId: {1};DroppedPackets: {2};" \ - "UpdateGSErrors: {3};AutoUpdate: {4}]".format(self._heartbeat_counter, + # Note: When we add new values to the heartbeat message, 
please add a semicolon at the end of the value. + # This helps to parse the message easily in kusto queries with regex + heartbeat_msg = "HeartbeatCounter: {0};HeartbeatId: {1};DroppedPackets: {2};" \ + "UpdateGSErrors: {3};AutoUpdate: {4};UpdateMode: {5};".format(self._heartbeat_counter, self._heartbeat_id, dropped_packets, self._heartbeat_update_goal_state_error_count, - auto_update_enabled) + auto_update_enabled, update_mode) # Write Heartbeat events/logs add_event(name=AGENT_NAME, version=CURRENT_VERSION, op=WALAEventOperation.HeartBeat, is_success=True, - message=telemetry_msg, log_event=False) - logger.info(u"[HEARTBEAT] Agent {0} is running as the goal state agent {1}", CURRENT_AGENT, debug_log_msg) + message=heartbeat_msg, log_event=False) + logger.info(u"[HEARTBEAT] Agent {0} is running as the goal state agent [DEBUG {1}]", CURRENT_AGENT, heartbeat_msg) # Update/Reset the counters self._heartbeat_counter += 1 diff --git a/azurelinuxagent/pa/deprovision/default.py b/azurelinuxagent/pa/deprovision/default.py index 35b4ae82ed..d96adbfec5 100644 --- a/azurelinuxagent/pa/deprovision/default.py +++ b/azurelinuxagent/pa/deprovision/default.py @@ -162,7 +162,8 @@ def del_lib_dir_files(self, warnings, actions): # pylint: disable=W0613 'published_hostname', 'fast_track.json', 'initial_goal_state', - 'rsm_update.json' + 'waagent_rsm_update', + 'waagent_initial_update' ] known_files_glob = [ 'Extensions.*.xml', diff --git a/azurelinuxagent/pa/deprovision/factory.py b/azurelinuxagent/pa/deprovision/factory.py index 2caedc8daa..6da78a2d28 100644 --- a/azurelinuxagent/pa/deprovision/factory.py +++ b/azurelinuxagent/pa/deprovision/factory.py @@ -15,9 +15,9 @@ # Requires Python 2.6+ and Openssl 1.0+ # -from distutils.version import LooseVersion as Version # pylint: disable=no-name-in-module, import-error from azurelinuxagent.common.version import DISTRO_NAME, DISTRO_VERSION, DISTRO_FULL_NAME +from azurelinuxagent.common.utils.distro_version import DistroVersion from .arch 
import ArchDeprovisionHandler from .clearlinux import ClearLinuxDeprovisionHandler from .coreos import CoreOSDeprovisionHandler @@ -31,7 +31,7 @@ def get_deprovision_handler(distro_name=DISTRO_NAME, if distro_name == "arch": return ArchDeprovisionHandler() if distro_name == "ubuntu": - if Version(distro_version) >= Version('18.04'): + if DistroVersion(distro_version) >= DistroVersion('18.04'): return Ubuntu1804DeprovisionHandler() else: return UbuntuDeprovisionHandler() diff --git a/azurelinuxagent/pa/rdma/centos.py b/azurelinuxagent/pa/rdma/centos.py index 5e82acf531..b02b785283 100644 --- a/azurelinuxagent/pa/rdma/centos.py +++ b/azurelinuxagent/pa/rdma/centos.py @@ -82,7 +82,7 @@ def is_rdma_package_up_to_date(self, pkg, fw_version): # Example match (pkg name, -, followed by 3 segments, fw_version and -): # - pkg=microsoft-hyper-v-rdma-4.1.0.142-20160323.x86_64 # - fw_version=142 - pattern = '{0}-(\d+\.){{3,}}({1})-'.format(self.rdma_user_mode_package_name, fw_version) # pylint: disable=W1401 + pattern = r'{0}-(\d+\.){{3,}}({1})-'.format(self.rdma_user_mode_package_name, fw_version) return re.match(pattern, pkg) @staticmethod @@ -155,7 +155,7 @@ def install_rdma_drivers(self, fw_version): # Install kernel mode driver (kmod-microsoft-hyper-v-rdma-*) kmod_pkg = self.get_file_by_pattern( - pkgs, "%s-(\d+\.){3,}(%s)-\d{8}\.x86_64.rpm" % (self.rdma_kernel_mode_package_name, fw_version)) # pylint: disable=W1401 + pkgs, r"%s-(\d+\.){3,}(%s)-\d{8}\.x86_64.rpm" % (self.rdma_kernel_mode_package_name, fw_version)) if not kmod_pkg: raise Exception("RDMA kernel mode package not found") kmod_pkg_path = os.path.join(pkg_dir, kmod_pkg) @@ -164,7 +164,7 @@ def install_rdma_drivers(self, fw_version): # Install user mode driver (microsoft-hyper-v-rdma-*) umod_pkg = self.get_file_by_pattern( - pkgs, "%s-(\d+\.){3,}(%s)-\d{8}\.x86_64.rpm" % (self.rdma_user_mode_package_name, fw_version)) # pylint: disable=W1401 + pkgs, r"%s-(\d+\.){3,}(%s)-\d{8}\.x86_64.rpm" % 
(self.rdma_user_mode_package_name, fw_version)) if not umod_pkg: raise Exception("RDMA user mode package not found") umod_pkg_path = os.path.join(pkg_dir, umod_pkg) diff --git a/azurelinuxagent/pa/rdma/factory.py b/azurelinuxagent/pa/rdma/factory.py index ec4a8bc48b..34034818d8 100644 --- a/azurelinuxagent/pa/rdma/factory.py +++ b/azurelinuxagent/pa/rdma/factory.py @@ -15,11 +15,10 @@ # Requires Python 2.6+ and Openssl 1.0+ # -from distutils.version import LooseVersion as Version # pylint: disable=no-name-in-module, import-error - import azurelinuxagent.common.logger as logger from azurelinuxagent.pa.rdma.rdma import RDMAHandler from azurelinuxagent.common.version import DISTRO_FULL_NAME, DISTRO_VERSION +from azurelinuxagent.common.utils.distro_version import DistroVersion from .centos import CentOSRDMAHandler from .suse import SUSERDMAHandler from .ubuntu import UbuntuRDMAHandler @@ -34,7 +33,7 @@ def get_rdma_handler( (distro_full_name == 'SUSE Linux Enterprise Server' or distro_full_name == 'SLES' or distro_full_name == 'SLE_HPC') and - Version(distro_version) > Version('11') + DistroVersion(distro_version) > DistroVersion('11') ): return SUSERDMAHandler() diff --git a/azurelinuxagent/pa/rdma/rdma.py b/azurelinuxagent/pa/rdma/rdma.py index aabd05541e..a6e7c3fe69 100644 --- a/azurelinuxagent/pa/rdma/rdma.py +++ b/azurelinuxagent/pa/rdma/rdma.py @@ -246,7 +246,7 @@ def provision_network_direct_rdma(self): return retcode, out = shellutil.run_get_output("modinfo %s" % module_name) if retcode == 0: - version = re.search("version:\s+(\d+)\.(\d+)\.(\d+)\D", out, re.IGNORECASE) # pylint: disable=W1401 + version = re.search(r"version:\s+(\d+)\.(\d+)\.(\d+)\D", out, re.IGNORECASE) if version: v1 = int(version.groups(0)[0]) v2 = int(version.groups(0)[1]) @@ -368,10 +368,6 @@ def update_iboip_interfaces(self, mac_ip_array): count = 0 for nic in nics: - # look for IBoIP interface of format ibXXX - if not re.match(r"ib\w+", nic): - continue - mac_addr = None with 
open(os.path.join(net_dir, nic, "address")) as address_file: mac_addr = address_file.read() @@ -382,7 +378,11 @@ def update_iboip_interfaces(self, mac_ip_array): mac_addr = mac_addr.upper() - match = re.match(r".+(\w\w):(\w\w):(\w\w):\w\w:\w\w:(\w\w):(\w\w):(\w\w)\n", mac_addr) + # if this is an IB interface, match IB-specific regex + if re.match(r"ib\w+", nic): + match = re.match(r".+(\w\w):(\w\w):(\w\w):\w\w:\w\w:(\w\w):(\w\w):(\w\w)\n", mac_addr) + else: + match = re.match(r"^(\w\w):(\w\w):(\w\w):(\w\w):(\w\w):(\w\w)$", mac_addr) if not match: logger.error("RDMA: failed to parse address for device {0} address {1}".format(nic, mac_addr)) continue @@ -473,7 +473,7 @@ def update_dat_conf(paths, ipv4_addr): @staticmethod def replace_dat_conf_contents(cfg, ipv4_addr): - old = "ofa-v2-ib0 u2.0 nonthreadsafe default libdaplofa.so.2 dapl.2.0 \"\S+ 0\"" # pylint: disable=W1401 + old = r"ofa-v2-ib0 u2.0 nonthreadsafe default libdaplofa.so.2 dapl.2.0 \"\S+ 0\"" new = "ofa-v2-ib0 u2.0 nonthreadsafe default libdaplofa.so.2 dapl.2.0 \"{0} 0\"".format( ipv4_addr) return re.sub(old, new, cfg) diff --git a/azurelinuxagent/pa/rdma/suse.py b/azurelinuxagent/pa/rdma/suse.py index bcf971482e..347f3eeecb 100644 --- a/azurelinuxagent/pa/rdma/suse.py +++ b/azurelinuxagent/pa/rdma/suse.py @@ -23,15 +23,15 @@ from azurelinuxagent.pa.rdma.rdma import RDMAHandler from azurelinuxagent.common.version import DISTRO_VERSION +from azurelinuxagent.common.utils.distro_version import DistroVersion -from distutils.version import LooseVersion as Version class SUSERDMAHandler(RDMAHandler): def install_driver(self): # pylint: disable=R1710 """Install the appropriate driver package for the RDMA firmware""" - if Version(DISTRO_VERSION) >= Version('15'): + if DistroVersion(DISTRO_VERSION) >= DistroVersion('15'): msg = 'SLE 15 and later only supports PCI pass through, no ' msg += 'special driver needed for IB interface' logger.info(msg) diff --git a/azurelinuxagent/pa/rdma/ubuntu.py 
b/azurelinuxagent/pa/rdma/ubuntu.py index bef152f2e4..dd8652197d 100644 --- a/azurelinuxagent/pa/rdma/ubuntu.py +++ b/azurelinuxagent/pa/rdma/ubuntu.py @@ -37,7 +37,7 @@ def install_driver(self): logger.error("RDMA: Could not determine firmware version. No driver will be installed") return #replace . with _, we are looking for number like 144_0 - nd_version = re.sub('\.', '_', nd_version) # pylint: disable=W1401 + nd_version = re.sub(r'\.', '_', nd_version) #Check to see if we need to reconfigure driver status,module_name = shellutil.run_get_output('modprobe -R hv_network_direct', chk_err=False) @@ -79,13 +79,13 @@ def install_driver(self): status,output = shellutil.run_get_output('apt-cache show --no-all-versions linux-azure') if status != 0: return - r = re.search('Version: (\S+)', output) # pylint: disable=W1401 + r = re.search(r'Version: (\S+)', output) if not r: logger.error("RDMA: version not found in package linux-azure.") return package_version = r.groups()[0] #Remove the ending . 
after - package_version = re.sub("\.\d+$", "", package_version) # pylint: disable=W1401 + package_version = re.sub(r"\.\d+$", "", package_version) logger.info('RDMA: kernel_version=%s package_version=%s' % (kernel_version, package_version)) kernel_version_array = [ int(x) for x in kernel_version.split('.') ] @@ -111,9 +111,9 @@ def update_modprobed_conf(self, nd_version): with open(modprobed_file, 'r') as f: lines = f.read() - r = re.search('alias hv_network_direct hv_network_direct_\S+', lines) # pylint: disable=W1401 + r = re.search(r'alias hv_network_direct hv_network_direct_\S+', lines) if r: - lines = re.sub('alias hv_network_direct hv_network_direct_\S+', 'alias hv_network_direct hv_network_direct_%s' % nd_version, lines) # pylint: disable=W1401 + lines = re.sub(r'alias hv_network_direct hv_network_direct_\S+', 'alias hv_network_direct hv_network_direct_%s' % nd_version, lines) else: lines += '\nalias hv_network_direct hv_network_direct_%s\n' % nd_version with open('/etc/modprobe.d/vmbus-rdma.conf', 'w') as f: diff --git a/ci/2.7.pylintrc b/ci/2.7.pylintrc deleted file mode 100644 index 0cba65ee9d..0000000000 --- a/ci/2.7.pylintrc +++ /dev/null @@ -1,42 +0,0 @@ -# python2.7 uses pylint 1.9.5, whose docs can be found here: http://pylint.pycqa.org/en/1.9/technical_reference/features.html#messages -# python3.4 uses pylint 2.3.1, whose docs can be found here: http://pylint.pycqa.org/en/pylint-2.3.1/technical_reference/features.html - -[MESSAGES CONTROL] - -disable=C, # (C) convention, for programming standard violation - consider-using-dict-comprehension, # (R1717): *Consider using a dictionary comprehension* - consider-using-in, # (R1714): *Consider merging these comparisons with "in" to %r* - consider-using-set-comprehension, # (R1718): *Consider using a set comprehension* - consider-using-with, # (R1732): *Emitted if a resource-allocating assignment or call may be replaced by a 'with' block* - duplicate-code, # (R0801): *Similar lines in %s files* - no-init, # 
(W0232): Class has no __init__ method - no-else-break, # (R1723): *Unnecessary "%s" after "break"* - no-else-continue, # (R1724): *Unnecessary "%s" after "continue"* - no-else-raise, # (R1720): *Unnecessary "%s" after "raise"* - no-else-return, # (R1705): *Unnecessary "%s" after "return"* - no-self-use, # (R0201): *Method could be a function* - protected-access, # (W0212): Access to a protected member of a client class - simplifiable-if-expression, # (R1719): *The if expression can be replaced with %s* - simplifiable-if-statement, # (R1703): *The if statement can be replaced with %s* - super-with-arguments, # (R1725): *Consider using Python 3 style super() without arguments* - too-few-public-methods, # (R0903): *Too few public methods (%s/%s)* - too-many-ancestors, # (R0901): *Too many ancestors (%s/%s)* - too-many-arguments, # (R0913): *Too many arguments (%s/%s)* - too-many-boolean-expressions, # (R0916): *Too many boolean expressions in if statement (%s/%s)* - too-many-branches, # (R0912): *Too many branches (%s/%s)* - too-many-instance-attributes, # (R0902): *Too many instance attributes (%s/%s)* - too-many-locals, # (R0914): *Too many local variables (%s/%s)* - too-many-nested-blocks, # (R1702): *Too many nested blocks (%s/%s)* - too-many-public-methods, # (R0904): *Too many public methods (%s/%s)* - too-many-return-statements, # (R0911): *Too many return statements (%s/%s)* - too-many-statements, # (R0915): *Too many statements (%s/%s)* - useless-object-inheritance, # (R0205): *Class %r inherits from object, can be safely removed from bases in python3* - useless-return, # (R1711): *Useless return at end of function or method* - bad-continuation, # Buggy, **REMOVED in pylint-2.6.0** - bad-option-value, # pylint does not recognize the error code/symbol (needed to supress breaking changes across pylint versions) - bad-whitespace, # Used when a wrong number of spaces is used around an operator, bracket or block opener. 
- broad-except, # Used when an except catches a too general exception, possibly burying unrelated errors. - deprecated-lambda, # Used when a lambda is the first argument to “map” or “filter”. It could be clearer as a list comprehension or generator expression. (2.7 only) - missing-docstring, # Used when a module, function, class or method has no docstring - old-style-class, # Used when a class is defined that does not inherit from another class and does not inherit explicitly from “object”. (2.7 only) - fixme, # Used when a warning note as FIXME or TODO is detected diff --git a/ci/3.6.pylintrc b/ci/3.6.pylintrc deleted file mode 100644 index fcbae93831..0000000000 --- a/ci/3.6.pylintrc +++ /dev/null @@ -1,40 +0,0 @@ -# python 3.6+ uses the latest pylint version, whose docs can be found here: http://pylint.pycqa.org/en/stable/technical_reference/features.html - -[MESSAGES CONTROL] - -disable=C, # (C) convention, for programming standard violation - broad-except, # (W0703): *Catching too general exception %s* - consider-using-dict-comprehension, # (R1717): *Consider using a dictionary comprehension* - consider-using-in, # (R1714): *Consider merging these comparisons with "in" to %r* - consider-using-set-comprehension, # (R1718): *Consider using a set comprehension* - consider-using-with, # (R1732): *Emitted if a resource-allocating assignment or call may be replaced by a 'with' block* - duplicate-code, # (R0801): *Similar lines in %s files* - fixme, # Used when a warning note as FIXME or TODO is detected - no-else-break, # (R1723): *Unnecessary "%s" after "break"* - no-else-continue, # (R1724): *Unnecessary "%s" after "continue"* - no-else-raise, # (R1720): *Unnecessary "%s" after "raise"* - no-else-return, # (R1705): *Unnecessary "%s" after "return"* - no-init, # (W0232): Class has no __init__ method - no-self-use, # (R0201): *Method could be a function* - protected-access, # (W0212): Access to a protected member of a client class - raise-missing-from, # (W0707): 
*Consider explicitly re-raising using the 'from' keyword* - redundant-u-string-prefix, # The u prefix for strings is no longer necessary in Python >=3.0 - simplifiable-if-expression, # (R1719): *The if expression can be replaced with %s* - simplifiable-if-statement, # (R1703): *The if statement can be replaced with %s* - super-with-arguments, # (R1725): *Consider using Python 3 style super() without arguments* - too-few-public-methods, # (R0903): *Too few public methods (%s/%s)* - too-many-ancestors, # (R0901): *Too many ancestors (%s/%s)* - too-many-arguments, # (R0913): *Too many arguments (%s/%s)* - too-many-boolean-expressions, # (R0916): *Too many boolean expressions in if statement (%s/%s)* - too-many-branches, # (R0912): *Too many branches (%s/%s)* - too-many-instance-attributes, # (R0902): *Too many instance attributes (%s/%s)* - too-many-locals, # (R0914): *Too many local variables (%s/%s)* - too-many-nested-blocks, # (R1702): *Too many nested blocks (%s/%s)* - too-many-public-methods, # (R0904): *Too many public methods (%s/%s)* - too-many-return-statements, # (R0911): *Too many return statements (%s/%s)* - too-many-statements, # (R0915): *Too many statements (%s/%s)* - unspecified-encoding, # (W1514): Using open without explicitly specifying an encoding - use-a-generator, # (R1729): *Use a generator instead '%s(%s)'* - useless-object-inheritance, # (R0205): *Class %r inherits from object, can be safely removed from bases in python3* - useless-return, # (R1711): *Useless return at end of function or method* - diff --git a/ci/nosetests.sh b/ci/nosetests.sh index faefd902a9..15b83860ec 100755 --- a/ci/nosetests.sh +++ b/ci/nosetests.sh @@ -5,18 +5,18 @@ set -u EXIT_CODE=0 echo "=========================================" -echo "nosetests -a '!requires_sudo' output" +echo "**** nosetests non-sudo tests ****" echo "=========================================" -nosetests -a '!requires_sudo' tests $NOSEOPTS || EXIT_CODE=$(($EXIT_CODE || $?)) +nosetests 
--ignore-files test_cgroupconfigurator_sudo.py tests $NOSEOPTS || EXIT_CODE=$(($EXIT_CODE || $?)) echo EXIT_CODE no_sudo nosetests = $EXIT_CODE [[ -f .coverage ]] && \ sudo mv .coverage coverage.$(uuidgen).no_sudo.data echo "=========================================" -echo "nosetests -a 'requires_sudo' output" +echo "**** nosetests sudo tests ****" echo "=========================================" -sudo env "PATH=$PATH" nosetests -a 'requires_sudo' tests $NOSEOPTS || EXIT_CODE=$(($EXIT_CODE || $?)) +sudo env "PATH=$PATH" nosetests tests/ga/test_cgroupconfigurator_sudo.py $NOSEOPTS || EXIT_CODE=$(($EXIT_CODE || $?)) echo EXIT_CODE with_sudo nosetests = $EXIT_CODE [[ -f .coverage ]] && \ diff --git a/ci/pylintrc b/ci/pylintrc new file mode 100644 index 0000000000..f57949ab72 --- /dev/null +++ b/ci/pylintrc @@ -0,0 +1,42 @@ +[MESSAGES CONTROL] + +disable=C, # (C) convention, for programming standard violation + broad-except, # W0703: *Catching too general exception %s* + broad-exception-raised, # W0719: Raising too general exception: Exception + consider-using-dict-comprehension, # R1717: *Consider using a dictionary comprehension* + consider-using-from-import, # R0402: Use 'from foo import bar' instead + consider-using-in, # R1714: *Consider merging these comparisons with "in" to %r* + consider-using-max-builtin, # R1731: Consider using 'a = max(a, b)' instead of unnecessary if block + consider-using-min-builtin, # R1730: Consider using 'a = min(a, b)' instead of unnecessary if block + consider-using-set-comprehension, # R1718: *Consider using a set comprehension* + consider-using-with, # R1732: *Emitted if a resource-allocating assignment or call may be replaced by a 'with' block* + duplicate-code, # R0801: *Similar lines in %s files* + fixme, # Used when a warning note as FIXME or TODO is detected + logging-format-interpolation, # W1202: Use lazy % formatting in logging functions + logging-fstring-interpolation, # W1203: Use lazy % or .format() formatting in logging 
functions + no-else-break, # R1723: *Unnecessary "%s" after "break"* + no-else-continue, # R1724: *Unnecessary "%s" after "continue"* + no-else-raise, # R1720: *Unnecessary "%s" after "raise"* + no-else-return, # R1705: *Unnecessary "%s" after "return"* + protected-access, # W0212: Access to a protected member of a client class + raise-missing-from, # W0707: *Consider explicitly re-raising using the 'from' keyword* + redundant-u-string-prefix, # The u prefix for strings is no longer necessary in Python >=3.0 + simplifiable-if-expression, # R1719: *The if expression can be replaced with %s* + simplifiable-if-statement, # R1703: *The if statement can be replaced with %s* + super-with-arguments, # R1725: *Consider using Python 3 style super() without arguments* + too-few-public-methods, # R0903: *Too few public methods (%s/%s)* + too-many-ancestors, # R0901: *Too many ancestors (%s/%s)* + too-many-arguments, # R0913: *Too many arguments (%s/%s)* + too-many-boolean-expressions, # R0916: *Too many boolean expressions in if statement (%s/%s)* + too-many-branches, # R0912: *Too many branches (%s/%s)* + too-many-instance-attributes, # R0902: *Too many instance attributes (%s/%s)* + too-many-locals, # R0914: *Too many local variables (%s/%s)* + too-many-nested-blocks, # R1702: *Too many nested blocks (%s/%s)* + too-many-public-methods, # R0904: *Too many public methods (%s/%s)* + too-many-return-statements, # R0911: *Too many return statements (%s/%s)* + too-many-statements, # R0915: *Too many statements (%s/%s)* + unspecified-encoding, # W1514: Using open without explicitly specifying an encoding + use-a-generator, # R1729: *Use a generator instead '%s(%s)'* + use-yield-from, # R1737: Use 'yield from' directly instead of yielding each element one by one + useless-object-inheritance, # R0205: *Class %r inherits from object, can be safely removed from bases in python3* + useless-return, # R1711: *Useless return at end of function or method* diff --git a/ci/pytest.ini b/ci/pytest.ini new
file mode 100644 index 0000000000..7e0cb25396 --- /dev/null +++ b/ci/pytest.ini @@ -0,0 +1,3 @@ +[pytest] +filterwarnings = + ignore:distro.linux_distribution\(\) is deprecated diff --git a/ci/pytest.sh b/ci/pytest.sh new file mode 100755 index 0000000000..de240a3b89 --- /dev/null +++ b/ci/pytest.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +set -u + +EXIT_CODE=0 + +echo "=========================================" +echo "**** pytest *** non-sudo tests ****" +echo "=========================================" +pytest --verbose --config-file ci/pytest.ini --ignore-glob '*/test_cgroupconfigurator_sudo.py' tests || EXIT_CODE=$(($EXIT_CODE || $?)) +echo EXIT_CODE pytests non-sudo = $EXIT_CODE + +echo "=========================================" +echo "**** pytest *** sudo tests ****" +echo "=========================================" +sudo env "PATH=$PATH" pytest --verbose --config-file ci/pytest.ini tests/ga/test_cgroupconfigurator_sudo.py || EXIT_CODE=$(($EXIT_CODE || $?)) +echo EXIT_CODE pytests sudo = $EXIT_CODE + +exit "$EXIT_CODE" diff --git a/requirements.txt b/requirements.txt index b0b7c87457..ab6958a732 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,2 +1,3 @@ distro; python_version >= '3.8' -pyasn1 \ No newline at end of file +pyasn1 +legacycrypt; python_version >= '3.13' diff --git a/setup.py b/setup.py index 6b54d09e76..0bb053d4c2 100755 --- a/setup.py +++ b/setup.py @@ -147,7 +147,7 @@ def get_data_files(name, version, fullname): # pylint: disable=R0912 src=["config/clearlinux/waagent.conf"]) set_systemd_files(data_files, dest=systemd_dir_path, src=["init/clearlinux/waagent.service"]) - elif name == 'mariner': + elif name in ["mariner", "azurelinux"]: set_bin_files(data_files, dest=agent_bin_path) set_conf_files(data_files, dest="/etc", src=["config/mariner/waagent.conf"]) @@ -314,13 +314,16 @@ def run(self): # Note to packagers and users from source. 
-# In version 3.5 of Python distribution information handling in the platform -# module was deprecated. Depending on the Linux distribution the -# implementation may be broken prior to Python 3.7 wher the functionality -# will be removed from Python 3 -requires = [] # pylint: disable=invalid-name -if sys.version_info[0] >= 3 and sys.version_info[1] >= 7: - requires = ['distro'] # pylint: disable=invalid-name +# * In version 3.5 of Python distribution information handling in the platform +# module was deprecated. Depending on the Linux distribution the +# implementation may be broken prior to Python 3.8 where the functionality +# will be removed from Python 3. +# * In version 3.13 of Python, the crypt module was removed and legacycrypt is +# required instead. +requires = [ + "distro;python_version>='3.8'", + "legacycrypt;python_version>='3.13'", +] modules = [] # pylint: disable=invalid-name diff --git a/test-requirements.txt b/test-requirements.txt index 2b9467870e..0d653912ea 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -3,8 +3,9 @@ mock==2.0.0; python_version == '2.6' mock==3.0.5; python_version >= '2.7' and python_version <= '3.5' mock==4.0.2; python_version >= '3.6' distro; python_version >= '3.8' -nose -nose-timer; python_version >= '2.7' +nose; python_version <= '3.9' +nose-timer; python_version >= '2.7' and python_version <= '3.9' +pytest; python_version >= '3.10' # Pinning the wrapt requirement to 1.12.0 due to the bug - https://github.com/GrahamDumpleton/wrapt/issues/188 wrapt==1.12.0; python_version > '2.6' and python_version < '3.6' diff --git a/tests/common/osutil/test_default.py b/tests/common/osutil/test_default.py index afe27ae595..7cb5501c1e 100644 --- a/tests/common/osutil/test_default.py +++ b/tests/common/osutil/test_default.py @@ -964,7 +964,7 @@ def test_remove_firewall_should_not_retry_invalid_rule(self): self.assertFalse(osutil._enable_firewall) - @skip_if_predicate_true(is_python_version_26_or_34, "Disabled on Python 2.6 
and 3.4 for now. Need to revisit to fix it") + @skip_if_predicate_true(is_python_version_26_or_34, "Disabled on Python 2.6 and 3.4, they run on containers where the OS commands needed by the test are not present.") def test_get_nic_state(self): state = osutil.DefaultOSUtil().get_nic_state() self.assertNotEqual(state, {}) @@ -1111,6 +1111,15 @@ def test_get_hostname_record_should_initialize_the_host_name_using_cloud_init_in self.assertEqual(expected, actual, "get_hostname_record returned an incorrect hostname") self.assertEqual(expected, self.__get_published_hostname_contents(), "get_hostname_record returned an incorrect hostname") + def test_get_password_hash(self): + with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'test_passwords.txt'), 'rb') as in_file: + for data in in_file: + # Remove bom on bytes data before it is converted into string. + data = textutil.remove_bom(data) + data = ustr(data, encoding='utf-8') + password_hash = osutil.DefaultOSUtil.gen_password_hash(data, 6, 10) + self.assertNotEqual(None, password_hash) + if __name__ == '__main__': unittest.main() diff --git a/tests/common/osutil/test_factory.py b/tests/common/osutil/test_factory.py index 46bf6a8758..5bfb867d43 100644 --- a/tests/common/osutil/test_factory.py +++ b/tests/common/osutil/test_factory.py @@ -99,7 +99,7 @@ def test_get_osutil_it_should_return_ubuntu(self): self.assertEqual(ret.get_service_name(), "walinuxagent") ret = _get_osutil(distro_name="ubuntu", - distro_code_name="focal", + distro_code_name="noble", distro_version="24.04", distro_full_name="") self.assertTrue(isinstance(ret, Ubuntu18OSUtil)) diff --git a/tests/common/utils/test_passwords.txt b/tests/common/osutil/test_passwords.txt similarity index 100% rename from tests/common/utils/test_passwords.txt rename to tests/common/osutil/test_passwords.txt diff --git a/tests/common/protocol/test_goal_state.py b/tests/common/protocol/test_goal_state.py index 5b4a2948af..5a63586e5d 100644 --- 
a/tests/common/protocol/test_goal_state.py +++ b/tests/common/protocol/test_goal_state.py @@ -401,7 +401,7 @@ def test_it_should_download_certs_on_a_new_fast_track_goal_state(self): with mock_wire_protocol(data_file) as protocol: goal_state = GoalState(protocol.client) - cert = "BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F" + cert = "F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9" crt_path = os.path.join(self.tmp_dir, cert + ".crt") prv_path = os.path.join(self.tmp_dir, cert + ".prv") @@ -426,7 +426,7 @@ def test_it_should_download_certs_on_a_new_fabric_goal_state(self): protocol.mock_wire_data.set_vm_settings_source(GoalStateSource.Fabric) goal_state = GoalState(protocol.client) - cert = "BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F" + cert = "F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9" crt_path = os.path.join(self.tmp_dir, cert + ".crt") prv_path = os.path.join(self.tmp_dir, cert + ".prv") diff --git a/tests/common/protocol/test_imds.py b/tests/common/protocol/test_imds.py index efc705ffab..9333a5f9a3 100644 --- a/tests/common/protocol/test_imds.py +++ b/tests/common/protocol/test_imds.py @@ -56,7 +56,7 @@ class TestImds(AgentTestCase): def test_get(self, mock_http_get): mock_http_get.return_value = get_mock_compute_response() - test_subject = imds.ImdsClient(restutil.KNOWN_WIRESERVER_IP) + test_subject = imds.ImdsClient() test_subject.get_compute() self.assertEqual(1, mock_http_get.call_count) @@ -71,21 +71,21 @@ def test_get(self, mock_http_get): def test_get_bad_request(self, mock_http_get): mock_http_get.return_value = MockHttpResponse(status=restutil.httpclient.BAD_REQUEST) - test_subject = imds.ImdsClient(restutil.KNOWN_WIRESERVER_IP) + test_subject = imds.ImdsClient() self.assertRaises(HttpError, test_subject.get_compute) @patch("azurelinuxagent.common.protocol.imds.restutil.http_get") def test_get_internal_service_error(self, mock_http_get): mock_http_get.return_value = MockHttpResponse(status=restutil.httpclient.INTERNAL_SERVER_ERROR) - test_subject = 
imds.ImdsClient(restutil.KNOWN_WIRESERVER_IP) + test_subject = imds.ImdsClient() self.assertRaises(HttpError, test_subject.get_compute) @patch("azurelinuxagent.common.protocol.imds.restutil.http_get") def test_get_empty_response(self, mock_http_get): mock_http_get.return_value = MockHttpResponse(status=httpclient.OK, body=''.encode('utf-8')) - test_subject = imds.ImdsClient(restutil.KNOWN_WIRESERVER_IP) + test_subject = imds.ImdsClient() self.assertRaises(ValueError, test_subject.get_compute) def test_deserialize_ComputeInfo(self): @@ -359,7 +359,7 @@ def _imds_response(f): return fh.read() def _assert_validation(self, http_status_code, http_response, expected_valid, expected_response): - test_subject = imds.ImdsClient(restutil.KNOWN_WIRESERVER_IP) + test_subject = imds.ImdsClient() with patch("azurelinuxagent.common.utils.restutil.http_get") as mock_http_get: mock_http_get.return_value = MockHttpResponse(status=http_status_code, reason='reason', @@ -386,99 +386,86 @@ def test_endpoint_fallback(self): # http GET calls and enforces a single GET call (fallback would cause 2) and # checks the url called. 
- test_subject = imds.ImdsClient("foo.bar") + test_subject = imds.ImdsClient() # ensure user-agent gets set correctly for is_health, expected_useragent in [(False, restutil.HTTP_USER_AGENT), (True, restutil.HTTP_USER_AGENT_HEALTH)]: # set a different resource path for health query to make debugging unit test easier resource_path = 'something/health' if is_health else 'something' - for has_primary_ioerror in (False, True): - # secondary endpoint unreachable - test_subject._http_get = Mock(side_effect=self._mock_http_get) - self._mock_imds_setup(primary_ioerror=has_primary_ioerror, secondary_ioerror=True) - result = test_subject.get_metadata(resource_path=resource_path, is_health=is_health) - self.assertFalse(result.success) if has_primary_ioerror else self.assertTrue(result.success) # pylint: disable=expression-not-assigned - self.assertFalse(result.service_error) - if has_primary_ioerror: - self.assertEqual('IMDS error in /metadata/{0}: Unable to connect to endpoint'.format(resource_path), result.response) - else: - self.assertEqual('Mock success response', result.response) - for _, kwargs in test_subject._http_get.call_args_list: - self.assertTrue('User-Agent' in kwargs['headers']) - self.assertEqual(expected_useragent, kwargs['headers']['User-Agent']) - self.assertEqual(2 if has_primary_ioerror else 1, test_subject._http_get.call_count) - - # IMDS success - test_subject._http_get = Mock(side_effect=self._mock_http_get) - self._mock_imds_setup(primary_ioerror=has_primary_ioerror) - result = test_subject.get_metadata(resource_path=resource_path, is_health=is_health) - self.assertTrue(result.success) - self.assertFalse(result.service_error) - self.assertEqual('Mock success response', result.response) - for _, kwargs in test_subject._http_get.call_args_list: - self.assertTrue('User-Agent' in kwargs['headers']) - self.assertEqual(expected_useragent, kwargs['headers']['User-Agent']) - self.assertEqual(2 if has_primary_ioerror else 1, test_subject._http_get.call_count) 
- - # IMDS throttled - test_subject._http_get = Mock(side_effect=self._mock_http_get) - self._mock_imds_setup(primary_ioerror=has_primary_ioerror, throttled=True) - result = test_subject.get_metadata(resource_path=resource_path, is_health=is_health) - self.assertFalse(result.success) - self.assertFalse(result.service_error) - self.assertEqual('IMDS error in /metadata/{0}: Throttled'.format(resource_path), result.response) - for _, kwargs in test_subject._http_get.call_args_list: - self.assertTrue('User-Agent' in kwargs['headers']) - self.assertEqual(expected_useragent, kwargs['headers']['User-Agent']) - self.assertEqual(2 if has_primary_ioerror else 1, test_subject._http_get.call_count) - - # IMDS gone error - test_subject._http_get = Mock(side_effect=self._mock_http_get) - self._mock_imds_setup(primary_ioerror=has_primary_ioerror, gone_error=True) - result = test_subject.get_metadata(resource_path=resource_path, is_health=is_health) - self.assertFalse(result.success) - self.assertTrue(result.service_error) - self.assertEqual('IMDS error in /metadata/{0}: HTTP Failed with Status Code 410: Gone'.format(resource_path), result.response) - for _, kwargs in test_subject._http_get.call_args_list: - self.assertTrue('User-Agent' in kwargs['headers']) - self.assertEqual(expected_useragent, kwargs['headers']['User-Agent']) - self.assertEqual(2 if has_primary_ioerror else 1, test_subject._http_get.call_count) - - # IMDS bad request - test_subject._http_get = Mock(side_effect=self._mock_http_get) - self._mock_imds_setup(primary_ioerror=has_primary_ioerror, bad_request=True) - result = test_subject.get_metadata(resource_path=resource_path, is_health=is_health) - self.assertFalse(result.success) - self.assertFalse(result.service_error) - self.assertEqual('IMDS error in /metadata/{0}: [HTTP Failed] [404: reason] Mock not found'.format(resource_path), result.response) - for _, kwargs in test_subject._http_get.call_args_list: - self.assertTrue('User-Agent' in kwargs['headers']) - 
self.assertEqual(expected_useragent, kwargs['headers']['User-Agent']) - self.assertEqual(2 if has_primary_ioerror else 1, test_subject._http_get.call_count) - - def _mock_imds_setup(self, primary_ioerror=False, secondary_ioerror=False, gone_error=False, throttled=False, bad_request=False): - self._mock_imds_expect_fallback = primary_ioerror # pylint: disable=attribute-defined-outside-init - self._mock_imds_primary_ioerror = primary_ioerror # pylint: disable=attribute-defined-outside-init - self._mock_imds_secondary_ioerror = secondary_ioerror # pylint: disable=attribute-defined-outside-init + # IMDS success + test_subject._http_get = Mock(side_effect=self._mock_http_get) + self._mock_imds_setup() + result = test_subject.get_metadata(resource_path=resource_path, is_health=is_health) + self.assertTrue(result.success) + self.assertFalse(result.service_error) + self.assertEqual('Mock success response', result.response) + for _, kwargs in test_subject._http_get.call_args_list: + self.assertTrue('User-Agent' in kwargs['headers']) + self.assertEqual(expected_useragent, kwargs['headers']['User-Agent']) + self.assertEqual(1, test_subject._http_get.call_count) + + # Connection error + test_subject._http_get = Mock(side_effect=self._mock_http_get) + self._mock_imds_setup(ioerror=True) + result = test_subject.get_metadata(resource_path=resource_path, is_health=is_health) + self.assertFalse(result.success) + self.assertFalse(result.service_error) + self.assertEqual('IMDS error in /metadata/{0}: Unable to connect to endpoint'.format(resource_path), result.response) + for _, kwargs in test_subject._http_get.call_args_list: + self.assertTrue('User-Agent' in kwargs['headers']) + self.assertEqual(expected_useragent, kwargs['headers']['User-Agent']) + self.assertEqual(1, test_subject._http_get.call_count) + + # IMDS throttled + test_subject._http_get = Mock(side_effect=self._mock_http_get) + self._mock_imds_setup(throttled=True) + result = 
test_subject.get_metadata(resource_path=resource_path, is_health=is_health) + self.assertFalse(result.success) + self.assertFalse(result.service_error) + self.assertEqual('IMDS error in /metadata/{0}: Throttled'.format(resource_path), result.response) + for _, kwargs in test_subject._http_get.call_args_list: + self.assertTrue('User-Agent' in kwargs['headers']) + self.assertEqual(expected_useragent, kwargs['headers']['User-Agent']) + self.assertEqual(1, test_subject._http_get.call_count) + + # IMDS gone error + test_subject._http_get = Mock(side_effect=self._mock_http_get) + self._mock_imds_setup(gone_error=True) + result = test_subject.get_metadata(resource_path=resource_path, is_health=is_health) + self.assertFalse(result.success) + self.assertTrue(result.service_error) + self.assertEqual('IMDS error in /metadata/{0}: HTTP Failed with Status Code 410: Gone'.format(resource_path), result.response) + for _, kwargs in test_subject._http_get.call_args_list: + self.assertTrue('User-Agent' in kwargs['headers']) + self.assertEqual(expected_useragent, kwargs['headers']['User-Agent']) + self.assertEqual(1, test_subject._http_get.call_count) + + # IMDS bad request + test_subject._http_get = Mock(side_effect=self._mock_http_get) + self._mock_imds_setup(bad_request=True) + result = test_subject.get_metadata(resource_path=resource_path, is_health=is_health) + self.assertFalse(result.success) + self.assertFalse(result.service_error) + self.assertEqual('IMDS error in /metadata/{0}: [HTTP Failed] [404: reason] Mock not found'.format(resource_path), result.response) + for _, kwargs in test_subject._http_get.call_args_list: + self.assertTrue('User-Agent' in kwargs['headers']) + self.assertEqual(expected_useragent, kwargs['headers']['User-Agent']) + self.assertEqual(1, test_subject._http_get.call_count) + + def _mock_imds_setup(self, ioerror=False, gone_error=False, throttled=False, bad_request=False): + self._mock_imds_ioerror = ioerror # pylint: 
disable=attribute-defined-outside-init self._mock_imds_gone_error = gone_error # pylint: disable=attribute-defined-outside-init self._mock_imds_throttled = throttled # pylint: disable=attribute-defined-outside-init self._mock_imds_bad_request = bad_request # pylint: disable=attribute-defined-outside-init def _mock_http_get(self, *_, **kwargs): - if "foo.bar" == kwargs['endpoint'] and not self._mock_imds_expect_fallback: - raise Exception("Unexpected endpoint called") - if self._mock_imds_primary_ioerror and "169.254.169.254" == kwargs['endpoint']: - raise HttpError("[HTTP Failed] GET http://{0}/metadata/{1} -- IOError timed out -- 6 attempts made" - .format(kwargs['endpoint'], kwargs['resource_path'])) - if self._mock_imds_secondary_ioerror and "foo.bar" == kwargs['endpoint']: - raise HttpError("[HTTP Failed] GET http://{0}/metadata/{1} -- IOError timed out -- 6 attempts made" - .format(kwargs['endpoint'], kwargs['resource_path'])) + if self._mock_imds_ioerror: + raise HttpError("[HTTP Failed] GET http://{0}/metadata/{1} -- IOError timed out -- 6 attempts made".format(kwargs['endpoint'], kwargs['resource_path'])) if self._mock_imds_gone_error: raise ResourceGoneError("Resource is gone") if self._mock_imds_throttled: - raise HttpError("[HTTP Retry] GET http://{0}/metadata/{1} -- Status Code 429 -- 25 attempts made" - .format(kwargs['endpoint'], kwargs['resource_path'])) + raise HttpError("[HTTP Retry] GET http://{0}/metadata/{1} -- Status Code 429 -- 25 attempts made".format(kwargs['endpoint'], kwargs['resource_path'])) resp = MagicMock() resp.reason = 'reason' diff --git a/tests/common/protocol/test_protocol_util.py b/tests/common/protocol/test_protocol_util.py index b60ca9af95..494d25319d 100644 --- a/tests/common/protocol/test_protocol_util.py +++ b/tests/common/protocol/test_protocol_util.py @@ -188,8 +188,8 @@ def test_get_protocol_wireserver_to_wireserver_update_removes_metadataserver_art self.assertFalse(os.path.exists(mds_cert_path)) # Check firewall rules 
was reset - protocol_util.osutil.remove_firewall.assert_called_once() - protocol_util.osutil.enable_firewall.assert_called_once() + self.assertEqual(1, protocol_util.osutil.remove_firewall.call_count, "remove_firewall should be called once") + self.assertEqual(1, protocol_util.osutil.enable_firewall.call_count, "enable_firewall should be called once") @patch('azurelinuxagent.common.conf.get_lib_dir') @patch('azurelinuxagent.common.conf.enable_firewall') @@ -234,8 +234,8 @@ def test_get_protocol_metadataserver_to_wireserver_update_removes_metadataserver self.assertTrue(os.path.isfile(ws_cert_path)) # Check firewall rules was reset - protocol_util.osutil.remove_firewall.assert_called_once() - protocol_util.osutil.enable_firewall.assert_called_once() + self.assertEqual(1, protocol_util.osutil.remove_firewall.call_count, "remove_firewall should be called once") + self.assertEqual(1, protocol_util.osutil.enable_firewall.call_count, "enable_firewall should be called once") # Check Protocol File is updated to WireProtocol with open(os.path.join(dir, PROTOCOL_FILE_NAME), "r") as f: diff --git a/tests/common/protocol/test_wire.py b/tests/common/protocol/test_wire.py index 9ce8339e94..2fbd817ff8 100644 --- a/tests/common/protocol/test_wire.py +++ b/tests/common/protocol/test_wire.py @@ -95,11 +95,11 @@ def _test_getters(self, test_data, certsMustBePresent, __, MockCryptUtil, _): protocol.get_goal_state().fetch_extension_manifest(ext_handler.name, ext_handler.manifest_uris) crt1 = os.path.join(self.tmp_dir, - '38B85D88F03D1A8E1C671EB169274C09BC4D4703.crt') + '8979F1AC8C4215827BF3B5A403E6137B504D02A4.crt') crt2 = os.path.join(self.tmp_dir, - 'BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F.crt') + 'F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9.crt') prv2 = os.path.join(self.tmp_dir, - 'BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F.prv') + 'F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9.prv') if certsMustBePresent: self.assertTrue(os.path.isfile(crt1)) self.assertTrue(os.path.isfile(crt2)) @@ -479,6 
+479,28 @@ def test_report_event_large_event(self, patch_send_event, *args): # pylint: dis self.assertEqual(patch_send_event.call_count, 0) + def test_get_header_for_cert_should_use_triple_des(self, *_): + with mock_wire_protocol(wire_protocol_data.DATA_FILE) as protocol: + headers = protocol.client.get_header_for_cert() + self.assertIn("x-ms-cipher-name", headers) + self.assertEqual(headers["x-ms-cipher-name"], "DES_EDE3_CBC", "Unexpected x-ms-cipher-name") + + def test_get_header_for_remote_access_should_use_aes128(self, *_): + with mock_wire_protocol(wire_protocol_data.DATA_FILE) as protocol: + headers = protocol.client.get_header_for_remote_access() + self.assertIn("x-ms-cipher-name", headers) + self.assertEqual(headers["x-ms-cipher-name"], "AES128_CBC", "Unexpected x-ms-cipher-name") + + def test_detect_should_handle_inconsistent_goal_state_errors(self, *_): + data_file = wire_protocol_data.DATA_FILE_VM_SETTINGS # Certificates are checked only on FastTrack goal states + data_file['certs'] = "wire/certs-2.xml" # Change the certificates to force a GoalStateInconsistentError + with mock_wire_protocol(data_file, detect_protocol=False) as protocol: + with patch("azurelinuxagent.common.logger.warn") as mock_warn: + protocol.detect() + self.assertTrue( + any(len(args) == 2 and args[1].startswith("[GoalStateInconsistentError]") for args, _ in mock_warn.call_args_list), + "Did not find any warnings about an GoalStateInconsistentError: {0}".format(mock_warn.call_args_list)) + class TestWireClient(HttpRequestPredicates, AgentTestCase): def test_get_ext_conf_without_extensions_should_retrieve_vmagent_manifests_info(self, *args): # pylint: disable=unused-argument diff --git a/tests/common/test_event.py b/tests/common/test_event.py index 435ac2e80d..51eb15726d 100644 --- a/tests/common/test_event.py +++ b/tests/common/test_event.py @@ -60,7 +60,7 @@ def setUp(self): self.event_dir = os.path.join(self.tmp_dir, EVENTS_DIRECTORY) 
EventLoggerTools.initialize_event_logger(self.event_dir) - threading.current_thread().setName("TestEventThread") + threading.current_thread().name = "TestEventThread" osutil = get_osutil() self.expected_common_parameters = { @@ -70,7 +70,7 @@ def setUp(self): CommonTelemetryEventSchema.ContainerId: AgentGlobals.get_container_id(), CommonTelemetryEventSchema.EventTid: threading.current_thread().ident, CommonTelemetryEventSchema.EventPid: os.getpid(), - CommonTelemetryEventSchema.TaskName: threading.current_thread().getName(), + CommonTelemetryEventSchema.TaskName: threading.current_thread().name, CommonTelemetryEventSchema.KeywordName: json.dumps({"CpuArchitecture": platform.machine()}), # common parameters computed from the OS platform CommonTelemetryEventSchema.OSVersion: EventLoggerTools.get_expected_os_version(), @@ -415,7 +415,7 @@ def test_collect_events_should_be_able_to_process_events_with_non_ascii_characte self.assertEqual(len(event_list), 1) self.assertEqual(TestEvent._get_event_message(event_list[0]), u'World\u05e2\u05d9\u05d5\u05ea \u05d0\u05d7\u05e8\u05d5\u05ea\u0906\u091c') - @skip_if_predicate_true(is_python_version_26_or_34, "Disabled on Python 2.6 and 3.4 for now. 
Need to revisit to fix it") + @skip_if_predicate_true(is_python_version_26_or_34, "Disabled on Python 2.6 and 3.4, they run on containers where the OS commands needed by the test are not present.") def test_collect_events_should_ignore_invalid_event_files(self): self._create_test_event_file("custom_script_1.tld") # a valid event self._create_test_event_file("custom_script_utf-16.tld") diff --git a/tests/common/test_logger.py b/tests/common/test_logger.py index d792eb8577..4e72e00109 100644 --- a/tests/common/test_logger.py +++ b/tests/common/test_logger.py @@ -15,7 +15,6 @@ # Requires Python 2.6+ and Openssl 1.0+ # -import json # pylint: disable=unused-import import os import tempfile from datetime import datetime, timedelta @@ -49,6 +48,7 @@ def tearDown(self): AgentTestCase.tearDown(self) logger.reset_periodic() logger.DEFAULT_LOGGER.appenders *= 0 + logger.set_prefix(None) fileutil.rm_dirs(self.event_dir) @patch('azurelinuxagent.common.logger.Logger.verbose') diff --git a/tests/common/test_singletonperthread.py b/tests/common/test_singletonperthread.py index 7b1972635e..91681f2bd7 100644 --- a/tests/common/test_singletonperthread.py +++ b/tests/common/test_singletonperthread.py @@ -1,12 +1,12 @@ import uuid from multiprocessing import Queue -from threading import Thread, currentThread +from threading import Thread, current_thread from azurelinuxagent.common.singletonperthread import SingletonPerThread from tests.lib.tools import AgentTestCase, clear_singleton_instances -class TestClassToTestSingletonPerThread(SingletonPerThread): +class Singleton(SingletonPerThread): """ Since these tests deal with testing in a multithreaded environment, we employ the use of multiprocessing.Queue() to ensure that the data is consistent. 
@@ -32,7 +32,7 @@ class TestClassToTestSingletonPerThread(SingletonPerThread): def __init__(self): # Set the name of the object to the current thread name - self.name = currentThread().getName() + self.name = current_thread().name # Unique identifier for a class object self.uuid = str(uuid.uuid4()) @@ -47,14 +47,14 @@ def setUp(self): # In a multi-threaded environment, exceptions thrown in the child thread will not be propagated to the parent # thread. In order to achieve that, adding all exceptions to a Queue and then checking that in parent thread. self.errors = Queue() - clear_singleton_instances(TestClassToTestSingletonPerThread) + clear_singleton_instances(Singleton) def _setup_multithread_and_execute(self, func1, args1, func2, args2, t1_name=None, t2_name=None): t1 = Thread(target=func1, args=args1) t2 = Thread(target=func2, args=args2) - t1.setName(t1_name if t1_name else self.THREAD_NAME_1) - t2.setName(t2_name if t2_name else self.THREAD_NAME_2) + t1.name = t1_name if t1_name else self.THREAD_NAME_1 + t2.name = t2_name if t2_name else self.THREAD_NAME_2 t1.start() t2.start() t1.join() @@ -69,7 +69,7 @@ def _setup_multithread_and_execute(self, func1, args1, func2, args2, t1_name=Non @staticmethod def _get_test_class_instance(q, err): try: - obj = TestClassToTestSingletonPerThread() + obj = Singleton() q.put(obj) except Exception as e: err.put(str(e)) @@ -91,8 +91,8 @@ def check_obj(name): return t1_object, t2_object def test_it_should_have_only_one_instance_for_same_thread(self): - obj1 = TestClassToTestSingletonPerThread() - obj2 = TestClassToTestSingletonPerThread() + obj1 = Singleton() + obj2 = Singleton() self.assertEqual(obj1.uuid, obj2.uuid) @@ -137,7 +137,7 @@ def test_singleton_object_should_match_thread_name(self): t1_name = str(uuid.uuid4()) t2_name = str(uuid.uuid4()) - test_class_obj_name = lambda t_name: "%s__%s" % (TestClassToTestSingletonPerThread.__name__, t_name) + test_class_obj_name = lambda t_name: "%s__%s" % (Singleton.__name__, t_name) 
self._setup_multithread_and_execute(func1=self._get_test_class_instance, args1=(instances, self.errors), @@ -146,7 +146,7 @@ def test_singleton_object_should_match_thread_name(self): t1_name=t1_name, t2_name=t2_name) - singleton_instances = TestClassToTestSingletonPerThread._instances # pylint: disable=no-member + singleton_instances = Singleton._instances # pylint: disable=no-member # Assert instance names are consistent with the thread names self.assertIn(test_class_obj_name(t1_name), singleton_instances) diff --git a/tests/common/test_telemetryevent.py b/tests/common/test_telemetryevent.py index ce232dab0b..27a808ddc7 100644 --- a/tests/common/test_telemetryevent.py +++ b/tests/common/test_telemetryevent.py @@ -19,23 +19,23 @@ from tests.lib.tools import AgentTestCase -def get_test_event(name="DummyExtension", op="Unknown", is_success=True, duration=0, version="foo", evt_type="", is_internal=False, - message="DummyMessage", eventId=1): - event = TelemetryEvent(eventId, "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX") - event.parameters.append(TelemetryEventParam(GuestAgentExtensionEventsSchema.Name, name)) - event.parameters.append(TelemetryEventParam(GuestAgentExtensionEventsSchema.Version, str(version))) - event.parameters.append(TelemetryEventParam(GuestAgentExtensionEventsSchema.IsInternal, is_internal)) - event.parameters.append(TelemetryEventParam(GuestAgentExtensionEventsSchema.Operation, op)) - event.parameters.append(TelemetryEventParam(GuestAgentExtensionEventsSchema.OperationSuccess, is_success)) - event.parameters.append(TelemetryEventParam(GuestAgentExtensionEventsSchema.Message, message)) - event.parameters.append(TelemetryEventParam(GuestAgentExtensionEventsSchema.Duration, duration)) - event.parameters.append(TelemetryEventParam(GuestAgentExtensionEventsSchema.ExtensionType, evt_type)) - return event - - class TestTelemetryEvent(AgentTestCase): + @staticmethod + def _get_test_event(name="DummyExtension", op="Unknown", is_success=True, duration=0, 
version="foo", evt_type="", is_internal=False, + message="DummyMessage", eventId=1): + event = TelemetryEvent(eventId, "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX") + event.parameters.append(TelemetryEventParam(GuestAgentExtensionEventsSchema.Name, name)) + event.parameters.append(TelemetryEventParam(GuestAgentExtensionEventsSchema.Version, str(version))) + event.parameters.append(TelemetryEventParam(GuestAgentExtensionEventsSchema.IsInternal, is_internal)) + event.parameters.append(TelemetryEventParam(GuestAgentExtensionEventsSchema.Operation, op)) + event.parameters.append(TelemetryEventParam(GuestAgentExtensionEventsSchema.OperationSuccess, is_success)) + event.parameters.append(TelemetryEventParam(GuestAgentExtensionEventsSchema.Message, message)) + event.parameters.append(TelemetryEventParam(GuestAgentExtensionEventsSchema.Duration, duration)) + event.parameters.append(TelemetryEventParam(GuestAgentExtensionEventsSchema.ExtensionType, evt_type)) + return event + def test_contains_works_for_TelemetryEvent(self): - test_event = get_test_event(message="Dummy Event") + test_event = TestTelemetryEvent._get_test_event(message="Dummy Event") self.assertTrue(GuestAgentExtensionEventsSchema.Name in test_event) self.assertTrue(GuestAgentExtensionEventsSchema.Version in test_event) diff --git a/tests/common/utils/test_distro_version.py b/tests/common/utils/test_distro_version.py new file mode 100644 index 0000000000..bc279377f8 --- /dev/null +++ b/tests/common/utils/test_distro_version.py @@ -0,0 +1,128 @@ +import os +import sys +import unittest + +from tests.lib.tools import AgentTestCase, data_dir + +from azurelinuxagent.common.utils.distro_version import DistroVersion +from azurelinuxagent.common.utils.flexible_version import FlexibleVersion + + +class TestDistroVersion(AgentTestCase): + + def test_it_should_implement_all_comparison_operators(self): + self.assertTrue(DistroVersion("1.0.0") < DistroVersion("1.1.0")) + self.assertTrue(DistroVersion("1.0.0") <= 
DistroVersion("1.0.0")) + self.assertTrue(DistroVersion("1.0.0") <= DistroVersion("1.1.0")) + + self.assertTrue(DistroVersion("1.1.0") > DistroVersion("1.0.0")) + self.assertTrue(DistroVersion("1.1.0") >= DistroVersion("1.1.0")) + self.assertTrue(DistroVersion("1.1.0") >= DistroVersion("1.0.0")) + + self.assertTrue(DistroVersion("1.1.0") != DistroVersion("1.0.0")) + self.assertTrue(DistroVersion("1.1.0") == DistroVersion("1.1.0")) + + def test_it_should_compare_digit_sequences_numerically(self): + self.assertTrue(DistroVersion("2.0.0") < DistroVersion("10.0.0")) + self.assertTrue(DistroVersion("1.2.0") < DistroVersion("1.10.0")) + self.assertTrue(DistroVersion("1.0.2") < DistroVersion("1.0.10")) + self.assertTrue(DistroVersion("2.0.rc.2") < DistroVersion("2.0.rc.10")) + self.assertTrue(DistroVersion("2.0.rc2") < DistroVersion("2.0.rc10")) + + def test_it_should_compare_non_digit_sequences_lexicographically(self): + self.assertTrue(DistroVersion("2.0.alpha") < DistroVersion("2.0.beta")) + self.assertTrue(DistroVersion("2.0.alpha.2") < DistroVersion("2.0.beta.1")) + self.assertTrue(DistroVersion("alpha") < DistroVersion("beta")) + self.assertTrue(DistroVersion("<1.0.0>") < DistroVersion(">1.0.0>")) + + def test_it_should_parse_common_distro_versions(self): + """ + Test that DistroVersion can parse the versions given by azurelinuxagent.common.version.DISTRO_VERSION + (the values in distro_versions.txt are current values from telemetry.) + """ + data_file = os.path.join(data_dir, "distro_versions.txt") + + with open(data_file, "r") as f: + for line in f: + line = line.rstrip() + version = DistroVersion(line) + self.assertNotEqual([], version._fragments) + + self.assertEqual([], DistroVersion("")._fragments) + + def test_it_should_compare_commonly_used_versions(self): + """ + Test that DistroVersion does some common comparisons correctly. 
+ """ + self.assertTrue(DistroVersion("1.0.0") < DistroVersion("2.0.0.")) + self.assertTrue(DistroVersion("1.0.0") < DistroVersion("1.1.0")) + self.assertTrue(DistroVersion("1.0.0") < DistroVersion("1.0.1")) + + self.assertTrue(DistroVersion("1.0.0") == DistroVersion("1.0.0")) + self.assertTrue(DistroVersion("1.0.0") != DistroVersion("2.0.0")) + + self.assertTrue(DistroVersion("13") != DistroVersion("13.0")) + self.assertTrue(DistroVersion("13") < DistroVersion("13.0")) + self.assertTrue(DistroVersion("13") < DistroVersion("13.1")) + + ubuntu_version = DistroVersion("16.10") + self.assertTrue(ubuntu_version in [DistroVersion('16.04'), DistroVersion('16.10'), DistroVersion('17.04')]) + + ubuntu_version = DistroVersion("20.10") + self.assertTrue(DistroVersion('18.04') <= ubuntu_version <= DistroVersion('24.04')) + + redhat_version = DistroVersion("7.9") + self.assertTrue(DistroVersion('7') <= redhat_version <= DistroVersion('9')) + + self.assertTrue(DistroVersion("1.0") < DistroVersion("1.1")) + self.assertTrue(DistroVersion("1.9") < DistroVersion("1.10")) + self.assertTrue(DistroVersion("1.9.9") < DistroVersion("1.10.0")) + self.assertTrue(DistroVersion("1.0.0.0") < DistroVersion("1.2.0.0")) + + self.assertTrue(DistroVersion("1.0") <= DistroVersion("1.1")) + self.assertTrue(DistroVersion("1.1") > DistroVersion("1.0")) + self.assertTrue(DistroVersion("1.1") >= DistroVersion("1.0")) + + self.assertTrue(DistroVersion("1.0") == DistroVersion("1.0")) + self.assertTrue(DistroVersion("1.0") >= DistroVersion("1.0")) + self.assertTrue(DistroVersion("1.0") <= DistroVersion("1.0")) + + def test_uncommon_versions(self): + """ + The comparisons in these tests may occur in prod, and they not always produce a result that makes sense. + More than expressing the desired behavior, these tests are meant to document the current behavior. 
+ """ + self.assertTrue(DistroVersion("2") != DistroVersion("2.0")) + self.assertTrue(DistroVersion("2") < DistroVersion("2.0")) + + self.assertTrue(DistroVersion("10.0_RC2") != DistroVersion("10.0RC2")) + self.assertTrue(DistroVersion("10.0_RC2")._fragments == [10, 0, '_', 'RC', 2]) + self.assertTrue(DistroVersion("10.0RC2")._fragments == [10, 0, 'RC', 2]) + + self.assertTrue(DistroVersion("1.4-rolling") < DistroVersion("1.4-rolling-202402090309")) + + self.assertTrue(DistroVersion("2023") < DistroVersion("2023.02.1")) + + self.assertTrue(DistroVersion("2.1-systemd-alpha") < DistroVersion("2.1-systemd-rc")) + self.assertTrue(DistroVersion("2308a") < DistroVersion("2308beta")) + self.assertTrue(DistroVersion("6.0.0.beta4") < DistroVersion("6.0.0.beta5")) + self.assertTrue(DistroVersion("9.13.1P8X1") < DistroVersion("9.13.1RC1")) + self.assertTrue(DistroVersion("a") < DistroVersion("rc")) + self.assertTrue(DistroVersion("Clawhammer__9.14.0"), DistroVersion("Clawhammer__9.14.1")) + self.assertTrue(DistroVersion("FFFF") < DistroVersion("h")) + self.assertTrue(DistroVersion("None") < DistroVersion("n/a")) + + if sys.version_info[0] == 2: + self.assertTrue(DistroVersion("3.11.2-rc.1") < DistroVersion("3.11.2-rc.a")) + else: + # TypeError: '<' not supported between instances of 'int' and 'str' + with self.assertRaises(TypeError): + _ = DistroVersion("3.11.2-rc.1") < DistroVersion("3.11.2-rc.a") + + # AttributeError: 'FlexibleVersion' object has no attribute '_fragments' + with self.assertRaises(AttributeError): + _ = DistroVersion("1.0.0.0") == FlexibleVersion("1.0.0.0") + + +if __name__ == '__main__': + unittest.main() diff --git a/tests/common/utils/test_extension_process_util.py b/tests/common/utils/test_extension_process_util.py index 316bad6a37..8058292b95 100644 --- a/tests/common/utils/test_extension_process_util.py +++ b/tests/common/utils/test_extension_process_util.py @@ -19,9 +19,9 @@ import subprocess import tempfile -from azurelinuxagent.ga.cgroup import 
CpuCgroup from azurelinuxagent.common.exception import ExtensionError, ExtensionErrorCodes from azurelinuxagent.common.future import ustr +from azurelinuxagent.ga.cpucontroller import CpuControllerV1 from azurelinuxagent.ga.extensionprocessutil import format_stdout_stderr, read_output, \ wait_for_process_completion_or_timeout, handle_process_completion from tests.lib.tools import AgentTestCase, patch, data_dir @@ -52,7 +52,7 @@ def test_wait_for_process_completion_or_timeout_should_terminate_cleanly(self): stdout=subprocess.PIPE, stderr=subprocess.PIPE) - timed_out, ret, _ = wait_for_process_completion_or_timeout(process=process, timeout=5, cpu_cgroup=None) + timed_out, ret, _ = wait_for_process_completion_or_timeout(process=process, timeout=5, cpu_controller=None) self.assertEqual(timed_out, False) self.assertEqual(ret, 0) @@ -70,7 +70,8 @@ def test_wait_for_process_completion_or_timeout_should_kill_process_on_timeout(s # We don't actually mock the kill, just wrap it so we can assert its call count with patch('azurelinuxagent.ga.extensionprocessutil.os.killpg', wraps=os.killpg) as patch_kill: with patch('time.sleep') as mock_sleep: - timed_out, ret, _ = wait_for_process_completion_or_timeout(process=process, timeout=timeout, cpu_cgroup=None) + timed_out, ret, _ = wait_for_process_completion_or_timeout(process=process, timeout=timeout, + cpu_controller=None) # We're mocking sleep to avoid prolonging the test execution time, but we still want to make sure # we're "waiting" the correct amount of time before killing the process @@ -89,7 +90,7 @@ def test_handle_process_completion_should_return_nonzero_when_process_fails(self stdout=subprocess.PIPE, stderr=subprocess.PIPE) - timed_out, ret, _ = wait_for_process_completion_or_timeout(process=process, timeout=5, cpu_cgroup=None) + timed_out, ret, _ = wait_for_process_completion_or_timeout(process=process, timeout=5, cpu_controller=None) self.assertEqual(timed_out, False) self.assertEqual(ret, 2) @@ -105,12 +106,8 @@ def 
test_handle_process_completion_should_return_process_output(self): stderr=stderr, preexec_fn=os.setsid) - process_output = handle_process_completion(process=process, - command=command, - timeout=5, - stdout=stdout, - stderr=stderr, - error_code=42) + process_output = handle_process_completion(process=process, command=command, timeout=5, stdout=stdout, + stderr=stderr, error_code=42) expected_output = "[stdout]\ndummy stdout\n\n\n[stderr]\ndummy stderr\n" self.assertEqual(process_output, expected_output) @@ -130,12 +127,8 @@ def test_handle_process_completion_should_raise_on_timeout(self): stderr=stderr, preexec_fn=os.setsid) - handle_process_completion(process=process, - command=command, - timeout=timeout, - stdout=stdout, - stderr=stderr, - error_code=42) + handle_process_completion(process=process, command=command, timeout=timeout, stdout=stdout, + stderr=stderr, error_code=42) # We're mocking sleep to avoid prolonging the test execution time, but we still want to make sure # we're "waiting" the correct amount of time before killing the process and raising an exception @@ -156,9 +149,9 @@ def test_handle_process_completion_should_log_throttled_time_on_timeout(self): with patch('time.sleep') as mock_sleep: with self.assertRaises(ExtensionError) as context_manager: test_file = os.path.join(self.tmp_dir, "cpu.stat") - shutil.copyfile(os.path.join(data_dir, "cgroups", "cpu.stat_t0"), + shutil.copyfile(os.path.join(data_dir, "cgroups", "v1", "cpu.stat_t0"), test_file) # throttled_time = 50 - cgroup = CpuCgroup("test", self.tmp_dir) + cpu_controller = CpuControllerV1("test", self.tmp_dir) process = subprocess.Popen(command, # pylint: disable=subprocess-popen-preexec-fn shell=True, cwd=self.tmp_dir, @@ -167,13 +160,8 @@ def test_handle_process_completion_should_log_throttled_time_on_timeout(self): stderr=stderr, preexec_fn=os.setsid) - handle_process_completion(process=process, - command=command, - timeout=timeout, - stdout=stdout, - stderr=stderr, - error_code=42, - 
cpu_cgroup=cgroup) + handle_process_completion(process=process, command=command, timeout=timeout, stdout=stdout, + stderr=stderr, error_code=42, cpu_controller=cpu_controller) # We're mocking sleep to avoid prolonging the test execution time, but we still want to make sure # we're "waiting" the correct amount of time before killing the process and raising an exception @@ -200,11 +188,7 @@ def test_handle_process_completion_should_raise_on_nonzero_exit_code(self): stderr=stderr, preexec_fn=os.setsid) - handle_process_completion(process=process, - command=command, - timeout=4, - stdout=stdout, - stderr=stderr, + handle_process_completion(process=process, command=command, timeout=4, stdout=stdout, stderr=stderr, error_code=error_code) self.assertEqual(context_manager.exception.code, error_code) diff --git a/tests/common/utils/test_flexible_version.py b/tests/common/utils/test_flexible_version.py index 7463f4f2c3..89b827bb0f 100644 --- a/tests/common/utils/test_flexible_version.py +++ b/tests/common/utils/test_flexible_version.py @@ -1,9 +1,9 @@ -import random # pylint: disable=unused-import import re import unittest from azurelinuxagent.common.utils.flexible_version import FlexibleVersion + class TestFlexibleVersion(unittest.TestCase): def setUp(self): diff --git a/tests/common/utils/test_text_util.py b/tests/common/utils/test_text_util.py index 5029cfb921..531f03752f 100644 --- a/tests/common/utils/test_text_util.py +++ b/tests/common/utils/test_text_util.py @@ -16,9 +16,7 @@ # import hashlib -import os import unittest -from distutils.version import LooseVersion as Version # pylint: disable=no-name-in-module,import-error import azurelinuxagent.common.utils.textutil as textutil from azurelinuxagent.common.future import ustr @@ -26,15 +24,6 @@ class TestTextUtil(AgentTestCase): - def test_get_password_hash(self): - with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'test_passwords.txt'), 'rb') as in_file: - for data in in_file: - # Remove bom on bytes 
data before it is converted into string. - data = textutil.remove_bom(data) - data = ustr(data, encoding='utf-8') - password_hash = textutil.gen_password_hash(data, 6, 10) - self.assertNotEqual(None, password_hash) - def test_replace_non_ascii(self): data = ustr(b'\xef\xbb\xbfhehe', encoding='utf-8') self.assertEqual('hehe', textutil.replace_non_ascii(data)) @@ -78,23 +67,6 @@ def test_remove_bom(self): data = textutil.remove_bom(data) self.assertEqual(u" ", data) - def test_version_compare(self): - self.assertTrue(Version("1.0") < Version("1.1")) - self.assertTrue(Version("1.9") < Version("1.10")) - self.assertTrue(Version("1.9.9") < Version("1.10.0")) - self.assertTrue(Version("1.0.0.0") < Version("1.2.0.0")) - - self.assertTrue(Version("1.0") <= Version("1.1")) - self.assertTrue(Version("1.1") > Version("1.0")) - self.assertTrue(Version("1.1") >= Version("1.0")) - - self.assertTrue(Version("1.0") == Version("1.0")) - self.assertTrue(Version("1.0") >= Version("1.0")) - self.assertTrue(Version("1.0") <= Version("1.0")) - - self.assertTrue(Version("1.9") < "1.10") - self.assertTrue("1.9" < Version("1.10")) - def test_get_bytes_from_pem(self): content = ("-----BEGIN CERTIFICATE-----\n" "certificate\n" diff --git a/tests/data/cgroups/cgroup.procs b/tests/data/cgroups/cgroup.procs new file mode 100644 index 0000000000..93c25c16df --- /dev/null +++ b/tests/data/cgroups/cgroup.procs @@ -0,0 +1,3 @@ +123 +234 +345 \ No newline at end of file diff --git a/tests/data/cgroups/cpu_mount/cpuacct.stat b/tests/data/cgroups/cpu_mount/cpuacct.stat deleted file mode 100644 index dbdaec701d..0000000000 --- a/tests/data/cgroups/cpu_mount/cpuacct.stat +++ /dev/null @@ -1,2 +0,0 @@ -user 50000 -system 100000 diff --git a/tests/data/cgroups/hybrid/sys_fs_cgroup_cgroup.controllers b/tests/data/cgroups/hybrid/sys_fs_cgroup_cgroup.controllers new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/data/cgroups/proc_self_cgroup_azure_slice 
b/tests/data/cgroups/proc_self_cgroup_azure_slice new file mode 100644 index 0000000000..58df643b24 --- /dev/null +++ b/tests/data/cgroups/proc_self_cgroup_azure_slice @@ -0,0 +1,13 @@ +12:blkio:/azure.slice/walinuxagent.service +11:cpu,cpuacct:/azure.slice/walinuxagent.service +10:devices:/azure.slice/walinuxagent.service +9:pids:/azure.slice/walinuxagent.service +8:memory:/azure.slice/walinuxagent.service +7:freezer:/ +6:hugetlb:/ +5:perf_event:/ +4:net_cls,net_prio:/ +3:cpuset:/ +2:rdma:/ +1:name=systemd:/azure.slice/walinuxagent.service +0::/azure.slice/walinuxagent.service diff --git a/tests/data/cgroups/sys_fs_cgroup_unified_cgroup.controllers b/tests/data/cgroups/sys_fs_cgroup_unified_cgroup.controllers deleted file mode 100644 index 2a03d239de..0000000000 --- a/tests/data/cgroups/sys_fs_cgroup_unified_cgroup.controllers +++ /dev/null @@ -1,7 +0,0 @@ -io -memory -pids -perf_event -rdma -cpu -freezer \ No newline at end of file diff --git a/tests/data/cgroups/cpu.stat b/tests/data/cgroups/v1/cpu.stat similarity index 100% rename from tests/data/cgroups/cpu.stat rename to tests/data/cgroups/v1/cpu.stat diff --git a/tests/data/cgroups/cpu.stat_t0 b/tests/data/cgroups/v1/cpu.stat_t0 similarity index 100% rename from tests/data/cgroups/cpu.stat_t0 rename to tests/data/cgroups/v1/cpu.stat_t0 diff --git a/tests/data/cgroups/cpu.stat_t1 b/tests/data/cgroups/v1/cpu.stat_t1 similarity index 100% rename from tests/data/cgroups/cpu.stat_t1 rename to tests/data/cgroups/v1/cpu.stat_t1 diff --git a/tests/data/cgroups/cpuacct.stat b/tests/data/cgroups/v1/cpuacct.stat similarity index 100% rename from tests/data/cgroups/cpuacct.stat rename to tests/data/cgroups/v1/cpuacct.stat diff --git a/tests/data/cgroups/cpuacct.stat_t0 b/tests/data/cgroups/v1/cpuacct.stat_t0 similarity index 100% rename from tests/data/cgroups/cpuacct.stat_t0 rename to tests/data/cgroups/v1/cpuacct.stat_t0 diff --git a/tests/data/cgroups/cpuacct.stat_t1 b/tests/data/cgroups/v1/cpuacct.stat_t1 similarity 
index 100% rename from tests/data/cgroups/cpuacct.stat_t1 rename to tests/data/cgroups/v1/cpuacct.stat_t1 diff --git a/tests/data/cgroups/cpuacct.stat_t2 b/tests/data/cgroups/v1/cpuacct.stat_t2 similarity index 100% rename from tests/data/cgroups/cpuacct.stat_t2 rename to tests/data/cgroups/v1/cpuacct.stat_t2 diff --git a/tests/data/cgroups/memory_mount/memory.max_usage_in_bytes b/tests/data/cgroups/v1/memory.max_usage_in_bytes similarity index 100% rename from tests/data/cgroups/memory_mount/memory.max_usage_in_bytes rename to tests/data/cgroups/v1/memory.max_usage_in_bytes diff --git a/tests/data/cgroups/memory_mount/memory.stat b/tests/data/cgroups/v1/memory.stat similarity index 100% rename from tests/data/cgroups/memory_mount/memory.stat rename to tests/data/cgroups/v1/memory.stat diff --git a/tests/data/cgroups/missing_memory_counters/memory.stat b/tests/data/cgroups/v1/memory.stat_missing similarity index 100% rename from tests/data/cgroups/missing_memory_counters/memory.stat rename to tests/data/cgroups/v1/memory.stat_missing diff --git a/tests/data/cgroups/proc_pid_cgroup b/tests/data/cgroups/v1/proc_pid_cgroup similarity index 100% rename from tests/data/cgroups/proc_pid_cgroup rename to tests/data/cgroups/v1/proc_pid_cgroup diff --git a/tests/data/cgroups/proc_self_cgroup b/tests/data/cgroups/v1/proc_self_cgroup similarity index 100% rename from tests/data/cgroups/proc_self_cgroup rename to tests/data/cgroups/v1/proc_self_cgroup diff --git a/tests/data/cgroups/proc_stat_t0 b/tests/data/cgroups/v1/proc_stat_t0 similarity index 100% rename from tests/data/cgroups/proc_stat_t0 rename to tests/data/cgroups/v1/proc_stat_t0 diff --git a/tests/data/cgroups/proc_stat_t1 b/tests/data/cgroups/v1/proc_stat_t1 similarity index 100% rename from tests/data/cgroups/proc_stat_t1 rename to tests/data/cgroups/v1/proc_stat_t1 diff --git a/tests/data/cgroups/proc_stat_t2 b/tests/data/cgroups/v1/proc_stat_t2 similarity index 100% rename from tests/data/cgroups/proc_stat_t2 
rename to tests/data/cgroups/v1/proc_stat_t2 diff --git a/tests/data/cgroups/v2/cpu.stat b/tests/data/cgroups/v2/cpu.stat new file mode 100644 index 0000000000..6fcb7b86ff --- /dev/null +++ b/tests/data/cgroups/v2/cpu.stat @@ -0,0 +1,9 @@ +usage_usec 817045397 +user_usec 742283732 +system_usec 74761665 +core_sched.force_idle_usec 0 +nr_periods 165261 +nr_throttled 162912 +throttled_usec 15735198706 +nr_bursts 0 +burst_usec 0 diff --git a/tests/data/cgroups/v2/cpu.stat_t0 b/tests/data/cgroups/v2/cpu.stat_t0 new file mode 100644 index 0000000000..6fcb7b86ff --- /dev/null +++ b/tests/data/cgroups/v2/cpu.stat_t0 @@ -0,0 +1,9 @@ +usage_usec 817045397 +user_usec 742283732 +system_usec 74761665 +core_sched.force_idle_usec 0 +nr_periods 165261 +nr_throttled 162912 +throttled_usec 15735198706 +nr_bursts 0 +burst_usec 0 diff --git a/tests/data/cgroups/v2/cpu.stat_t1 b/tests/data/cgroups/v2/cpu.stat_t1 new file mode 100644 index 0000000000..a2eaecf6e1 --- /dev/null +++ b/tests/data/cgroups/v2/cpu.stat_t1 @@ -0,0 +1,9 @@ +usage_usec 819624087 +user_usec 744545316 +system_usec 75078770 +core_sched.force_idle_usec 0 +nr_periods 165783 +nr_throttled 163430 +throttled_usec 15796563650 +nr_bursts 0 +burst_usec 0 diff --git a/tests/data/cgroups/v2/cpu.stat_t2 b/tests/data/cgroups/v2/cpu.stat_t2 new file mode 100644 index 0000000000..cca6a6e425 --- /dev/null +++ b/tests/data/cgroups/v2/cpu.stat_t2 @@ -0,0 +1,9 @@ +usage_usec 822052295 +user_usec 746640066 +system_usec 75412229 +core_sched.force_idle_usec 0 +nr_periods 166274 +nr_throttled 163917 +throttled_usec 15853013984 +nr_bursts 0 +burst_usec 0 diff --git a/tests/data/cgroups/v2/memory.events b/tests/data/cgroups/v2/memory.events new file mode 100644 index 0000000000..ee154297aa --- /dev/null +++ b/tests/data/cgroups/v2/memory.events @@ -0,0 +1,6 @@ +low 0 +high 9 +max 0 +oom 0 +oom_kill 0 +oom_group_kill 0 diff --git a/tests/data/cgroups/v2/memory.events_missing b/tests/data/cgroups/v2/memory.events_missing new file mode 100644 
index 0000000000..5a5d05a345 --- /dev/null +++ b/tests/data/cgroups/v2/memory.events_missing @@ -0,0 +1,5 @@ +low 0 +max 0 +oom 0 +oom_kill 0 +oom_group_kill 0 diff --git a/tests/data/cgroups/v2/memory.peak b/tests/data/cgroups/v2/memory.peak new file mode 100644 index 0000000000..25140d458b --- /dev/null +++ b/tests/data/cgroups/v2/memory.peak @@ -0,0 +1 @@ +194494464 diff --git a/tests/data/cgroups/v2/memory.stat b/tests/data/cgroups/v2/memory.stat new file mode 100644 index 0000000000..0b0d4c52d4 --- /dev/null +++ b/tests/data/cgroups/v2/memory.stat @@ -0,0 +1,53 @@ +anon 17589300 +file 134553600 +kernel 25653248 +kernel_stack 0 +pagetables 0 +sec_pagetables 0 +percpu 726400 +sock 0 +vmalloc 0 +shmem 0 +zswap 0 +zswapped 0 +file_mapped 0 +file_dirty 12288 +file_writeback 0 +swapcached 0 +anon_thp 0 +file_thp 0 +shmem_thp 0 +inactive_anon 0 +active_anon 0 +inactive_file 127213568 +active_file 7340032 +unevictable 0 +slab_reclaimable 24061424 +slab_unreclaimable 0 +slab 24061424 +workingset_refault_anon 0 +workingset_refault_file 0 +workingset_activate_anon 0 +workingset_activate_file 0 +workingset_restore_anon 0 +workingset_restore_file 0 +workingset_nodereclaim 128 +pgscan 56624 +pgsteal 56622 +pgscan_kswapd 56624 +pgscan_direct 0 +pgscan_khugepaged 0 +pgsteal_kswapd 56622 +pgsteal_direct 0 +pgsteal_khugepaged 0 +pgfault 3673191 +pgmajfault 1 +pgrefill 124195 +pgactivate 2 +pgdeactivate 0 +pglazyfree 0 +pglazyfreed 0 +zswpin 0 +zswpout 0 +thp_fault_alloc 255 +thp_collapse_alloc 111 diff --git a/tests/data/cgroups/v2/memory.stat_missing b/tests/data/cgroups/v2/memory.stat_missing new file mode 100644 index 0000000000..96d43db68e --- /dev/null +++ b/tests/data/cgroups/v2/memory.stat_missing @@ -0,0 +1,51 @@ +kernel 25653248 +kernel_stack 0 +pagetables 0 +sec_pagetables 0 +percpu 726400 +sock 0 +vmalloc 0 +shmem 0 +zswap 0 +zswapped 0 +file_mapped 0 +file_dirty 12288 +file_writeback 0 +swapcached 0 +anon_thp 0 +file_thp 0 +shmem_thp 0 +inactive_anon 0 +active_anon 
0 +inactive_file 127213568 +active_file 7340032 +unevictable 0 +slab_reclaimable 24061424 +slab_unreclaimable 0 +slab 24061424 +workingset_refault_anon 0 +workingset_refault_file 0 +workingset_activate_anon 0 +workingset_activate_file 0 +workingset_restore_anon 0 +workingset_restore_file 0 +workingset_nodereclaim 128 +pgscan 56624 +pgsteal 56622 +pgscan_kswapd 56624 +pgscan_direct 0 +pgscan_khugepaged 0 +pgsteal_kswapd 56622 +pgsteal_direct 0 +pgsteal_khugepaged 0 +pgfault 3673191 +pgmajfault 1 +pgrefill 124195 +pgactivate 2 +pgdeactivate 0 +pglazyfree 0 +pglazyfreed 0 +zswpin 0 +zswpout 0 +thp_fault_alloc 255 +thp_collapse_alloc 111 diff --git a/tests/data/cgroups/v2/memory.swap.current b/tests/data/cgroups/v2/memory.swap.current new file mode 100644 index 0000000000..b92677edb9 --- /dev/null +++ b/tests/data/cgroups/v2/memory.swap.current @@ -0,0 +1 @@ +20000 diff --git a/tests/data/cgroups/v2/proc_pid_cgroup b/tests/data/cgroups/v2/proc_pid_cgroup new file mode 100644 index 0000000000..8a1f8d0bed --- /dev/null +++ b/tests/data/cgroups/v2/proc_pid_cgroup @@ -0,0 +1 @@ +0::/system.slice/Microsoft.A.Sample.Extension_1.0.1_aeac05dc-8c24-4542-95f2-a0d6be1c5ba7.scope diff --git a/tests/data/cgroups/v2/proc_self_cgroup b/tests/data/cgroups/v2/proc_self_cgroup new file mode 100644 index 0000000000..0027b4040a --- /dev/null +++ b/tests/data/cgroups/v2/proc_self_cgroup @@ -0,0 +1 @@ +0::/system.slice/walinuxagent.service diff --git a/tests/data/cgroups/v2/proc_uptime_t0 b/tests/data/cgroups/v2/proc_uptime_t0 new file mode 100644 index 0000000000..d035316d9c --- /dev/null +++ b/tests/data/cgroups/v2/proc_uptime_t0 @@ -0,0 +1 @@ +776968.02 1495073.30 diff --git a/tests/data/cgroups/v2/proc_uptime_t1 b/tests/data/cgroups/v2/proc_uptime_t1 new file mode 100644 index 0000000000..f0660cf121 --- /dev/null +++ b/tests/data/cgroups/v2/proc_uptime_t1 @@ -0,0 +1 @@ +777350.57 1495797.44 diff --git a/tests/data/cgroups/v2/proc_uptime_t2 b/tests/data/cgroups/v2/proc_uptime_t2 new file 
mode 100644 index 0000000000..ae3e36aad1 --- /dev/null +++ b/tests/data/cgroups/v2/proc_uptime_t2 @@ -0,0 +1 @@ +779218.68 1499425.34 diff --git a/tests/data/cgroups/v2/sys_fs_cgroup_cgroup.subtree_control b/tests/data/cgroups/v2/sys_fs_cgroup_cgroup.subtree_control new file mode 100644 index 0000000000..c94e05c420 --- /dev/null +++ b/tests/data/cgroups/v2/sys_fs_cgroup_cgroup.subtree_control @@ -0,0 +1 @@ +cpuset cpu io memory pids diff --git a/tests/data/cgroups/v2/sys_fs_cgroup_cgroup.subtree_control_empty b/tests/data/cgroups/v2/sys_fs_cgroup_cgroup.subtree_control_empty new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/data/distro_versions.txt b/tests/data/distro_versions.txt new file mode 100644 index 0000000000..aa0bb3cbd6 --- /dev/null +++ b/tests/data/distro_versions.txt @@ -0,0 +1,1501 @@ +# +0.0.0.0_99466 +0.0.0.0_99492 +0.0.0.0_99494 +0.0.0.0_99496 +0.0.0.0_99500 +0.0.0.0_99504 +0.0.0.0_99506 +0.0.0.0_99530 +0.0.0.0_99533 +0.0.0.0_99539 +0.0.0.0_99541 +0.0.0.0_99543 +0.0.0.0_99560 +0.0.0.0_99562 +0.0.0.0_99570 +0.0.0.0_99572 +0.0.0.0_99580 +0.0.0.0_99587 +0.0.0.0_99589 +0.0.0.0_99591 +0.0.0.0_99595 +0.0.0.0_99597 +0.0.0.0_99634 +0.0.0.0_99637 +0.0.0.0_99639 +0.0.0.0_99646 +0.0.0.0_99660 +0.0.0.0_99664 +0.0.0.0_99665 +0.0.0.0_99669 +0.0.0.0_99681 +0.0.0.0_99696 +0.0.0.0_99702 +0.0.0.0_99704 +0.0.0.0_99710 +0.0.0.0_99815 +0.0.0.0_99824 +0.0.0.0_99826 +0.0.0.0_99828 +0.0.0.0_99835 +0.0.0.0_99839 +0.0.0.0_99841 +0.10.1 +0.11.1 +0.12.1 +0.13.1 +0.14.1 +0.6.1 +0.6.2 +0.6.3 +0.8.1 +0.9.1 +0.999.0.0-1093544 +1.0 +10 +10.0.1.0 +10.0.2.0 +10.0.3.0 +10.0.3.1 +10.0.4.0 +10.0.5.0 +10.0.6.0 +10.0.7.0 +10.0_RC2 +10.1 +10.10 +10.11 +10.12 +10.13 +10.2 +1.0.20210807 +1.0.20210928 +1.0.20211027 +1.0.20211230 +1.0.20220122 +1.0.20220127 +1.0.20220307 +1.0.20220331 +1.0.20220504 +1.0.20220521 +1.0.20220608 +1.0.20220709 +1.0.20220805 +1.0.20220817 +1.0.20220909 +1.0.20220926 +1.0.20221007 +1.0.20221028 +1.0.20221119 +1.0.20221202 +1.0.20221220 
+1.0.20230106 +1.0.20230123 +1.0.20230208 +1.0.20230225 +1.0.20230308 +1.0.20230330 +1.0.20230414 +1.0.20230427 +1.0.20230518 +1.0.20230607 +1.0.20230615 +1.0.20230713 +1.0.20230811 +10.3 +10.4 +10.5 +10.6 +1063 +1069 +10.7 +10.8 +1084 +1086 +10.9 +11 +11.0.108.0 +11.0.93.0 +11.0.96.0 +11.1 +11.2 +11.3 +11.33 +11.4 +11.7 +11.8 +11.9 +11-updates +12 +12.0 +12.04 +12.1 +12.10 +12.10.1 +12.10.2 +12.2 +12.3 +12.4 +12.5 +12.7.2 +1.27.5 +12.8 +12.8.2 +12.9 +12.9.2 +12.9.3 +12.9.4 +12-updates +13 +13.0 +13.1 +13.10 +13.2 +13.3 +1353.7.0 +14.0 +14.04 +14.1 +14.1.0.10 +14.10.1.10 +14.10.1.11 +14.11.1.10 +14.12.1.10 +14.12.1.11 +14.13.1.10 +14.14.1.10 +14.15.1.10 +14.16.1.10 +14.2 +14.2.0.0 +14.2.0.20 +14.3.0.10 +14.3.0.20 +14.3.0.21 +14.4.0.10 +14.4.0.16 +14.4.1.10 +14.5.0.11 +14.5.0.20 +14.6.0.10 +14.6.0.20 +14.6.0.30 +14.6.1.10 +14.6.1.11 +14.7.0.20 +14.7.0.30 +14.7.0.40 +14.7.0.41 +14.7.0.50 +14.7.0.60 +14.7.1.100 +14.7.1.20 +14.7.1.31 +14.7.1.40 +14.7.1.426 +14.7.1.50 +14.7.1.60 +14.7.1.61 +14.7.1.62 +14.7.1.70 +14.7.1.71 +14.7.1.80 +14.7.1.90 +14.8.1.10 +14.9.1.10 +14.9.1.11 +1.4-rolling-202402090309 +1.4-rolling-202402241557 +15 +15.0 +15.1 +15.2 +15.3 +153.1 +15.4 +15.5 +15.6 +1576.5.0 +16.04 +16.10 +16.1-11023 +16.1-11047 +16.1-11052 +16.1-11057 +16.1-11065 +16.1-11066 +16.1-11067 +16.1-11079 +1688.5.3 +17.04 +17.10 +17.3 +18 +18.04 +18.06.4 +18.10 +1855.4.0 +1883.1.0 +19 +19.04 +19.10 +1911.1.1 +1911.3.0 +2 +2.0 +20 +20.04 +20.10 +20.10.10 +20.10.12 +20.10.13 +20.10.9 +2015.11-git +2019.2 +2.0.20220124 +2.0.20220226 +2.0.20220325 +2.0.20220403 +2.0.20220409 +2.0.20220426 +2.0.20220527 +2.0.20220617 +2.0.20220625 +2.0.20220713 +2.0.20220731 +2.0.20220804 +2.0.20220824 +2.0.20220909 +2.0.20220916 +2.0.20220921 +2.0.20221004 +2.0.20221010 +2.0.20221026 +2.0.20221029 +2.0.20221110 +2.0.20221122 +2.0.20221203 +2.0.20221215 +2.0.20221218 +2.0.20221222 +2.0.20230107 +2.0.20230126 +2.0.20230208 +2.0.20230212 +2.0.20230218 +2.0.20230303 +2.0.20230321 +2.0.20230407 
+2.0.20230410 +2.0.20230426 +2.0.20230518 +2.0.20230526 +2.0.20230609 +2.0.20230611 +2.0.20230621 +2.0.20230630 +2.0.20230721 +2.0.20230805 +2.0.20230811 +2.0.20230823 +2.0.20230904 +2.0.20230924 +2.0.20231004 +2.0.20231101 +2.0.20231106 +2.0.20231115 +2.0.20231130 +2.0.20240111 +2.0.20240112 +2.0.20240117 +2.0.20240123 +2.0.20240202 +2.0.20240208 +2.0.20240209 +2.0.20240211 +2.0.20240212 +2.0.20240213 +2.0.20240214 +2.0.20240215 +2.0.20240216 +2.0.20240217 +2.0.20240218 +2.0.20240219 +2.0.20240220 +2.0.20240221 +2.0.20240222 +2.0.20240223 +2.0.20240224 +2.0.20240225 +2.0.20240226 +2.0.20240227 +2.0.20240228 +2.0.20240229 +2021.1 +2021.4 +2022.2 +2022.3 +2022.4 +2023 +2023.02.1 +2023.1 +2023.2 +2023.3 +2023.4 +2023.5.0 +2024.1 +2.1 +2.10 +21.04 +2.1.1 +2.11 +21.10 +2.1.2 +2.12 +2.1.3 +2.13 +21.3 +2135.4.0 +2.14 +2.15 +2.16 +2.17 +2.18 +2.19 +2191.5.0 +2.1-systemd-rc1 +2.2 +22 +2.2.0 +22.03 +22.04 +2.2.1 +2.21 +22.10 +22.11 +22.1.10_4 +22.1.4_1 +2.22 +2.26 +22.7.11_1 +22.7_4 +22.7.9_3 +2.3 +23 +2.30 +2303.3.0 +23.04 +23.05 +2308a +2308b +2.31 +23.10 +23.10.2 +23.11 +23.1.11 +23.1.11_2 +23.1.1_2 +23.1.2 +23.1_6 +23.1.7_3 +23.1.8 +23.4.2_4 +2345.3.0 +2345.3.1 +23.7.10_1 +23.7.11 +23.7.12 +23.7.12_5 +23.7.1_3 +23.7.4 +23.7.5 +23.7.6 +23.7.9 +2.3.91 +2.4 +24 +24.04 +24.05 +24.1.1 +24.1_1 +2411.1.0 +2411.1.1 +24.1.2 +24.1.2_1 +2430.0.0 +2466.0.0 +2492.0.0 +2.5 +2512.1.0 +2512.2.0 +2512.3.0 +2512.4.0 +2512.5.0 +2513.0.0 +2513.0.1 +2513.1.0 +2513.2.0 +2513.3.0 +2.5.4 +2.5-5155 +2.5-5193 +2.5-5201 +2.5-5202 +2.5-5204 +2592.0.0 +2.6 +2605.0.0 +2605.1.0 +2605.10.0 +2605.11.0 +2605.12.0 +2605.2.0 +2605.3.0 +2605.4.0 +2605.5.0 +2605.6.0 +2605.7.0 +2605.8.0 +2605.9.0 +2632.0.0 +2632.1.0 +2643.0.0 +2643.1.0 +2643.1.1 +2661.0.0 +2671.0.0 +2697.0.0 +2705.0.0 +2705.1.0 +2705.1.1 +2705.1.2 +2723.0.0 +2748.0.0 +2765.0.0 +2765.1.0 +2765.2.0 +2765.2.1 +2765.2.2 +2765.2.3 +2765.2.4 +2765.2.5 +2765.2.6 +2783.0.0 +2.8 +2801.0.0 +2801.0.1 +2801.1.0 +2823.0.0 +2823.1.0 +2823.1.1 +2823.1.2 
+2823.1.3 +2857.0.0 +2879.0.0 +2879.0.1 +2.9 +29 +2905.0.0 +2905.1.0 +2905.2.0 +2905.2.1 +2905.2.2 +2905.2.3 +2905.2.4 +2905.2.5 +2905.2.6 +2920.0.0 +2920.1.0 +2942.0.0 +2942.1.0 +2942.1.1 +2942.1.2 +2955.0.0 +2969.0.0 +2983.0.0 +2983.1.0 +2983.1.1 +2983.1.2 +2983.2.0 +2983.2.1 +3 +3.0 +3.0.0.448 +3.0.0.480 +3005.0.0 +3005.0.1 +3.0.310-6230 +3.0.310-6235 +3.0.310-6240 +3.0.310-6242 +3.0.310-6250 +3.0.310-6252 +3.0.310-6264 +3033.0.0 +3033.1.0 +3033.1.1 +3033.2.0 +3033.2.1 +3033.2.2 +3033.2.3 +3033.2.4 +3033.3.0 +3033.3.1 +3033.3.10 +3033.3.11 +3033.3.12 +3033.3.13 +3033.3.14 +3033.3.15 +3033.3.16 +3033.3.17 +3033.3.18 +3033.3.2 +3033.3.3 +3033.3.4 +3033.3.5 +3033.3.6 +3033.3.7 +3033.3.8 +3033.3.9 +3046.0.0 +3066.0.0 +3066.1.0 +3066.1.1 +3066.1.2 +3.10.3 +3.11.0 +3.11.0-20240102t2200edt-tagged +3.11.2-dev20240209t1755utc-autotag +3.11.2-dev20240212t1512utc-autotag +3.11.2-dev20240212t2004utc-autotag +3.11.2-dev20240212t2307utc-autotag +3.11.2-dev20240213t0602utc-autotag +3.11.2-dev20240214t1413utc-autotag +3.11.2-rc.1 +3.11.2-rc.2 +3.11.2-rc.3 +3.11.2-rc.4 +3115.0.0 +3.12.0 +3.1.22-1.8 +3127.0.0 +3139.0.0 +3139.1.0 +3139.1.1 +3139.2.0 +3139.2.1 +3139.2.2 +3139.2.3 +3.14.2 +3.15.0 +3.15.10 +3.15.11 +3.15.4 +3.15.7 +3.15.8 +3.15.9 +3.16.2 +3.16.4 +3165.0.0 +3.17.1 +3.17.7 +3.18.0 +3.18.5 +3185.0.0 +3185.1.0 +3185.1.1 +32 +3200.0.0 +3227.0.0 +3227.1.0 +3227.1.1 +3227.2.1 +3227.2.2 +3227.2.3 +3227.2.4 +3255.0.0 +3277.0.0 +3277.1.0 +3277.1.1 +3277.1.2 +33 +3305.0.0 +3305.0.1 +3.3.2009 +3.3.4 +3346.0.0 +3346.1.0 +3374.0.0 +3374.1.0 +3374.1.1 +3374.2.0 +3374.2.1 +3374.2.2 +3374.2.3 +3374.2.4 +3374.2.5 +34 +3402.0.0 +3402.0.1 +3402.1.0 +3417.0.0 +3417.1.0 +3432.0.0 +3432.1.0 +3446.0.0 +3446.1.0 +3446.1.1 +3480.0.0 +3493.0.0 +3493.1.0 +3.5 +35 +3.5.0 +3510.0.0 +3510.1.0 +3510.2.0 +3510.2.1 +3510.2.2 +3510.2.3 +3510.2.4 +3510.2.5 +3510.2.6 +3510.2.7 +3510.2.8 +3510.3.1 +3510.3.2 +3.5.2-dev20230505t0041edt-manual +3535.0.0 +3549.0.0 +3549.1.0 +3549.1.1 +3.5.5 +3.5.6 +3572.0.0 
+3572.0.1 +3572.1.0 +36 +3602.0.0 +3602.1.0 +3602.1.1 +3602.1.2 +3602.1.3 +3602.1.4 +3602.1.5 +3602.1.6 +3602.2.0 +3602.2.1 +3602.2.2 +3602.2.3 +3619.0.0 +3637.0.0 +3654.0.0 +3665.0.0 +3689.0.0 +37 +3717.0.0 +3732.0.0 +3745.1.0 +3760.0.0 +3760.1.0 +3760.1.1 +3760.2.0 +3794.0.0 +38 +3815.0.0 +3815.1.0 +3815.2.0 +3850.0.0 +3850.1.0 +3874.0.0 +3878.0.0 +3885.0.0 +3886.0.0 +3888.0.0 +3892.0.0 +39 +4 +4.0 +40 +41 +42.3 +4.24.3.1 +4.24.3.2 +4.26.1.1 +4.27.0 +4.27.3 +4.32 +4.33 +4.3.3-117 +4.6 +4.7 +5.0 +5.1 +5.10.0-18-cloud-amd64 +5.11 +5.2 +5.3 +5.4 +5.4.0.00198 +5.4.1.00026 +5.4.1.00056 +5.6 +6 +6.0 +6.0.0.beta4 +6.1 +6.10 +6.10.0 +6.11.0 +6.11.1 +6.11.2 +6.11.3 +6.11.4 +6.11.5 +6.11.6 +6.11.7 +6.12.0 +6.1.22 +6.13.0 +6.14.0 +6.2 +6.3 +6.4 +6.5 +6.5.0 +6.5.4 +6.5.5 +6.5.6 +6.5.7 +6.6 +6.7 +6.7.2 +6.8 +6.8.2 +6.9 +6.9.1 +6.9.2 +7 +7.0 +7.0.1 +7.0.1406 +7.1 +7.10.0.0-1017741 +7.10.0.20-1023227 +7.10.1.0-1042928 +7.10.1.10-1068159 +7.10.1.1-1049892 +7.10.1.15-1078832 +7.10.1.20-1090468 +7.11 +7.11.0.0-1035502 +7.1.1503 +7.12.0.0-1053185 +7.13.0.10-1078781 +7.13.0.20-1082704 +7.13.1.0-1085623 +7.13.1.0-1093040 +7.13.1.0-1093865 +7.2 +7.2.0 +7.2.1511 +7.3 +7.3.1611 +7.4 +7.4.1708 +7.5 +7.5.0.10-680584 +7.5.1804 +7.6 +7.6.1810 +7.7 +7.7.0.7-1007134 +7.7.1.0-1007743 +7.7.1908 +7.7.5.11-1046187 +7.7.5.20-1063368 +7.7.5.25-1078970 +7.7.5.30-1089690 +7.7.5.30-1091295 +7.8 +7.8.0.0-1008134 +7.8.0.10-1009761 +7.8.0.20-1011246 +7.8.0.8.0 +7.8.1.7.0 +7.8.2003 +7.8.2.1 +7.9 +7.9.0.0-1011258 +7.9.2009 +8 +8. 
+8.0 +8.0.0.0-1091527 +8.0.0.0-1091581 +8.0.0.0-1091682 +8.0.0.0-1091972 +8.0.0.0-1092170 +8.0.0.0-1092707 +8.0.0.0-1092873 +8.0.0.0-1093024 +8.0.0.0-1093042 +8.0.0.0-1093255 +8.0.0.0-1094303 +8.0.1905 +8.1 +8.1.0 +8.10 +8.1.0.0-1092701 +8.1.0.0-1093328 +8.11 +8.1.1911 +8.1.3-p1-24838 +8.1.3-p2-24912 +8.1.3-p3-24955 +8.1.3-p4-25026 +8.1.3-p5-25104 +8.1.3-p6-25199 +8.1.3-p7-25298 +8.1.3-p8-25333 +8.1.3-p8-25334 +8.1.3-p8-25335 +8.1.3-p8-25336 +8.1.3-p8-25339 +8.1.3-p8-25341 +8.1.3-p8-25342 +8.1.3-p8-25343 +8.1.3-p8-25345 +8.1.3-p8-25349 +8.1.3-p8-25350 +8.1.3-p8-25351 +8.1.3-p8-25352 +8.1.3-p8-25353 +8.1.3-p8-25354 +8.1.3-p8-25355 +8.1.3-p8-25356 +8.1.3-p8-25357 +8.1.3-p8-25360 +8.1.3-p8-25361 +8.1.3-p8-25362 +8.1.3-p8-25363 +8.1.3-p8-25364 +8.1.3-p8-25365 +8.1.3-p8-25366 +8.1.3-p8-25367 +8.1.3-p8-25370 +8.1.3-p8-25371 +8.1.3-p8-25372 +8.1.3-p8-25373 +8.1.3-p8-25375 +8.1.3-p8-25376 +8.1.3-p8-khil.un-08415223c9a99546b566df0dbc683ffa378cfd77 +8.1.3-p8-khil.un-29562fd3e583d0b1529db6f92fedf409aec35c53 +8.1.3-p8-khil.un-7802727eceff485a5339f081ba97c8eccc697c62 +8.1.4-p1-25119 +8.2 +8.2.2004 +8.3 +8.3.0.6_87213 +8.3.2011 +8.3.2.1_85580 +8.3.2.2_85607 +8.3.3 +8.3.8.0_86519 +8.3.8.0_86525 +8.4 +8.4.1 +8.4.2 +8.4.2105 +8.4.3 +8.5 +8.5.0 +8.5.1 +8.5.2 +8.5.2111 +8.5.8 +8.6 +8.6.2 +8.6.3 +8.6.7 +8.7 +8.8 +8.8.1 +8.9 +9 +9.0 +9.0.0-p1-24746 +9.0.0-p2-24858 +9.0.1-24945 +9.0.1-p1-25067 +9.0.2-25173 +9.0.2-p1-25268 +9.0.3-25350 +9.0.3-p1-25395 +9.0.3-p1-25397 +9.0.3-p1-25398 +9.0.3-p1-25399 +9.0.3-p1-25400 +9.0.3-p1-25402 +9.0.3-p1-25405 +9.0.3-p1-25406 +9.0.3-p1-abhinav.agarwal-18771999cdf52e2eb4cac4515764035f673da0b4 +9.0.3-p1-khil.un-33723dc9b6a306de91bc2a9fcc7768810f1457bf +9.0.3-p2-25407 +9.0.3-p2-25408 +9.0.3-p2-25410 +9.0.3-p2-25411 +9.0.3-p2-25413 +9.0.3-p2-25414 +9.0.3-p2-25415 +9.0.3-p2-25416 +9.0.3-p2-25417 +9.0.3-p2-25418 +9.0.3-p2-25421 +9.0.3-p2-25422 +9.0.3-p2-25423 +9.0.3-p2-25424 +9.0.3-p2-25425 +9.0.3-p2-25426 +9.0.3-p2-25427 +9.0.3-p2-25428 +9.0.3-p2-25429 
+9.0.3-p2-25430 +9.0.3-p2-25431 +9.0.3-p2-25432 +9.0.3-p2-25433 +9.0.3-p2-25434 +9.0.3-p2-25436 +9.0.3-p2-25437 +9.0.3-p2-25439 +9.0.3-p2-25440 +9.0.3-p2-25441 +9.0.3-p2-25442 +9.0.3-p2-25444 +9.0.3-p2-25445 +9.0.3-p2-khil.un-2bf873fb17f994904dcf673399774dc8b9c79c12 +9.0.3-p2-khil.un-ac0b199a717c00707168ad80f8e9611d3f821deb +9.0.3-p3-25446 +9.0.3-p3-25447 +9.0.3-p3-25448 +9.0.3-p3-25449 +9.0.3-p3-25450 +9.0.3-p3-25451 +9.0.3-p3-25452 +9.0.4-25401 +9.0.4-25403 +9.0.4-25435 +9.0.4-25443 +9.1 +9.1.0-27191 +9.1.0-beta5-25477 +9.1.0-beta5-25490 +9.1.0-p1-27296 +9.1.0-p1-27298 +9.1.0-p1-27302 +9.1.0-p1-27309 +9.1.0-p1-27330 +9.1.0-p1-khil.un-c49044ca59c0bc1edf7921109c15878ad8d6b9ff +9.1.0-p2-27361 +9.1.0-p2-27365 +9.1.0-p2-27367 +9.1.0-p2-27369 +9.1.0-p2-27372 +9.1.0-p2-27377 +9.1.0-p2-27379 +9.1.0-p2-27382 +9.1.0-p2-27395 +9.1.0-p2-27400 +9.1.0-p2-27401 +9.1.0-p2-27402 +9.1.0-p2-27403 +9.1.0-p2-27404 +9.1.0-p2-27405 +9.1.0-p2-27406 +9.1.0-p2-27407 +9.1.0-p2-27409 +9.1.0-p2-27418 +9.1.0-p2-khil.un-50de36250e4d05c520fadf4c780da5af8f82f52c +9.1.0-p2-khil.un-713fe3c6fb797ad684383ebda90a00cbca5e2531 +9.11 +9.1.10.0_92772 +9.1.11.0_92806 +9.1.1-27295 +9.1.1-27297 +9.1.1-27299 +9.1.1-27300 +9.1.1-27301 +9.1.1-27303 +9.1.1-27305 +9.1.1-27307 +9.1.1-27308 +9.1.1-27310 +9.1.1-27311 +9.1.1-27312 +9.1.1-27313 +9.1.1-27315 +9.1.1-27318 +9.1.1-27319 +9.1.1-27320 +9.1.1-27321 +9.1.1-27322 +9.1.1-27323 +9.1.1-27324 +9.1.1-27325 +9.1.1-27326 +9.1.1-27327 +9.1.1-27331 +9.1.1-27332 +9.1.1-27334 +9.1.1-27335 +9.1.1-27336 +9.1.1-27337 +9.1.1-27339 +9.1.1-27340 +9.1.1-27341 +9.1.1-27343 +9.1.1-27344 +9.1.1-27345 +9.1.1-27346 +9.1.1-27347 +9.1.1-27348 +9.1.1-27349 +9.1.1-27350 +9.1.1-27351 +9.1.1-27352 +9.1.1-27354 +9.1.1-27355 +9.1.1-27356 +9.1.1-27357 +9.1.1-27358 +9.1.1-27359 +9.1.1-27360 +9.1.1-27362 +9.1.1-27363 +9.1.1-27364 +9.1.1-27366 +9.1.1-27368 +9.1.1-27374 +9.1.1-27376 +9.1.1-27378 +9.1.1-27380 +9.1.1-27381 +9.1.1-27383 +9.1.1-27385 +9.1.1-27387 +9.1.1-27388 +9.1.1-27393 
+9.1.1-27394 +9.1.1-27396 +9.1.1-27397 +9.1.1-27398 +9.1.1-27399 +9.1.1-27408 +9.1.1-27410 +9.1.1-27411 +9.1.1-27412 +9.1.1-27413 +9.1.1-27414 +9.1.1-27415 +9.1.1-27416 +9.1.1-27417 +9.1.1-27419 +9.1.1-beta1-27328 +9.1.1-beta1-27329 +9.1.1-beta1-27338 +9.1.1-khil.un-bce7cbcae9cc06a03b1f888f0ed88ed6818c2d66 +9.1.1-khil.un-dcc75475f02643571e902b5c2c82c25fce65dc63 +9.1.1-nagadeesh.nagaraja-a9b923254f67e1ed0a2f9100900f73985854cf55 +9.12 +9.13 +9.1.3.0_92242 +9.13.1 +9.13.1P1 +9.13.1P2 +9.13.1P3 +9.13.1P4 +9.13.1P6 +9.13.1P7 +9.13.1P8X1 +9.13.1RC1 +9.14.0 +9.14.0P1 +9.14.0P2 +9.14.0P3 +9.14.1 +9.1.4.1_92329 +9.14.1P1 +9.14.1P1X3 +9.14.1P1X4 +9.14.1RC1 +9.1.4.2_92345 +9.1.4.2_92359 +9.1.4.3_92414 +9.1.4.4_92466 +9.1.4.4_92470 +9.15.0 +9.1.5.0_92545 +9.15.1X12 +9.15.1X15 +9.1.6.0_92628 +9.1.6.2_92634 +9.1.6.2_92636 +9.1.7.0_92666 +9.1.8.0_92706 +9.1-dev-25121 +9.1-dev-25368 +9.2 +9.2.0-beta1-25971 +9.2.0-beta1-26005 +9.2.0-beta1-26033 +9.2.0-beta1-26066 +9.2.0-beta2-26101 +9.2.1 +9.2.2.0_94322 +9.2.3.0_94541 +9.2.4.0_94650 +9.2.4.0_94654 +9.2.5.0_94689 +9.2.5.1_94697 +9.2.6.0_94722 +9.2.7.0_94752 +9.2.8.0_94809 +9.2.8.0_94811 +9.2.9.0_94890 +9.2-dev-25813 +9.2-dev-25878 +9.2-dev-25879 +9.2-dev-25920 +9.2-dev-25946 +9.2-dev-25947 +9.2-dev-25948 +9.2-dev-25949 +9.2-dev-25950 +9.2-dev-25951 +9.2-dev-25952 +9.2-dev-25953 +9.2-dev-25954 +9.2-dev-25955 +9.2-dev-25956 +9.2-dev-25958 +9.2-dev-25959 +9.2-dev-25960 +9.2-dev-25961 +9.2-dev-25962 +9.2-dev-25963 +9.2-dev-25965 +9.2-dev-25966 +9.2-dev-25968 +9.2-dev-25969 +9.2-dev-25970 +9.2-dev-25972 +9.2-dev-25974 +9.2-dev-25975 +9.2-dev-25976 +9.2-dev-25977 +9.2-dev-25978 +9.2-dev-25979 +9.2-dev-25980 +9.2-dev-25982 +9.2-dev-25983 +9.2-dev-25984 +9.2-dev-25985 +9.2-dev-25986 +9.2-dev-25987 +9.2-dev-25988 +9.2-dev-25989 +9.2-dev-25990 +9.2-dev-25991 +9.2-dev-25992 +9.2-dev-25993 +9.2-dev-25994 +9.2-dev-25995 +9.2-dev-25996 +9.2-dev-25999 +9.2-dev-26000 +9.2-dev-26001 +9.2-dev-26002 +9.2-dev-26003 +9.2-dev-26009 +9.2-dev-26013 
+9.2-dev-26014 +9.2-dev-26016 +9.2-dev-26017 +9.2-dev-26018 +9.2-dev-26019 +9.2-dev-26020 +9.2-dev-26021 +9.2-dev-26022 +9.2-dev-26023 +9.2-dev-26024 +9.2-dev-26025 +9.2-dev-26027 +9.2-dev-26028 +9.2-dev-26029 +9.2-dev-26030 +9.2-dev-26031 +9.2-dev-26032 +9.2-dev-26034 +9.2-dev-26036 +9.2-dev-26037 +9.2-dev-26038 +9.2-dev-26039 +9.2-dev-26040 +9.2-dev-26041 +9.2-dev-26042 +9.2-dev-26044 +9.2-dev-26046 +9.2-dev-26047 +9.2-dev-26048 +9.2-dev-26050 +9.2-dev-26052 +9.2-dev-26058 +9.2-dev-26060 +9.2-dev-26061 +9.2-dev-26062 +9.2-dev-26063 +9.2-dev-26064 +9.2-dev-26065 +9.2-dev-26067 +9.2-dev-26070 +9.2-dev-26071 +9.2-dev-26075 +9.2-dev-26077 +9.2-dev-26078 +9.2-dev-26079 +9.2-dev-26080 +9.2-dev-26081 +9.2-dev-26082 +9.2-dev-26083 +9.2-dev-26085 +9.2-dev-26086 +9.2-dev-26087 +9.2-dev-26088 +9.2-dev-26089 +9.2-dev-26090 +9.2-dev-26091 +9.2-dev-26093 +9.2-dev-26094 +9.2-dev-26095 +9.2-dev-26096 +9.2-dev-26097 +9.2-dev-26098 +9.2-dev-26104 +9.2-dev-26105 +9.2-dev-26107 +9.2-dev-26108 +9.2-dev-26109 +9.2-dev-26110 +9.2-dev-26111 +9.2-dev-adi.kris-33a772ca61f67a24283d4e71a63650282d6bd073 +9.2-dev-khil.un-6ec1bfcc230e848a0e8f1d776d0f05a35a9545e6 +9.2-dev-khil.un-c35b47d1656fd20c0ec0d6cab8583ffbf6041937 +9.2-dev-khil.un-c54da2af2e5732bee11b720c199e16fd70438968 +9.2-dev-michael.sun-ec36214183ee10fbe28d86a55b3aa46b54eb4a04 +9.3 +9.3.0.0_95721 +9.3.1.0_95994 +9.3.2.0_96093 +9.3.2.1_96098 +9.3.2.2_96105 +9.3.2.3_96127 +9.4 +9.4.1.0_98030 +9.4.1.0_98069 +9.4.2.0_98303 +9.4.2.0_98396 +9.5 +9.6 +9.7 +9.8 +9.9 +9999.0.1 +9999.9.1 +a +Accops +ArrayOS +Aruba +bookworm/sid +bullseye/sid +buster/sid +Clawhammer__9.14.0 +Clawhammer__9.14.1 +Cloudstream__9.16.0 +Epicor +FFFF +h +ip-12.1.6 +ip-13.1.4 +ip-13.1.4.1 +ip-13.1.5 +ip-13.1.5.1 +ip-14.1.4 +ip-14.1.4.1 +ip-14.1.4.2 +ip-14.1.4.4 +ip-14.1.4.5 +ip-14.1.4.6 +ip-14.1.5.1 +ip-14.1.5.2 +ip-14.1.5.3 +ip-14.1.5.4 +ip-14.1.5.5 +ip-14.1.5.6 +ip-15.1.10 +ip-15.1.10.2 +ip-15.1.10.3 +ip-15.1.2.1 +ip-15.1.3 +ip-15.1.3.1 +ip-15.1.4 +ip-15.1.5 
+ip-15.1.5.1 +ip-15.1.6.1 +ip-15.1.7 +ip-15.1.8 +ip-15.1.8.1 +ip-15.1.8.2 +ip-15.1.9.1 +ip-16.0.1.1 +ip-16.0.1.2 +ip-16.1.0 +ip-16.1.1 +ip-16.1.2.1 +ip-16.1.2.2 +ip-16.1.3 +ip-16.1.3.1 +ip-16.1.3.2 +ip-16.1.3.3 +ip-16.1.3.4 +ip-16.1.3.5 +ip-16.1.4 +ip-16.1.4.1 +ip-16.1.4.2 +ip-16.1.5 +ip-17.0.0 +ip-17.1.0 +ip-17.1.0.1 +ip-17.1.0.2 +ip-17.1.0.3 +ip-17.1.1 +ip-17.1.1.1 +ip-17.1.1.2 +ip-17.5.0 +jessie/sid +JNPR-11.0-20200922.4042921_buil +JNPR-11.0-20201028.e1cef1d_buil +JNPR-11.0-20201221.5316c2e_buil +JNPR-11.0-20210220.a5d6a89_buil +JNPR-11.0-20210429.58e41ab_buil +JNPR-11.0-20210618.f43645e_buil +JNPR-12.1-20211216.232802__ci_f +JNPR-12.1-20220202.9885091_buil +JNPR-12.1-20220221.2b3c81a_buil +JNPR-12.1-20220228.82e60e3_buil +JNPR-12.1-20220817.0361d5f_buil +JNPR-12.1-20220817.43c4e23_buil +JNPR-12.1-20221021.a9737e1_buil +JNPR-12.1-20230120.6bab16a_buil +JNPR-12.1-20230321.be5f9c0_buil +JNPR-12.1-20230821.5fbe894_buil +JNPR-12.1-20231013.108e0b3_buil +JNPR-12.1-20231013.32ed862a0f7_ +JNPR-12.1-20231122.ee0e992_buil +JNPR-12.1-20231220.32ed862a0f7_ +JNPR-12.1-20240103.68b4802_buil +JNPR-12.1-20240112.32ed862a0f7_ +JNPR-12.1-20240119.32ed862a0f7_ +JNPR-12.1-20240228.033525_kahon +JNPR-15.0-20240118.32ed862a0f7_ +JNPR-15.0-20240207.32ed862a0f7_ +JNPR-15.0-20240209.212337_yhli_ +JNPR-15.0-20240221.32ed862a0f7_ +JNPR-15.0-20240224.002811_kahon +kali-rolling +leap-15.0 +leap-15.1 +leap-15.2 +leap-15.3 +leap-15.4 +leap-15.5 +Libraesva +lighthouse-23.10.0 +lighthouse-23.10.1 +lighthouse-23.10.2 +lighthouse-24.02.0 +lighthouse-24.02.0p0 +lighthouse-24.05.0p0 +Lighthouse__9.13.1 +Linux +linux-os-31700 +linux-os-31810 +linux-os-31980 +linux-os-36200 +linux-os-38790 +micro-5.5 +Mightysquirrel__9.15.0 +Mightysquirrel__9.15.1 +n/a +NAME="SLES" +ngfw-6.10.11.26551.azure.1 +ngfw-6.10.12.26603 +ngfw-6.10.13.26655.fips.2 +ngfw-6.10.14.26703 +ngfw-6.10.15.26752 +ngfw-7.0.3.28152.sip.2 +ngfw-7.1.1.29059 +ngfw-7.1.2.29102 +ngfw-7.2.0.30046.pppoe.1 
+ngfw-7.2.0.30046.rnext-g02c2c7f.2402121309 +ngfw-7.2.0.30046.rnext-gf1bf778.2402120824 +ngfw-7.2.0.30047.rnext-g030ce90.2402141429 +ngfw-7.2.0.30047.rnext-g2e7c78f.2402150842 +ngfw-7.2.0.30047.rnext-g3f3db02.2402211419 +ngfw-7.2.0.30047.rnext-g58dccd6.2402151047 +ngfw-7.2.0.30047.rnext-g5d6e00a.2402212007 +ngfw-7.2.0.30047.rnext-gbd58266.2402140855 +ngfw-7.2.0.30047.rnext-gc7730bf.2402151240 +ngfw-7.2.0.30047.rnext-ge9c5065.2402192008 +ngfw-7.2.0.30048.rnext-g237a2a5.2402222007 +ngfw-7.2.0.30048.rnext-g9219487.2402260818 +ngfw-7.2.0.30048.rnext-gbfc76a4.2402261313 +ngfw-7.2.0.30048.rnext-gef6caea.2402260525 +ngfw-7.2.0.30049 +ngfw-7.2.0.30050 +ngfw-7.2.0.30050.rnext-g4152526.2402281323 +ngfw-7.2.0.30050.rnext-gb6d2048.2402291318 +ngfw-7.2.0.30050.rnext-ge84f515.2402291054 +None +PanOS +r11427-9ce6aa9d8d +rolling +Schipperke-4857 +SonicOSX 7.1.1-7038-R5354 +SonicOSX 7.1.1-7040-R2998-HF24239 +SonicOSX 7.1.1-7040-R5387 +SonicOSX 7.1.1-7040-R5389 +SonicOSX 7.1.1-7040-R5391 +SonicOSX 7.1.1-7041-R5415 +SonicOSX 7.1.1-7047-R3003-HF24239 +SonicOSX 7.1.1-7047-R5557 +SonicOSX 7.1.1-7047-R5573 +SonicOSX 7.1.1-7047-R5582 +SonicOSX 7.1.1-7047-R5587 +SonicOSX 7.1.1-7048-D14445 +SonicOSX 7.1.1-7049-D14628 +SonicOSX 7.1.1-7049-R5589 +stretch/sid +testing/unstable +trixie/sid +tumbleweed-20230902 +tumbleweed-20240106 +unstable +v3.3 +v3.4.1 +v3.5 +v3.8.1 +vsbc-x86_pi3-6.10.3 +vsbc-x86_pi3-6.10.x6 +vsbc-x86_pi3-6.12.2pre02 diff --git a/tests/data/hostgaplugin/ext_conf-agent_family_version.xml b/tests/data/hostgaplugin/ext_conf-agent_family_version.xml index 5c9e0028fe..a277db3d7c 100644 --- a/tests/data/hostgaplugin/ext_conf-agent_family_version.xml +++ b/tests/data/hostgaplugin/ext_conf-agent_family_version.xml @@ -64,7 +64,7 @@ "runtimeSettings": [ { "handlerSettings": { - "protectedSettingsCertThumbprint": "BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F", + "protectedSettingsCertThumbprint": "F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9", "protectedSettings": 
"MIIBsAYJKoZIhvcNAQcDoIIBoTCCAZ0CAQAxggFpMIIBZQIBADBNMDkxNzA1BgoJkiaJk/IsZAEZFidXaW5kb3dzIEF6dXJlIENSUCBDZXJ0aWZpY2F0ZSBHZW5lcmF0b3ICEFpB/HKM/7evRk+DBz754wUwDQYJKoZIhvcNAQEBBQAEggEADPJwniDeIUXzxNrZCloitFdscQ59Bz1dj9DLBREAiM8jmxM0LLicTJDUv272Qm/4ZQgdqpFYBFjGab/9MX+Ih2x47FkVY1woBkckMaC/QOFv84gbboeQCmJYZC/rZJdh8rCMS+CEPq3uH1PVrvtSdZ9uxnaJ+E4exTPPviIiLIPtqWafNlzdbBt8HZjYaVw+SSe+CGzD2pAQeNttq3Rt/6NjCzrjG8ufKwvRoqnrInMs4x6nnN5/xvobKIBSv4/726usfk8Ug+9Q6Benvfpmre2+1M5PnGTfq78cO3o6mI3cPoBUjp5M0iJjAMGeMt81tyHkimZrEZm6pLa4NQMOEjArBgkqhkiG9w0BBwEwFAYIKoZIhvcNAwcECC5nVaiJaWt+gAhgeYvxUOYHXw==", "publicSettings": {"GCS_AUTO_CONFIG":true} } @@ -77,7 +77,7 @@ "runtimeSettings": [ { "handlerSettings": { - "protectedSettingsCertThumbprint": "BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F", + "protectedSettingsCertThumbprint": "F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9", "protectedSettings": "MIIBsAYJKoZIhvcNAQcDoIIBoTCCAZ0CAQAxggFpMIIBZQIBADBNMDkxNzA1BgoJkiaJk/IsZAEZFidXaW5kb3dzIEF6dXJlIENSUCBDZXJ0aWZpY2F0ZSBHZW5lcmF0b3ICEFpB/HKM/7evRk+DBz754wUwDQYJKoZIhvcNAQEBBQAEggEADPJwniDeIUXzxNrZCloitFdscQ59Bz1dj9DLBREAiM8jmxM0LLicTJDUv272Qm/4ZQgdqpFYBFjGab/9MX+Ih2x47FkVY1woBkckMaC/QOFv84gbboeQCmJYZC/rZJdh8rCMS+CEPq3uH1PVrvtSdZ9uxnaJ+E4exTPPviIiLIPtqWafNlzdbBt8HZjYaVw+SSe+CGzD2pAQeNttq3Rt/6NjCzrjG8ufKwvRoqnrInMs4x6nnN5/xvobKIBSv4/726usfk8Ug+9Q6Benvfpmre2+1M5PnGTfq78cO3o6mI3cPoBUjp5M0iJjAMGeMt81tyHkimZrEZm6pLa4NQMOEjArBgkqhkiG9w0BBwEwFAYIKoZIhvcNAwcECC5nVaiJaWt+gAhgeYvxUOYHXw==", "publicSettings": {"enableGenevaUpload":true} } diff --git a/tests/data/hostgaplugin/ext_conf-rsm_version_properties_false.xml b/tests/data/hostgaplugin/ext_conf-rsm_version_properties_false.xml index e1f1d6ba8c..6590c562d5 100644 --- a/tests/data/hostgaplugin/ext_conf-rsm_version_properties_false.xml +++ b/tests/data/hostgaplugin/ext_conf-rsm_version_properties_false.xml @@ -64,7 +64,7 @@ "runtimeSettings": [ { "handlerSettings": { - "protectedSettingsCertThumbprint": "BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F", + 
"protectedSettingsCertThumbprint": "F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9", "protectedSettings": "MIIBsAYJKoZIhvcNAQcDoIIBoTCCAZ0CAQAxggFpMIIBZQIBADBNMDkxNzA1BgoJkiaJk/IsZAEZFidXaW5kb3dzIEF6dXJlIENSUCBDZXJ0aWZpY2F0ZSBHZW5lcmF0b3ICEFpB/HKM/7evRk+DBz754wUwDQYJKoZIhvcNAQEBBQAEggEADPJwniDeIUXzxNrZCloitFdscQ59Bz1dj9DLBREAiM8jmxM0LLicTJDUv272Qm/4ZQgdqpFYBFjGab/9MX+Ih2x47FkVY1woBkckMaC/QOFv84gbboeQCmJYZC/rZJdh8rCMS+CEPq3uH1PVrvtSdZ9uxnaJ+E4exTPPviIiLIPtqWafNlzdbBt8HZjYaVw+SSe+CGzD2pAQeNttq3Rt/6NjCzrjG8ufKwvRoqnrInMs4x6nnN5/xvobKIBSv4/726usfk8Ug+9Q6Benvfpmre2+1M5PnGTfq78cO3o6mI3cPoBUjp5M0iJjAMGeMt81tyHkimZrEZm6pLa4NQMOEjArBgkqhkiG9w0BBwEwFAYIKoZIhvcNAwcECC5nVaiJaWt+gAhgeYvxUOYHXw==", "publicSettings": {"GCS_AUTO_CONFIG":true} } @@ -77,7 +77,7 @@ "runtimeSettings": [ { "handlerSettings": { - "protectedSettingsCertThumbprint": "BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F", + "protectedSettingsCertThumbprint": "F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9", "protectedSettings": "MIIBsAYJKoZIhvcNAQcDoIIBoTCCAZ0CAQAxggFpMIIBZQIBADBNMDkxNzA1BgoJkiaJk/IsZAEZFidXaW5kb3dzIEF6dXJlIENSUCBDZXJ0aWZpY2F0ZSBHZW5lcmF0b3ICEFpB/HKM/7evRk+DBz754wUwDQYJKoZIhvcNAQEBBQAEggEADPJwniDeIUXzxNrZCloitFdscQ59Bz1dj9DLBREAiM8jmxM0LLicTJDUv272Qm/4ZQgdqpFYBFjGab/9MX+Ih2x47FkVY1woBkckMaC/QOFv84gbboeQCmJYZC/rZJdh8rCMS+CEPq3uH1PVrvtSdZ9uxnaJ+E4exTPPviIiLIPtqWafNlzdbBt8HZjYaVw+SSe+CGzD2pAQeNttq3Rt/6NjCzrjG8ufKwvRoqnrInMs4x6nnN5/xvobKIBSv4/726usfk8Ug+9Q6Benvfpmre2+1M5PnGTfq78cO3o6mI3cPoBUjp5M0iJjAMGeMt81tyHkimZrEZm6pLa4NQMOEjArBgkqhkiG9w0BBwEwFAYIKoZIhvcNAwcECC5nVaiJaWt+gAhgeYvxUOYHXw==", "publicSettings": {"enableGenevaUpload":true} } diff --git a/tests/data/hostgaplugin/ext_conf.xml b/tests/data/hostgaplugin/ext_conf.xml index 8ede27f8a0..0e3dec4c83 100644 --- a/tests/data/hostgaplugin/ext_conf.xml +++ b/tests/data/hostgaplugin/ext_conf.xml @@ -58,7 +58,7 @@ "runtimeSettings": [ { "handlerSettings": { - "protectedSettingsCertThumbprint": "BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F", + 
"protectedSettingsCertThumbprint": "F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9", "protectedSettings": "MIIBsAYJKoZIhvcNAQcDoIIBoTCCAZ0CAQAxggFpMIIBZQIBADBNMDkxNzA1BgoJkiaJk/Microsoft.Azure.Monitor.AzureMonitorLinuxAgent==", "publicSettings": {"GCS_AUTO_CONFIG":true} } @@ -71,7 +71,7 @@ "runtimeSettings": [ { "handlerSettings": { - "protectedSettingsCertThumbprint": "BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F", + "protectedSettingsCertThumbprint": "F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9", "protectedSettings": "MIIBsAYJKoZIhvcNAQcDoIIBoTCCAZ0CAQAxggFpMIIBZQIBADBNMDkxNzA1BgoJkiaJk/Microsoft.Azure.Security.Monitoring.AzureSecurityLinuxAgent==", "publicSettings": {"enableGenevaUpload":true} } diff --git a/tests/data/hostgaplugin/vm_settings-agent_family_version.json b/tests/data/hostgaplugin/vm_settings-agent_family_version.json index 734cc8147b..99d435e51a 100644 --- a/tests/data/hostgaplugin/vm_settings-agent_family_version.json +++ b/tests/data/hostgaplugin/vm_settings-agent_family_version.json @@ -60,7 +60,7 @@ "settingsSeqNo": 0, "settings": [ { - "protectedSettingsCertThumbprint": "BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F", + "protectedSettingsCertThumbprint": "F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9", "protectedSettings": "MIIBsAYJKoZIhvcNAQcDoIIBoTCCAZ0CAQAxggFpMIIBZQIBADBNMDkxNzA1BgoJkiaJk/IsZAEZFidXaW5kb3dzIEF6dXJlIENSUCBDZXJ0aWZpY2F0ZSBHZW5lcmF0b3ICEFpB/HKM/7evRk+DBz754wUwDQYJKoZIhvcNAQEBBQAEggEADPJwniDeIUXzxNrZCloitFdscQ59Bz1dj9DLBREAiM8jmxM0LLicTJDUv272Qm/4ZQgdqpFYBFjGab/9MX+Ih2x47FkVY1woBkckMaC/QOFv84gbboeQCmJYZC/rZJdh8rCMS+CEPq3uH1PVrvtSdZ9uxnaJ+E4exTPPviIiLIPtqWafNlzdbBt8HZjYaVw+SSe+CGzD2pAQeNttq3Rt/6NjCzrjG8ufKwvRoqnrInMs4x6nnN5/xvobKIBSv4/726usfk8Ug+9Q6Benvfpmre2+1M5PnGTfq78cO3o6mI3cPoBUjp5M0iJjAMGeMt81tyHkimZrEZm6pLa4NQMOEjArBgkqhkiG9w0BBwEwFAYIKoZIhvcNAwcECC5nVaiJaWt+gAhgeYvxUOYHXw==", "publicSettings": "{\"GCS_AUTO_CONFIG\":true}" } @@ -78,7 +78,7 @@ "settingsSeqNo": 0, "settings": [ { - "protectedSettingsCertThumbprint": 
"BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F", + "protectedSettingsCertThumbprint": "F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9", "protectedSettings": "MIIBsAYJKoZIhvcNAQcDoIIBoTCCAZ0CAQAxggFpMIIBZQIBADBNMDkxNzA1BgoJkiaJk/IsZAEZFidXaW5kb3dzIEF6dXJlIENSUCBDZXJ0aWZpY2F0ZSBHZW5lcmF0b3ICEFpB/HKM/7evRk+DBz754wUwDQYJKoZIhvcNAQEBBQAEggEADPJwniDeIUXzxNrZCloitFdscQ59Bz1dj9DLBREAiM8jmxM0LLicTJDUv272Qm/4ZQgdqpFYBFjGab/9MX+Ih2x47FkVY1woBkckMaC/QOFv84gbboeQCmJYZC/rZJdh8rCMS+CEPq3uH1PVrvtSdZ9uxnaJ+E4exTPPviIiLIPtqWafNlzdbBt8HZjYaVw+SSe+CGzD2pAQeNttq3Rt/6NjCzrjG8ufKwvRoqnrInMs4x6nnN5/xvobKIBSv4/726usfk8Ug+9Q6Benvfpmre2+1M5PnGTfq78cO3o6mI3cPoBUjp5M0iJjAMGeMt81tyHkimZrEZm6pLa4NQMOEjArBgkqhkiG9w0BBwEwFAYIKoZIhvcNAwcECC5nVaiJaWt+gAhgeYvxUOYHXw==", "publicSettings": "{\"enableGenevaUpload\":true}" } diff --git a/tests/data/hostgaplugin/vm_settings-difference_in_required_features.json b/tests/data/hostgaplugin/vm_settings-difference_in_required_features.json index 71cdbf5c55..f36524e280 100644 --- a/tests/data/hostgaplugin/vm_settings-difference_in_required_features.json +++ b/tests/data/hostgaplugin/vm_settings-difference_in_required_features.json @@ -56,7 +56,7 @@ "settingsSeqNo": 0, "settings": [ { - "protectedSettingsCertThumbprint": "BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F", + "protectedSettingsCertThumbprint": "F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9", "protectedSettings": "MIIBsAYJKoZIhvcNAQcDoIIBoTCCAZ0CAQAxggFpMIIBZQIBADBNMDkxNzA1BgoJkiaJk/IsZAEZFidXaW5kb3dzIEF6dXJlIENSUCBDZXJ0aWZpY2F0ZSBHZW5lcmF0b3ICEFpB/HKM/7evRk+DBz754wUwDQYJKoZIhvcNAQEBBQAEggEADPJwniDeIUXzxNrZCloitFdscQ59Bz1dj9DLBREAiM8jmxM0LLicTJDUv272Qm/4ZQgdqpFYBFjGab/9MX+Ih2x47FkVY1woBkckMaC/QOFv84gbboeQCmJYZC/rZJdh8rCMS+CEPq3uH1PVrvtSdZ9uxnaJ+E4exTPPviIiLIPtqWafNlzdbBt8HZjYaVw+SSe+CGzD2pAQeNttq3Rt/6NjCzrjG8ufKwvRoqnrInMs4x6nnN5/xvobKIBSv4/726usfk8Ug+9Q6Benvfpmre2+1M5PnGTfq78cO3o6mI3cPoBUjp5M0iJjAMGeMt81tyHkimZrEZm6pLa4NQMOEjArBgkqhkiG9w0BBwEwFAYIKoZIhvcNAwcECC5nVaiJaWt+gAhgeYvxUOYHXw==", "publicSettings": 
"{\"GCS_AUTO_CONFIG\":true}" } @@ -76,7 +76,7 @@ "settingsSeqNo": 0, "settings": [ { - "protectedSettingsCertThumbprint": "BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F", + "protectedSettingsCertThumbprint": "F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9", "protectedSettings": "MIIBsAYJKoZIhvcNAQcDoIIBoTCCAZ0CAQAxggFpMIIBZQIBADBNMDkxNzA1BgoJkiaJk/IsZAEZFidXaW5kb3dzIEF6dXJlIENSUCBDZXJ0aWZpY2F0ZSBHZW5lcmF0b3ICEFpB/HKM/7evRk+DBz754wUwDQYJKoZIhvcNAQEBBQAEggEADPJwniDeIUXzxNrZCloitFdscQ59Bz1dj9DLBREAiM8jmxM0LLicTJDUv272Qm/4ZQgdqpFYBFjGab/9MX+Ih2x47FkVY1woBkckMaC/QOFv84gbboeQCmJYZC/rZJdh8rCMS+CEPq3uH1PVrvtSdZ9uxnaJ+E4exTPPviIiLIPtqWafNlzdbBt8HZjYaVw+SSe+CGzD2pAQeNttq3Rt/6NjCzrjG8ufKwvRoqnrInMs4x6nnN5/xvobKIBSv4/726usfk8Ug+9Q6Benvfpmre2+1M5PnGTfq78cO3o6mI3cPoBUjp5M0iJjAMGeMt81tyHkimZrEZm6pLa4NQMOEjArBgkqhkiG9w0BBwEwFAYIKoZIhvcNAwcECC5nVaiJaWt+gAhgeYvxUOYHXw==", "publicSettings": "{\"enableGenevaUpload\":true}" } diff --git a/tests/data/hostgaplugin/vm_settings-out-of-sync.json b/tests/data/hostgaplugin/vm_settings-out-of-sync.json index 0d4806af9d..d971bcaa8a 100644 --- a/tests/data/hostgaplugin/vm_settings-out-of-sync.json +++ b/tests/data/hostgaplugin/vm_settings-out-of-sync.json @@ -56,7 +56,7 @@ "settingsSeqNo": 0, "settings": [ { - "protectedSettingsCertThumbprint": "BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F", + "protectedSettingsCertThumbprint": "F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9", "protectedSettings": 
"MIIBsAYJKoZIhvcNAQcDoIIBoTCCAZ0CAQAxggFpMIIBZQIBADBNMDkxNzA1BgoJkiaJk/IsZAEZFidXaW5kb3dzIEF6dXJlIENSUCBDZXJ0aWZpY2F0ZSBHZW5lcmF0b3ICEFpB/HKM/7evRk+DBz754wUwDQYJKoZIhvcNAQEBBQAEggEADPJwniDeIUXzxNrZCloitFdscQ59Bz1dj9DLBREAiM8jmxM0LLicTJDUv272Qm/4ZQgdqpFYBFjGab/9MX+Ih2x47FkVY1woBkckMaC/QOFv84gbboeQCmJYZC/rZJdh8rCMS+CEPq3uH1PVrvtSdZ9uxnaJ+E4exTPPviIiLIPtqWafNlzdbBt8HZjYaVw+SSe+CGzD2pAQeNttq3Rt/6NjCzrjG8ufKwvRoqnrInMs4x6nnN5/xvobKIBSv4/726usfk8Ug+9Q6Benvfpmre2+1M5PnGTfq78cO3o6mI3cPoBUjp5M0iJjAMGeMt81tyHkimZrEZm6pLa4NQMOEjArBgkqhkiG9w0BBwEwFAYIKoZIhvcNAwcECC5nVaiJaWt+gAhgeYvxUOYHXw==", "publicSettings": "{\"GCS_AUTO_CONFIG\":true}" } diff --git a/tests/data/hostgaplugin/vm_settings-requested_version_properties_false.json b/tests/data/hostgaplugin/vm_settings-requested_version_properties_false.json index 3a6eb8b1a5..d902d94719 100644 --- a/tests/data/hostgaplugin/vm_settings-requested_version_properties_false.json +++ b/tests/data/hostgaplugin/vm_settings-requested_version_properties_false.json @@ -60,7 +60,7 @@ "settingsSeqNo": 0, "settings": [ { - "protectedSettingsCertThumbprint": "BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F", + "protectedSettingsCertThumbprint": "F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9", "protectedSettings": "MIIBsAYJKoZIhvcNAQcDoIIBoTCCAZ0CAQAxggFpMIIBZQIBADBNMDkxNzA1BgoJkiaJk/IsZAEZFidXaW5kb3dzIEF6dXJlIENSUCBDZXJ0aWZpY2F0ZSBHZW5lcmF0b3ICEFpB/HKM/7evRk+DBz754wUwDQYJKoZIhvcNAQEBBQAEggEADPJwniDeIUXzxNrZCloitFdscQ59Bz1dj9DLBREAiM8jmxM0LLicTJDUv272Qm/4ZQgdqpFYBFjGab/9MX+Ih2x47FkVY1woBkckMaC/QOFv84gbboeQCmJYZC/rZJdh8rCMS+CEPq3uH1PVrvtSdZ9uxnaJ+E4exTPPviIiLIPtqWafNlzdbBt8HZjYaVw+SSe+CGzD2pAQeNttq3Rt/6NjCzrjG8ufKwvRoqnrInMs4x6nnN5/xvobKIBSv4/726usfk8Ug+9Q6Benvfpmre2+1M5PnGTfq78cO3o6mI3cPoBUjp5M0iJjAMGeMt81tyHkimZrEZm6pLa4NQMOEjArBgkqhkiG9w0BBwEwFAYIKoZIhvcNAwcECC5nVaiJaWt+gAhgeYvxUOYHXw==", "publicSettings": "{\"GCS_AUTO_CONFIG\":true}" } @@ -78,7 +78,7 @@ "settingsSeqNo": 0, "settings": [ { - "protectedSettingsCertThumbprint": 
"BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F", + "protectedSettingsCertThumbprint": "F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9", "protectedSettings": "MIIBsAYJKoZIhvcNAQcDoIIBoTCCAZ0CAQAxggFpMIIBZQIBADBNMDkxNzA1BgoJkiaJk/IsZAEZFidXaW5kb3dzIEF6dXJlIENSUCBDZXJ0aWZpY2F0ZSBHZW5lcmF0b3ICEFpB/HKM/7evRk+DBz754wUwDQYJKoZIhvcNAQEBBQAEggEADPJwniDeIUXzxNrZCloitFdscQ59Bz1dj9DLBREAiM8jmxM0LLicTJDUv272Qm/4ZQgdqpFYBFjGab/9MX+Ih2x47FkVY1woBkckMaC/QOFv84gbboeQCmJYZC/rZJdh8rCMS+CEPq3uH1PVrvtSdZ9uxnaJ+E4exTPPviIiLIPtqWafNlzdbBt8HZjYaVw+SSe+CGzD2pAQeNttq3Rt/6NjCzrjG8ufKwvRoqnrInMs4x6nnN5/xvobKIBSv4/726usfk8Ug+9Q6Benvfpmre2+1M5PnGTfq78cO3o6mI3cPoBUjp5M0iJjAMGeMt81tyHkimZrEZm6pLa4NQMOEjArBgkqhkiG9w0BBwEwFAYIKoZIhvcNAwcECC5nVaiJaWt+gAhgeYvxUOYHXw==", "publicSettings": "{\"enableGenevaUpload\":true}" } diff --git a/tests/data/hostgaplugin/vm_settings.json b/tests/data/hostgaplugin/vm_settings.json index 1f6d44debc..dffac88966 100644 --- a/tests/data/hostgaplugin/vm_settings.json +++ b/tests/data/hostgaplugin/vm_settings.json @@ -56,7 +56,7 @@ "settingsSeqNo": 0, "settings": [ { - "protectedSettingsCertThumbprint": "BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F", + "protectedSettingsCertThumbprint": "F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9", "protectedSettings": "MIIBsAYJKoZIhvcNAQcDoIIBoTCCAZ0CAQAxggFpMIIBZQIBADBNMDkxNzA1BgoJkiaJk/Microsoft.Azure.Monitor.AzureMonitorLinuxAgent==", "publicSettings": "{\"GCS_AUTO_CONFIG\":true}" } @@ -76,7 +76,7 @@ "settingsSeqNo": 0, "settings": [ { - "protectedSettingsCertThumbprint": "BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F", + "protectedSettingsCertThumbprint": "F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9", "protectedSettings": "MIIBsAYJKoZIhvcNAQcDoIIBoTCCAZ0CAQAxggFpMIIBZQIBADBNMDkxNzA1BgoJkiaJk/Microsoft.Azure.Security.Monitoring.AzureSecurityLinuxAgent==", "publicSettings": "{\"enableGenevaUpload\":true}" } @@ -192,7 +192,7 @@ "isMultiConfig": false, "settings": [ { - "protectedSettingsCertThumbprint": "BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F", + 
"protectedSettingsCertThumbprint": "F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9", "protectedSettings": "MIIBsAYJKoZIhvcNAQcDoIIBoTCCAZ0CAQAxggFpddesZQewdDBgegkxNzA1BgoJkgergres/Microsoft.OSTCExtensions.VMAccessForLinux==" } ] diff --git a/tests/data/wire/certs-2.xml b/tests/data/wire/certs-2.xml index 66a231ee87..e58e0aeacd 100644 --- a/tests/data/wire/certs-2.xml +++ b/tests/data/wire/certs-2.xml @@ -1,85 +1,85 @@ 2012-11-30 - 5 + 1 Pkcs7BlobWithPfxContents - MIIOgwYJKoZIhvcNAQcDoIIOdDCCDnACAQIxggEwMIIBLAIBAoAUiF8ZYMs9mMa8 -QOEMxDaIhGza+0IwDQYJKoZIhvcNAQEBBQAEggEAQW7GyeRVEhHSU1/dzV0IndH0 -rDQk+27MvlsWTcpNcgGFtfRYxu5bzmp0+DoimX3pRBlSFOpMJ34jpg4xs78EsSWH -FRhCf3EGuEUBHo6yR8FhXDTuS7kZ0UmquiCI2/r8j8gbaGBNeP8IRizcAYrPMA5S -E8l1uCrw7DHuLscbVni/7UglGaTfFS3BqS5jYbiRt2Qh3p+JPUfm51IG3WCIw/WS -2QHebmHxvMFmAp8AiBWSQJizQBEJ1lIfhhBMN4A7NadMWAe6T2DRclvdrQhJX32k -amOiogbW4HJsL6Hphn7Frrw3CENOdWMAvgQBvZ3EjAXgsJuhBA1VIrwofzlDljCC -DTUGCSqGSIb3DQEHATAUBggqhkiG9w0DBwQIxcvw9qx4y0qAgg0QrINXpC23BWT2 -Fb9N8YS3Be9eO3fF8KNdM6qGf0kKR16l/PWyP2L+pZxCcCPk83d070qPdnJK9qpJ -6S1hI80Y0oQnY9VBFrdfkc8fGZHXqm5jNS9G32v/AxYpJJC/qrAQnWuOdLtOZaGL -94GEh3XRagvz1wifv8SRI8B1MzxrpCimeMxHkL3zvJFg9FjLGdrak868feqhr6Nb -pqH9zL7bMq8YP788qTRELUnL72aDzGAM7HEj7V4yu2uD3i3Ryz3bqWaj9IF38Sa0 -6rACBkiNfZBPgExoMUm2GNVyx8hTis2XKRgz4NLh29bBkKrArK9sYDncE9ocwrrX -AQ99yn03Xv6TH8bRp0cSj4jzBXc5RFsUQG/LxzJVMjvnkDbwNE41DtFiYz5QVcv1 -cMpTH16YfzSL34a479eNq/4+JAs/zcb2wjBskJipMUU4hNx5fhthvfKwDOQbLTqN -HcP23iPQIhjdUXf6gpu5RGu4JZ0dAMHMHFKvNL6TNejwx/H6KAPp6rCRsYi6QhAb -42SXdZmhAyQsFpGD9U5ieJApqeCHfj9Xhld61GqLJA9+WLVhDPADjqHoAVvrOkKH -OtPegId/lWnCB7p551klAjiEA2/DKxFBIAEhqZpiLl+juZfMXovkdmGxMP4gvNNF -gbS2k5A0IJ8q51gZcH1F56smdAmi5kvhPnFdy/9gqeI/F11F1SkbPVLImP0mmrFi -zQD5JGfEu1psUYvhpOdaYDkmAK5qU5xHSljqZFz5hXNt4ebvSlurHAhunJb2ln3g -AJUHwtZnVBrtYMB0w6fdwYqMxXi4vLeqUiHtIQtbOq32zlSryNPQqG9H0iP9l/G1 -t7oUfr9woI/B0kduaY9jd5Qtkqs1DoyfNMSaPNohUK/CWOTD51qOadzSvK0hJ+At -033PFfv9ilaX6GmzHdEVEanrn9a+BoBCnGnuysHk/8gdswj9OzeCemyIFJD7iObN 
-rNex3SCf3ucnAejJOA0awaLx88O1XTteUjcFn26EUji6DRK+8JJiN2lXSyQokNeY -ox6Z4hFQDmw/Q0k/iJqe9/Dq4zA0l3Krkpra0DZoWh5kzYUA0g5+Yg6GmRNRa8YG -tuuD6qK1SBEzmCYff6ivjgsXV5+vFBSjEpx2dPEaKdYxtHMOjkttuTi1mr+19dVf -hSltbzfISbV9HafX76dhwZJ0QwsUx+aOW6OrnK8zoQc5AFOXpe9BrrOuEX01qrM0 -KX5tS8Zx5HqDLievjir194oi3r+nAiG14kYlGmOTHshu7keGCgJmzJ0iVG/i+TnV -ZSLyd8OqV1F6MET1ijgR3OPL3kt81Zy9lATWk/DgKbGBkkKAnXO2HUw9U34JFyEy -vEc81qeHci8sT5QKSFHiP3r8EcK8rT5k9CHpnbFmg7VWSMVD0/wRB/C4BiIw357a -xyJ/q1NNvOZVAyYzIzf9TjwREtyeHEo5kS6hyWSn7fbFf3sNGO2I30veWOvE6kFA -HMtF3NplOrTYcM7fAK5zJCBK20oU645TxI8GsICMog7IFidFMdRn4MaXpwAjEZO4 -44m2M+4XyeRCAZhp1Fu4mDiHGqgd44mKtwvLACVF4ygWZnACDpI17X88wMnwL4uU -vgehLZdAE89gvukSCsET1inVBnn/hVenCRbbZ++IGv2XoYvRfeezfOoNUcJXyawQ -JFqN0CRB5pliuCesTO2urn4HSwGGoeBd507pGWZmOAjbNjGswlJJXF0NFnNW/zWw -UFYy+BI9axuhWTSnCXbNbngdNQKHznKe1Lwit6AI3U9jS33pM3W+pwUAQegVdtpG -XT01YgiMCBX+b8B/xcWTww0JbeUwKXudzKsPhQmaA0lubAo04JACMfON8jSZCeRV -TyIzgacxGU6YbEKH4PhYTGl9srcWIT9iGSYD53V7Kyvjumd0Y3Qc3JLnuWZT6Oe3 -uJ4xz9jJtoaTDvPJQNK3igscjZnWZSP8XMJo1/f7vbvD57pPt1Hqdirp1EBQNshk -iX9CUh4fuGFFeHf6MtGxPofbXmvA2GYcFsOez4/2eOTEmo6H3P4Hrya97XHS0dmD -zFSAjzAlacTrn1uuxtxFTikdOwvdmQJJEfyYWCB1lqWOZi97+7nzqyXMLvMgmwug -ZF/xHFMhFTR8Wn7puuwf36JpPQiM4oQ/Lp66zkS4UlKrVsmSXIXudLMg8SQ5WqK8 -DjevEZwsHHaMtfDsnCAhAdRc2jCpyHKKnmhCDdkcdJJEymWKILUJI5PJ3XtiMHnR -Sa35OOICS0lTq4VwhUdkGwGjRoY1GsriPHd6LOt1aom14yJros1h7ta604hSCn4k -zj9p7wY9gfgkXWXNfmarrZ9NNwlHxzgSva+jbJcLmE4GMX5OFHHGlRj/9S1xC2Wf -MY9orzlooGM74NtmRi4qNkFj3dQCde8XRR4wh2IvPUCsr4j+XaoCoc3R5Rn/yNJK -zIkccJ2K14u9X/A0BLXHn5Gnd0tBYcVOqP6dQlW9UWdJC/Xooh7+CVU5cZIxuF/s -Vvg+Xwiv3XqekJRu3cMllJDp5rwe5EWZSmnoAiGKjouKAIszlevaRiD/wT6Zra3c -Wn/1U/sGop6zRscHR7pgI99NSogzpVGThUs+ez7otDBIdDbLpMjktahgWoi1Vqhc -fNZXjA6ob4zTWY/16Ys0YWxHO+MtyWTMP1dnsqePDfYXGUHe8yGxylbcjfrsVYta -4H6eYR86eU3eXB+MpS/iA4jBq4QYWR9QUkd6FDfmRGgWlMXhisPv6Pfnj384NzEV -Emeg7tW8wzWR64EON9iGeGYYa2BBl2FVaayMEoUhthhFcDM1r3/Mox5xF0qnlys4 -goWkMzqbzA2t97bC0KDGzkcHT4wMeiJBLDZ7S2J2nDAEhcTLY0P2zvOB4879pEWx 
-Bd15AyG1DvNssA5ooaDzKi/Li6NgDuMJ8W7+tmsBwDvwuf2N3koqBeXfKhR4rTqu -Wg1k9fX3+8DzDf0EjtDZJdfWZAynONi1PhZGbNbaMKsQ+6TflkCACInRdOADR5GM -rL7JtrgF1a9n0HD9vk2WGZqKI71tfS8zODkOZDD8aAusD2DOSmVZl48HX/t4i4Wc -3dgi/gkCMrfK3wOujb8tL4zjnlVkM7kzKk0MgHuA1w81zFjeMFvigHes4IWhQVcz -ek3l4bGifI2kzU7bGIi5e/019ppJzGsVcrOE/3z4GS0DJVk6fy7MEMIFx0LhJPlL -T+9HMH85sSYb97PTiMWpfBvNw3FSC7QQT9FC3L8d/XtMY3NvZoc7Fz7cSGaj7NXG -1OgVnAzMunPa3QaduoxMF9346s+4a+FrpRxL/3bb4skojjmmLqP4dsbD1uz0fP9y -xSifnTnrtjumYWMVi+pEb5kR0sTHl0XS7qKRi3SEfv28uh72KdvcufonIA5rnEb5 -+yqAZiqW2OxVsRoVLVODPswP4VIDiun2kCnfkQygPzxlZUeDZur0mmZ3vwC81C1Q -dZcjlukZcqUaxybUloUilqfNeby+2Uig0krLh2+AM4EqR63LeZ/tk+zCitHeRBW0 -wl3Bd7ShBFg6kN5tCJlHf/G6suIJVr+A9BXfwekO9+//CutKakCwmJTUiNWbQbtN -q3aNCnomyD3WjvUbitVO0CWYjZrmMLIsPtzyLQydpT7tjXpHgvwm5GYWdUGnNs4y -NbA262sUl7Ku/GDw1CnFYXbxl+qxbucLtCdSIFR2xUq3rEO1MXlD/txdTxn6ANax -hi9oBg8tHzuGYJFiCDCvbVVTHgWUSnm/EqfclpJzGmxt8g7vbaohW7NMmMQrLBFP -G6qBypgvotx1iJWaHVLNNiXvyqQwTtelNPAUweRoNawBp/5KTwwy/tHeF0gsVQ7y -mFX4umub9YT34Lpe7qUPKNxXzFcUgAf1SA6vyZ20UI7p42S2OT2PrahJ+uO6LQVD -+REhtN0oyS3G6HzAmKkBgw7LcV3XmAr39iSR7mdmoHSJuI9bjveAPhniK+N6uuln -xf17Qnw5NWfr9MXcLli7zqwMglU/1bNirkwVqf/ogi/zQ3JYCo6tFGf/rnGQAORJ -hvOq2SEYXnizPPIH7VrpE16+jUXwgpiQ8TDyeLPmpZVuhXTXiCaJO5lIwmLQqkmg -JqNiT9V44sksNFTGNKgZo5O9rEqfqX4dLjfv6pGJL+MFXD9if4f1JQiXJfhcRcDh -Ff9B6HukgbJ1H96eLUUNj8sL1+WPOqawkS4wg7tVaERE8CW7mqk15dCysn9shSut -I+7JU7+dZsxpj0ownrxuPAFuT8ZlcBPrFzPUwTlW1G0CbuEco8ijfy5IfbyGCn5s -K/0bOfAuNVGoOpLZ1dMki2bGdBwQOQlkLKhAxYcCVQ0/urr1Ab+VXU9kBsIU8ssN -GogKngYpuUV0PHmpzmobielOHLjNqA2v9vQSV3Ed48wRy5OCwLX1+vYmYlggMDGt -wfl+7QbXYf+k5WnELf3IqYvh8ZWexa0= + MIIOgwYJKoZIhvcNAQcDoIIOdDCCDnACAQIxggEwMIIBLAIBAoAU08PI+CBUqOd4 +Nbte7MLw2qCYn1UwDQYJKoZIhvcNAQEBBQAEggEAU1y8uuEyQMXa7eGlK/PB5F5+ +ZEYBHRpBpSKlyIpTJhN+emNLtuPRlIJ0L0zlfkvjMmnoApXujUb91tnHVQu2tUV4 +9Ws3goQjqIb6baQmxf8pctsL56vHts763Wl+AwiFLc7twoq/4FmmqwvFzxHE+c2o +IyxxYY72ZNorN5sux0b+ghEeZHkdds6uR/DHtht+zCy/JP63Phf53dAoUoO4p9Ym 
+WJhe2Mccv9t/yrtneVEIw/p1GqUPSY+tiGMNMxNvXlUrtdoaUzyzzXmqVbMXb6PB +bWFtkkRJBCMYA8Ozh4La6y8Y1jgFj6vCkoxX3s9GVQbpeyon7leanAiHwArgejCC +DTUGCSqGSIb3DQEHATAUBggqhkiG9w0DBwQIY87YJhlLuuSAgg0QnoSp+Z+aYRAI +uNaSDIyvQ/1/xYMW6TCqp19yiOGRu5bzDNX0tKN5cCLIvRX5FZmLbLbApziZlMsV +wrHCmVBnN8XYCdZsK+Wy39ORULAfurkjem6arn/NFnfN9DLiSEYwKbSC4VNegfkT +lJlgnSVUs7Z6v86YUEuwBnmvyCDbIit3PbfKJzaCr9DSPXKwBFRZqTTsFWovBGaA +cQvbuqxbbkm4cNYmwmT84TXhjYDuTfP8KEPgdBD1F8cqB+e6OuQSG3N+tBHKi7DH +Gc+30IimJVcrwbPCNDlteHHTLxaeDM4g3eoyj7B6J+/kAMLdoWuH9kwdr75Dd5OJ +SGY7utJ+v4A92SKc7G01tQnHZYOxn+JFKWQ4y/CR2lTtYfhh8pd9jSSHsg0jGtKs +Zte/mpfrOHTpXd3K7F1/UiXTRNWbfy/7pBWPdqgSaOAuVH180VAHCDnaOtvf2w7L +tJN74gesbcwPgQiAiD9um1eOqOMObu3gqXdeIkMksbhrTSOzLuO8c3t0R+8lL6QE +2K54t7PMDQ8ScmktNMWG9heBbZmAlkLZ2VK+jfGpbVEGRSWKRkpBMQOqLGh7iRkv +EPtr44/F5cWwXVN6ofCg25aGwLrAaD9hlprGNByjGezjrFxj4NSDyKYmjhfF4+RA +CfEN/j19OadJgY8ByH+L190VOOc3Xcf0aiFJPqV+MmTm0QcOmaOIPFfwRHjWiuS1 +K5kzX15uDgIZED2NWvJtwyuJ+p8xcWtmdE0nGxhOHV+3ZZu8WZ9Qv7LU2eSJQ5De +5uzb5sDzVZI8zfQ6LX2nF7ilntzxzODcv5Eoor8NQAU5xPKvb66aRa5BV5xzCl8A +/FY61ztGpCD4DfPHFpCldcHKCPk1qzu/7kL3LQ49DV5GcVwzzanHQaINWo5xhUu1 +XaUcWe7LVOPYvqCrSF8v3dB56RHF1MJMxCNdZo1oVup3FjIU3N4ZUl5qX5Ixetp2 +ftUZHsw3r+cotronsrne8R4gl3PejIc6rVmz7cpnPY6l1T70QEEtnxcHgqIFZeCB +n3IHOBOlaS3DbtOVzclySUF3z1+Gtk8Entc1ksNX2MwknFUM2AjQWuvjVDm/ZKaY +kPtbr52IDKURYzDecuBeTuZCq7ztaOqdc0D+sLFn4Z8CBzl0OdOrDU25h/wir/r7 +DiCGFAGuPIVtsaO0C/aLCM0IJlDW9Lj9YMXy5jZ4ziRT6CmarmjO+BLBL9yHK7pR +rCEJoYRZUyw6nAZNW3EkftxMWJNe00SkJyccMPLgQA6ORnuHC3wo4EBH62vBA4vq +JszIKm8xselXbAQoyeRtXBVvFEV7gz/3US43K2HoHi+Z9N60LRw7V+aihz+nKTnC +lioA+owDvgsJmVwuERse8ZaUwXigfKyCUnrbEAYFeSIQyvKs0TG6pAGm2ZjqFJw/ +L0HLPQVUf6HLZY7HD/xCz21X3mL28VZ82Fr/luOqIk187M4CnyudmZX64tS/o+TR +n9lSJhV4H6y5WCCTSnyGnjcLSm5lMg9H+4vwRB95qfKS9B8ZLSesBbk/VUwCw1fw +IeR2S1S9PUO+J0lUKGWWrBjDNKIkR5vVLXyazO+BFz6HIq3U0Df9Gya3kng4BfZK +a3X9ALP1PEdfFeRyH7T83NN20686Q1uSzkKIKmKYp5YRuUsZdrGSSIbgO5UlWayF +YWQPIrpTy+v2lP9la9YLPdSWG0a/pMA5BFzovHgSJ733yowmw7sqn2wsZyiMTTOy 
+lbF7im1hbB3bfzow6SA8IE7O5XiAIyIk35HNJswPMkJWQzzuwNGKIla3f+HfPaRO +7weJPIEeQr7jUdgiQLl9A9/kHdp8jMy2jwrys6LY9rwEMAodpaN/yXYF9oOFvBsC +75az848gx7OTB/OcBKFNkeKkdWYo3GYP0DwzTcV3sV+bIllKGzGhuN7KOyn7XLSN +ZG2kEm/+s05DdxpagcGyAWKT6myDjuMo/lAll/A3bnmwrP/I5YO0bLn2cmEq6dGx +AcWC5eELHoKo9hv6pjU9BszkHIgMq2B6Oe35xnAi36RlarOU8D4+xop3IqN2Jy65 +eec15LopFUrCcVgSddf7h+qS0jQGiEPuUNZAuZBA0ZVmHzDtkHJqdSpSAXTvykVC +GIPbCWce/0X9UxxrciJ7foXebz7A9b1dkEMI0UCNBkiO5kGVJBBxGcHOtYvzWc9+ +oRhN68tOksmNFiNIuxTRG1iariPQrDocbsEy+yDDmSxJPZ4wNjPofjZ1XXaXkjs4 +Q79ptA8JLwzHv7dRCsV+r3GUllIn5TOb9adbIowmZG+nSWq3vE1AoHgymwYo064p +ZlcrtsZRfo9SeqMf3aAOgQtYDpCi2QhCipQYe0IFYWdShdQzxqXyCObm7zey6PnI +4LZ2J56Z8QXPloo8LfsmcqILWEMOxCc66k5+QFb/MKDV/lYtWZzTES/TFhRdNydw +yCdizmdTWo2wfk9YU/pcwRZUAzhk+/JQJA0tef6kyUv+ozQue4JVw8UBRoWJRrXf +mO4kGeEpoVu8Hlk3XVeEQTEMP8gre2t1WSQhgRuUPWHvsVMjRfn4K8rk4MxU94Op +XselOgz+E0n3XpwHh9gcv43t+qd5YBpE3uAI11hUJpZqsjAo8AiAXppzXZQ9Xx66 +duz3UZLobVZL5CwFuCiaE3b3rx5Qlt9SKNQA8aG6e6N1hwHzl69zT1BN2ZIvrSuL +ihtQ4E7D6KlEWhPV2c12tMgiDs1CTbOyY5uX8Q+dMilp1Y/5iC6LwzAjJ8IvhtqY +NniVsVocO9uyRe5cYPLM/F/4rcnnmoIeTbPeGiI91vGnLH+wrgZ/HSntN7C5nG6s +oay685GW620S0Ac71IcRZajNTM7Rfc9JpCNzwb2WnZw4LKyybfXcHSStk4aqw8P+ +oRsOLgRLO4m9CYnsJBcVX7oF+/IUWyPfL/4sAIUIF+7mXP+Z18paTmbZRIrvjwcA ++QhctZXYVSeUQE4RtLu7pKxTYlZZesZqVhEXj733RMwgYuQecqCMTcF6StpEsKPs +BUZDXZZrCl9kUMMB7m5bsnBGB3o/QbyS/hkNwI8pVmQHNIVKdKOcxH0cCRouKUH1 +MzYxuZfVS1dvgkhVhPeySy1AZ2A/oBFFz2PWxzftKwaZ5KwDx4VI8x3yYaMuXmvK +cyIWS+2s+Ky/ofOOAJPYiv2aaKtLnOjo+78oLyAm7NVNaQ31JFVPAxCbmEnIu4Ai +GngAH4hmVp/f2/pfGq/OI/HFFeAwwsxUKWOsLu+Di7QcT81PrkHVFadmLXxA9iyc +UmT5Oqg0h4V5PWwaGVfgDMFs7VO0dThZ+cjXLGWvC2bTWpvxJVsgq+J/MCIZsiSJ +eECBhDvvsKCmigM9+qQ7iPjLWP2DL+CvbLXWLVuaj+rjwpoAx+2ALfWP0aRsetBk +3vbKm4Pm92401TyGmV8HJfpgMrjbScrmsdv+10ljj3eigaUGGzS0UImJIXEerbia +3m31u8IaYF0fFsONHa0+0RuEhFVhtgx3ojI9wN6OM4sxIgDMY+Iyrny/Dn4qlVJo +bmW2hahljpIgT0x9KwZgflyM7VVckRIk+SzJDmqqYdEVk6CnxpKcVJgaD3z/Q4ez +0doYtQeeK7W4EWNJACosqMCFKnFZlOyMELE0gyhdeCgM1xXOU4nxzzUJXFAKukSi 
+6RQANERsNoXnkfYd6Pt39k4IaBkJ3/lmBVdONqoPDjwDJT887kyFo9GfxgOZ+ZAS +KlVD9YiDSXkgq4/KGq8zNb0jZiZjd02uzzYVvLfKx/TGhVy5WEnf2IeC0gLZ3wNI +jo0894/Ss0uXbbl5HoOhLdOQbYuZ5QB5S6W6TbcM5Mrt9S0rkJY7xYxnlmXTQ3A7 +q+wfi5IIAIYuRd1uwZ/msCF6L2UM6y0+So5P0X8YVY4tT1Oq8AxjJVLVMZVBPq7b +nQwChfVf5HOEfNehO52UwRA1C6IGH9/2T6lPrJOuZp7oxUE0CtVYNDbqcj9lbb7A +cEcQjQzgYnH3xmj1ZjBpyQ9zL5o0g7ZTwAq8zA1LhMBjrgSlYd2s3947Ii4xBaof +CCA8OVDeqHTqVxFQQk5rrHCDPOSHLCXAqqArXb5yl90Vk1wU7BnPe6iwScCcPbWd +rkw8twZYLNp7sCDTZ5es77Zzs431R1sc8pL/SOwbv9o30cQfbW9FZAhboyI3o/ug +RdKYlB72y8wN8ijh/UENo3W89MzHtbZ1XYMCauYn9zDUGci4Bnziqfpd/dV+CUeC +Fs/DP5f2OkiinHRmf060xj7HN7Q3SWziFbMRVO85/e7jjUcNQyBqikHXBl3V2hpM +hRPsObhPAoLVxz8fBVMYfxR1E7wTpv5KWzvWSPh4QUX+gRpCYL/h/WJ6qUqjeXMP +1u6vM7uX9+OjNkEAql9L9cPmm1GIam8yBoRsP/Om0VFKDZUvhTo1QC1Q3finiSm4 +89s7tlobx0KafcD+yNKpSFtq/XUIv3Q= \ No newline at end of file diff --git a/tests/data/wire/certs.xml b/tests/data/wire/certs.xml index 5908de7938..2db71ca055 100644 --- a/tests/data/wire/certs.xml +++ b/tests/data/wire/certs.xml @@ -1,85 +1,85 @@ 2012-11-30 - 3 + 1 Pkcs7BlobWithPfxContents - MIIOgwYJKoZIhvcNAQcDoIIOdDCCDnACAQIxggEwMIIBLAIBAoAUZcG9X+5aK8VZ -FY8eJV9j+RImq58wDQYJKoZIhvcNAQEBBQAEggEAn/hOytP/StyRuXHcqFq6x+Za -7gHfO8prXWdZW4e28NLt/x5ZOBHDDZ6buwwdXEZME0+RoiJvLqP2RNhZkEO8bkna -pS76xLZE4NXyfxkeEs1vJYis0WJdt/56uCzBuud2SBLuMWoAWgF5alokN0uFpVgm -CKCos+xv6Pisolc6geM8xQTYe6sLf5Z23LWftWfJqzuo/29glCCre7R80OLeZe5w -pN6XztbYz06nhVByC35To8Lm0akWAAKU7sfqM1Nty4P0rwUJPKXo42uN1GKYbDbF -x8piCAd+rs+q4Alu3qK/YaTPpMb2ECRMH6CYB8Klf/CbuWykkfS8zrsnpXT1kzCC -DTUGCSqGSIb3DQEHATAUBggqhkiG9w0DBwQInjJWFaJcZz2Agg0QX6NlJUH17o20 -90gfjWV01mPmzLKx71JT+hyzKr5vHywDSRI/mdb3RqA59ZrIKeyWr0HXEOuABlul -nxjc/Rfk1tiLQwh0iqlOjlMtRsxS6yDA0WNwK2Y9gaXcdgDm5Vioai18l4Pd0qzK -fsof5a/jEJyunW1CZK19QciwfQ2pS8QbRYgeLRZRft2I+kv6cWXlGS6YrMqKQC8t -QMxnXR4AuzVllPLbbIgtM3l9oS+6jl7jKyKogeroJ9FNLjoMBJLldRLGPRhkCmdJ -Z1m+s/BAVUH08qgj2kmHzucdULLjlRcmma9m/h91TcQCXHAavf7S+U9QwIyGRh83 
-t4Y7EqbQ93mOgjajFzILSL7AT/irgJpDu6CJqMu3EMNDA0mjxn5Cdvj40sufL/g3 -UyBwqosmIwAPzNDmhPtTKvHaHfGY/k8WhoIYfAA5Lhq1z22/RODZOY0Ch2XyxQM4 -s35eppe6IhnwyMv6HfrCrqE/o/16OrvvbaFQTeTlMvU0P7MIR4pVW6tRq4NEa5Wx -JcvGutuMuzH1VMcqcKdc7wYyOqDOGU43kcV8PiALTIcqrhD8NDrKks1jSkyqQw2h -sJQckNaQIcCXkUQecQa2UGe0l4HDSJ5gAETSenjyLBKFHf3hxiWBpw446/bODgdk -0+oyreZqMpRz3vn9LC+Yt7SuVbTzRdx7nlKIiNvo8+btOuVm44evchLFHAq3Ni8D -c+tP/wss3K4Xdp+t5SvEY/nLIu11Lw44HDVMYTuNz3Ya9psL70ZLaLZM8NromnEl -CUMRNTPoOC7/KDRh2E9d6c1V4CC43wAsRhksGJnSYoiSVAhaVgLqVFQTsqNHxmcg -3Y9AEBVzm3fZg6+DxAYu+amb+r8lk0Pp+N1t6rVbKXhkbAAxg0UDO3pY8Xcz0Y3g -Qdd5rnHh1rJrehku7zTHvQaXEddUWmCoUGIXJ+bt4VOhErL6s5/j8GSG0xmfxgSE -jnGj4Jwd0Vv19uZjsBDQ54R88GcA9YX8r48gr9JAwplrQ50m9KX6GwQhDRYKN/Dh -zOt9DCUkqMqdi5T4v2qNTfkL7iXBMhsSkeYUQ/tFLyv4QQyli5uTUZ5FNXohOVAx -TNyV9+gcV5WiBR0Aje6rwPW3oTkrPnVfZCdBwt/mZjPNMO5Se7D/lWE33yYu7bJ+ -gaxRNynhEOB7RaOePzDjn7LExahFmTFV0sgQxwQ2BYsfI22cdkAf6qOxdK/kqiQm -lgzRpDjyPIFhaCCHnXyJdSqcHmDrCjcg2P6AVCDJGdFOBvupeJ7Kg7WV5EY7G6AU -ng16tyumJSMWSzSks9M0Ikop6xhq3cV+Q0OArJoreQ6eonezXjM9Y865xjF80nJL -V4lcRxdXfoKpXJwzc++pgkY9t55J0+cEyBvIXfKud1/HHOhewhoy5ATyi9LLM91n -iW1DaQXlvHZgE7GFMSCVLxy6ZopBbm9tF0NQDFi8zUtGulD3Gkoc/Bp+DWb2vsX4 -S8W9vByNvIz/SWOGNbEs2irTRXccMAL7JHJ+74bwZZi5DRrqyQWHCn/3Ls2YPI6z -lnfl15EE4G7g3+nrvP2lZFBXjsdG/U3HYi+tAyHkRN3oXvgnt9N76PoY8dlsNf6c -RuNqgk31uO1sX/8du3Jxz87MlzWiG3kbAHMvbcoCgy/dW4JQcM3Sqg5PmF8i9wD1 -ZuqZ7zHpWILIWd13TM3UDolQZzl+GXEX62dPPL1vBtxHhDgQicdaWFXa6DX3dVwt -DToWaAqrAPIrgxvNk5FHNCTEVTQkmCIL5JoinZSk7BAl8b085CPM6F7OjB5CR4Ts -V+6UaTUZqk+z+raL+HJNW2ds1r7+t8Po5CydMBS4M/pE7b/laUnbRu7rO8cqKucn -n+eYimib/0YuqZj9u2RXso4kzdOyIxGSGHkmSzYuoNRx80r+jHtcBBTqXk37t0FY -X5O7QItCE+uwV1Sa12yg2dgJ6vKRPCEVyMoYUBwNbKEcw1pjG9Em7HwjOZK0UrO1 -yKRz6kxffVKN9Naf7lOnXooVuedY/jcaZ2zCZtASlOe8iiQK5prM4sbMixMp9ovL -tTxy9E9kgvaI/mkzarloKPQGsk0WzuH+i39M3DOXrMf5HwfE+A55u1gnrHsxQlxp -z5acwN42+4ln6axs4aweMGAhyEtBW8TdsNomwuPk+tpqZXHI2pqS4/aVOk8R8VE7 -IqtBx2QBMINT79PDPOn3K6v9HEt9fUHJ2TWJvKRKfsu5lECJPJSJA8OQ7zzw6zQt 
-NXw8UhZRmNW0+eI5dykg+XsII7+njYa33EJ1Sy1Ni8ZT/izKfrKCwEm44KVAyUG5 -qUjghPPMNQY3D0qOl54DRfGVOxbHztUooblW+DnlLlpOy/+/B+H9Dscxosdx2/Mo -RftJOMlLqK7AYIYAlw1zvqZo0pf7rCcLSLt+6FrPtNZe6ULFUacZ3RqyTZovsZi5 -Ucda3bLdOHX6tKL21bRfN7L0/BjF6BJETpG3p+rBYOyCwO6HvdenpMm6cT02nrfP -QJtImjeW1ov6Pw02zNlIZAXFir78Z6AcMhV2iKEJxc1RMFBcXmylNXJmGlKYB3lJ -jWo6qumLewTz5vzRu0vZCmOf+bKmuyVxckPbrzP+4OHKhpm95Kp6sUn2pvh0S8H3 -w1pjfZ9+sIaVgMspfRPgoWTyZ0flFvAX6DHWYVejMebwfAqZaa+UAJJ6jWQbMNzo -ZtOhzCjV+2ZBYHvSiY7dtfaLwQJeMWEKIw32kEYv/Ts33n7dD/pAzZu0WCyfoqsQ -MEXhbZYSCQTJ8/gqvdlurWOJL091z6Uw810YVt+wMqsBo5lnMsS3GqkzgM2PVzuV -taddovr5CrWfAjQaFG8wcETiKEQFWS9JctKo0F+gwLwkVyc4fBSkjVmIliw1jXGu -Enf2mBei+n8EaRB2nNa/CBVGQM24WEeMNq+TqaMvnEonvMtCIEpuJAO/NzJ1pxw0 -9S+LKq3lFoIQoON5glsjV82WseAbFXmynBmSbyUY/mZQpjuNSnwLfpz4630x5vuV -VNglsZ8lW9XtSPh6GkMj+lLOCqJ5aZ4UEXDSYW7IaH4sPuQ4eAAUsKx/XlbmaOad -hgK+3gHYi98fiGGQjt9OqKzQRxVFnHtoSwbMp/gjAWqjDCFdo7RkCqFjfB1DsSj0 -TrjZU1lVMrmdEhtUNjqfRpWN82f55fxZdrHEPUQIrOywdbRiNbONwm4AfSE8ViPz -+SltYpQfF6g+tfZMwsoPSevLjdcmb1k3n8/lsEL99wpMT3NbibaXCjeJCZbAYK05 -rUw5bFTVAuv6i3Bax3rx5DqyQANS3S8TBVYrdXf9x7RpQ8oeb4oo+qn293bP4n5m -nW/D/yvsAJYcm3lD7oW7D369nV/mwKPpNC4B9q6N1FiUndvdFSbyzfNfSF9LV0RU -A/4Qm05HtE3PAUFYfwwP8MDg0HdltMn83VfqrEi/d76xlcxfoIh2RQQgqxCIS6KE -AExIY/hPYDVxApznI39xNOp7IqdPEX3i7Cv7aHeFAwbhXYMNnkfFJJTkHRdcRiJ/ -RE1QPlC7ijH+IF02PE/seYg4GWrkeW3jvi+IKQ9BPBoYIx0P+7wHXf4ZGtZMourd -N4fdwzFCDMFkS7wQC/GOqZltzF/gz1fWEGXRTH3Lqx0iKyiiLs2trQhFOzNw3B7E -WxCIUjRMAAJ6vvUdvoFlMw8WfBkzCVple4yrCqIw6fJEq8v0q8EQ7qKDTfyPnFBt -CtQZuTozfdPDnVHGmGPQKUODH/6Vwl+9/l7HDvV8/D/HKDnP581ix1a3bdokNtSK -7rBfovpzYltYGpVxsC6MZByYEpvIh5nHQouLR4L3Je2wB3F9nBGjNhBvGDQlxcne -AAgywpOpQfvfsnYRWt2vlQzwhHUgWhJmGMhGMmn4oKc5su87G7yzFEnq/yIUMOm/ -X0Zof/Qm92KCJS7YkLzP1GDO9XPMe+ZHeHVNXhVNCRxGNbHCHB9+g9v090sLLmal -jpgrDks19uHv0yYiMqBdpstzxClRWxgHwrZO6jtbr5jeJuLVUxV0uuX76oeomUj2 -mAwoD5cB1U8W9Ew+cMjp5v6gg0LTk90HftjhrZmMA0Ll6TqFWjxge+jsswOY1SZi -peuQGIHFcuQ7SEcyIbqju3bmeEGZwTz51yo8x2WqpCwB1a4UTngWJgDCySAI58fM 
-eRL6r478CAZjk+fu9ZA85B7tFczl3lj0B4QHxkX370ZeCHy39qw8vMYIcPk3ytI0 -vmj5UCSeQDHHDcwo54wi83IFEWUFh18gP4ty5Tfvs6qv7qd455UQZTAO7lwpdBlp -MJGlMqBHjDLGyY80p+O4vdlQBZ1uMH+48u91mokUP8p+tVVKh7bAw/HPG+SQsuNR -DXF+gTm/hRuY7IYe3C7Myzc8bDTtFw6Es9BLAqzFFAMjzDVz7wY1rnZQq4mmLcKg -AAMJaqItipKAroYIntXXJ3U8fsUt03M= + MIIOgwYJKoZIhvcNAQcDoIIOdDCCDnACAQIxggEwMIIBLAIBAoAU08PI+CBUqOd4 +Nbte7MLw2qCYn1UwDQYJKoZIhvcNAQEBBQAEggEASTTfHNyY+9hdXd+Eqtqk+yPb +RA7rRXWR8tQAJsdy3zAlu8WHymq945fnsf0bAW4mODIPYhhevmdo5VaI54AzAWhk +EfJvtRQlZZEMGZVKgUSwP4AG6cFaSnJuAYbi27nffM45PgD26O2WjOhnmM7minEC +31/wUoxjxVOxIc8x+Ngo+TquyBeaK1iXcchwIUnbM0xRYMfccOAEhe/iytKFPzdg +DJbDk+KbVGaUuUfhF+o4mMyJNezMUFxWkePcUgP12li57GTJSIyi8OQaFUu1qh0L +KzQ2sYl8U0WmWQBhXqvuug47WI/6XrRDpKslIV1aV4XxD1Or6H3nf0fULjQZajCC +DTUGCSqGSIb3DQEHATAUBggqhkiG9w0DBwQI+4Ch/cEogOSAgg0QvlelG9yDK2GE +XX1wn8Xw0wCt+zIceXs8C6QuRSmZLEkZVv8Y+duMwi2A0tcg63HOmY2AfIPvTTt8 +eto3YwIklrfF20jBvCg/pT3kfm6TICWmMNd5XesTq8UNmkqzJQQ84L3Kbs/ix2pG +9RaeXkrg0VO7FBDVH8b+jIT9IVDAEXgBQVefcCImVZ9L2hQWNABFrFXAQSTKjfFJ +IEOfXUhTiH434V1RKJczhFiH5SNZ0kbaRjmaQkXqbXQ5kKoq8VNkmFc6vPCclTmq +QJFfIUTepljWW/HuVkUycNYQQkblmWNF9FEwSx++x3Tz1FLR3UlzOkJCqr+tS3jv +WFnI16VlOHaaHA++YKhW1PUujJcEdZaXBE0FC6JZF7IOAOjSdLSmRL9yU95erfgZ +hRo2FB8EWVZitIG+DPU9vU59chGpqXYzZU4/aTpedGeWSZ9GFXRqwb6htmajjTWu +l5fIME3hWt7kcejpuXCTDcdG4YcbngZu4hcepMrUhm9g2BdmIDb1YiB7290PMop8 +4nNo97tSBvhzk300cg6+pfxy1iAv3++g/ggOI+Y/gFmgN88mmBMWm0+mocJ0SZGY +3+8K/8pDpJpfAAXSjayl7T2UXUdJe8fpOtetiHUr2zIbZXlM4IQw+0UMAVjTiaRT +BIDGoPEcpCcxqPlSTTEie166uzzPXG9skVgennjN6YopwMC/WPaFRJu/eTlQOqlB +EqvK9TKJG8u2yp00J04MGYXluY4l/o3/KLpT0mCOeOJm3KerfwQ/jU2oHHmvIATN +XYy32ULqx/CjL+N3ax0Nu+UrgMQPcVhrTN/7lnZpFLYwXetGzH/4jdNfIfTc4yGn +0GlVT6cVgJyV8wyYpbqCxHtCW83II8vXLjTfeIffHBoJU0fMMPWEIxRuMQSksm0H +F1u/rfGVSXnueshbJUD3pnvTiLPuWcOexSxP+B8BCNfi21jX5Ha+U9RKrKbHc4h9 +PkiWxU6ZEqCBkdP9ssKnmMKMsrC7sZRoYziHNeqlZp/GFQmkI+DeFlqSPn3Lv9Or +HF3bZokZCf0RGEkZDPrigaiEoL7PH/TtVZF8miL4JCLB0FVB08vWeeP5zjQT4H6J 
+jSC2pw+5bA2UWGshgsKKAJJihYcOuybtzglh7nqmSSZcszz3GyuDhdR8KDrYwChU +Hn13+rSWAbbqtxSyPc5fd22Q4Do2aD6PVdRadHjG0qeE7Dq46YHT3Z9KF0nQTLk8 +uYq8hL5+jQEgTnUB0yJTKdEcg05TyrMfNHWuM1pru0bqpf25vpwP5t+Sd/vgWJNc +XtRLWrMdYBuSG9zOyLaH7bj0rcMhN3ULisKej9IT/xHOWSXXZjNoe1P3q9fvtMbg +ZXAale/xJ6rXq6mLvZXivJfQJkPbSV7fByPPKO6TMnHbNEgLOGO3XtHEwC24JKup +C0ohq03QqQHEisS9Mk5LvWmSchXR3/7vCtJFyOemQom7nCy8cx4Y1JGmZ4SGSaEs +QZs7GC7Ftb/X82LRuknvS19ApOVFEs4/8t+LviD3x7Z9quVv+fZvydhzNKGRR6kQ +fYZwK7rqqkvuFKgXqNbzlrtlUqOUPXJgdO7QHOtU8z+k2NzBWfOp6j+Ef8rc3GDU +HSVZZ/Lz0RWedxRC1zoZJSol7ckMxIGIpDhtb9xgDmaGKILWOR9k+wG6+7ywQ2LE +PB3myDOclvKUDyb/DqwRS9ch9yyYSmz8WXTgdSeyOjp8QT2JQuuOOhoooHuKSxAk ++7v/Fh5bNGtjHByuzMYSdLcWsLX+UohpDoc1heVgUA3R6EuIOJTA0nC653YmqIBp +R5rsT+esub/EndweZTacmc2nDJxTKdZgMvdwhnsOZZBGsOaD7MXAS5vCsze+PQmY +4+VqqWPASaclV6CygN4qSxmww6mVgmAgWVmJqfa6vOyb3zhx68TkNEp9rxJFcJSJ +NiTTvWe0nF+o2/a1HZ8rZFdf65KsqGSiqu/6HoUuFzWLxRCqSjB9RkfSqrDVAVim +pwL46zGRsqZV+5xrRQlxINNUbg/D11zcp1zdhQvhDrpBoLMjK7AaxA5msPYFy6Gm +KMRAG2kyi802W5CPZWkbiEoUA8vkiICuxN+Pdh146zk9Ngl4PC3YpNCMtXK11ifd +hYxmWqEuQ2AcdVTckosaWrFMn5MqEcR0aAXZbnjIMgTZ6SMYJBZMWjzJhe/UQjTo +vICK7KAH82chpW2hG2I67z7e1Nv930RyL6JbYI8mSqgccPBzOBUhpHvKDM59z8Nc +eStEYDdOcMz8P+c/H3Bh4WsyMWMOwWvjyy6GX5Bpl5z94tWFRn6W4FK5iDqp+HHm +v5W1+hlFBxXtuzBcSQntcj8LoExJ2mK6BhZkaeAESMqPvNeNFmhEVUGq0/+c7T4I +L+1YkQPcm/nIpwW/ITmkGmi5n5VsvbJFDbQe+h9LI2aqvWtzA0YT5Ed77Glbdbgq +qB8EyXdr1BsBb7s7bbXm4Wf8UJkCZESg8iQExkUk8HqMJRxjctjma0DyyKVi4j8Q ++BA1EYBEX37641S+ZR9fYmQeuULGkf3d+w/ttgvm6YDZivsZYWkTscX+lUtoHhWN +5EOAfllI0/DaGX15mGONMV8YA1PoCNEX3yKJ5tVGkxxUPK+Op7ZHvJmtb1fPMRRY +z+evQ+NTXTZZzdr3Kfs4yYbuXG4e1odm2v/zBKG7JF3yWPMtXZZiMks/BkaXTq1P +LrB0VxGcMsLeQ5HbbWJtchyCWyy63CNNbfYNohjxru52DjaAQlDKQT9pOiSmGJzb +7+hNnKYnOfo6Du2ljz7C9C4mxnRJsRA2O9Cw66J5XPy1W+2+RmvP72jXwoFWYzPq +jxNs2wxOYQjEDpXBTmCbW58F5cTbSTk3D15iCtYtf31tpuPpHEnz+2OvrX0WhygN +esZJnln2Tu2ut1pVhAuJDLZTj24Y4MP0nmDINuLDAkFji0CwjACvW7M9SbIOLLYU ++5JHHjB7wqaTXWFzpt/ZKXMXlwCzWjo3pDERbrpYbwS3GHqmtcyIZK4EA7Ulka5Y 
+7rLPWS5eKcjX3tp2FyX5pD52TpuUMPAk6vyefX+NznP7opvJpusHbkschojFVRDA +zHIpIGeWjYcWLk5YTPagzH8o+4ci1OEk+OMc8i6PxkQDeBw1RiCAFfBnKPCSEtFk +KJlw7fspk3/chA6mmvOHjkrQmUhUuDxAVGCVxl0K5LU3Y2IQxKGtCJk5YO4XD2e7 +5b0Ub+wy4Bb0l+z8HjuqEypFXDpQTd80NbhStZBgf2cB01elsqmKD9sT9wpFGKbC +VaatDLsLx4XrBG6ueoFKBgFL6l7afEPct8wuSoUrX5MAGlge5xzQYAD5spLlEa9G +Dt2KiPCsZcqWiaHiw5vk849FXUcfFfGl+0rEKhzcfUn3zkL1mGfqZ8Nf7qjMXdMy +dbUUQYMZXtMtK3fnYBnavgaUcu0bZ7Av+GVTQvDxfpzSeMW8lK7Ko6mINFQVC8dx +TEKWX+eApFUnTb11vNNxwxdOB2l5N+kfNLnVMhuYd7l8IHQxMMQTcf8hYu0owry6 +JkIdkhnF1kXVC2YWxo4VrDPwzkBWZE28ygBNhWgKCRhZnnbDEWPuqGP/IaLN4vww +1lqkZltqZDddXvOTXN/tZmkkQHt2uP264vqJB2BkGzxOll5UDQ8V3gXwheuUGxYc +gVL4ZJSKfHnUp6oRafIBnQs5RBvqdj2wewzT8AyPWImRG6fkYvsub8qIFqG6mu4Y +ixAQ9oTgg/KOXYNsfYuLGswu/aNnAqMEjfMerSx7dDu7teETkWb+IQJtodOdE/LI +yO/puds1M+V2H0TD36zXRyvEnpfm5BTURkxM8dI6meR37/JGtObtjg+Gzjpu6HGm +sIYyhG8bvV0Vkuip4bEgBB6T39dt/DeElHABthUmzFZe/QC8j7IJjyCz40JWDJSo +8wPtOoLnLeX0ynD8x8A5NsQk3W9fgEtv0WG6Uahs7P8GEZ5Uh9GPvWQpAkjKv7OZ +XVHJdTBMJICbB1Bzr8Nl0qPfQrhFzTNBMjBEwyaBpzRiV1hdTB2YPJPbjQQtQGkO +vT/EsAEWwSqDrQrDCfGRl7mhjdAsVFMjERdJE3/2TctY8VnLaRzUTSGkpCKxl+V4 +CLrBi96N80pxer5eKYtt5gtLFw0gZeeeqb2VDj6ChVnUjJ9r0TXzyy8ztwpB8X5Y +mZUDASD1acdZZOiEp69WA6juQR0EGKQT5phh+k0HbziW+bXMM+7YwiRJzwX4obnd +wgF+wyHht3Rzaptv5JSZMkc1RGSFIdWUwEp+3Ik6DGywiTcVkU65TQ7CsQJjmmkL +AChG7tUBI4KmolT9D0rj3A90//wl3ACkCFq94m0BZOFiimUXFjqux135P5i37XRJ +/8wgWZ0nzmXdFyTkEJEessAMbCkMiDHwaT7Lbs+S0qFeobh4DD3tkONnqSNa7md4 +945Z9MJiapzD3P33TvKhyQ0wHe5W0z4= \ No newline at end of file diff --git a/tests/data/wire/certs_no_format_specified.xml b/tests/data/wire/certs_no_format_specified.xml index 4ab91a8597..14a9f6525e 100644 --- a/tests/data/wire/certs_no_format_specified.xml +++ b/tests/data/wire/certs_no_format_specified.xml @@ -1,85 +1,85 @@ 2012-11-30 - 12 + 1 - MIIOgwYJKoZIhvcNAQcDoIIOdDCCDnACAQIxggEwMIIBLAIBAoAUZcG9X+5aK8VZ -FY8eJV9j+RImq58wDQYJKoZIhvcNAQEBBQAEggEAn/hOytP/StyRuXHcqFq6x+Za -7gHfO8prXWdZW4e28NLt/x5ZOBHDDZ6buwwdXEZME0+RoiJvLqP2RNhZkEO8bkna 
-pS76xLZE4NXyfxkeEs1vJYis0WJdt/56uCzBuud2SBLuMWoAWgF5alokN0uFpVgm -CKCos+xv6Pisolc6geM8xQTYe6sLf5Z23LWftWfJqzuo/29glCCre7R80OLeZe5w -pN6XztbYz06nhVByC35To8Lm0akWAAKU7sfqM1Nty4P0rwUJPKXo42uN1GKYbDbF -x8piCAd+rs+q4Alu3qK/YaTPpMb2ECRMH6CYB8Klf/CbuWykkfS8zrsnpXT1kzCC -DTUGCSqGSIb3DQEHATAUBggqhkiG9w0DBwQInjJWFaJcZz2Agg0QX6NlJUH17o20 -90gfjWV01mPmzLKx71JT+hyzKr5vHywDSRI/mdb3RqA59ZrIKeyWr0HXEOuABlul -nxjc/Rfk1tiLQwh0iqlOjlMtRsxS6yDA0WNwK2Y9gaXcdgDm5Vioai18l4Pd0qzK -fsof5a/jEJyunW1CZK19QciwfQ2pS8QbRYgeLRZRft2I+kv6cWXlGS6YrMqKQC8t -QMxnXR4AuzVllPLbbIgtM3l9oS+6jl7jKyKogeroJ9FNLjoMBJLldRLGPRhkCmdJ -Z1m+s/BAVUH08qgj2kmHzucdULLjlRcmma9m/h91TcQCXHAavf7S+U9QwIyGRh83 -t4Y7EqbQ93mOgjajFzILSL7AT/irgJpDu6CJqMu3EMNDA0mjxn5Cdvj40sufL/g3 -UyBwqosmIwAPzNDmhPtTKvHaHfGY/k8WhoIYfAA5Lhq1z22/RODZOY0Ch2XyxQM4 -s35eppe6IhnwyMv6HfrCrqE/o/16OrvvbaFQTeTlMvU0P7MIR4pVW6tRq4NEa5Wx -JcvGutuMuzH1VMcqcKdc7wYyOqDOGU43kcV8PiALTIcqrhD8NDrKks1jSkyqQw2h -sJQckNaQIcCXkUQecQa2UGe0l4HDSJ5gAETSenjyLBKFHf3hxiWBpw446/bODgdk -0+oyreZqMpRz3vn9LC+Yt7SuVbTzRdx7nlKIiNvo8+btOuVm44evchLFHAq3Ni8D -c+tP/wss3K4Xdp+t5SvEY/nLIu11Lw44HDVMYTuNz3Ya9psL70ZLaLZM8NromnEl -CUMRNTPoOC7/KDRh2E9d6c1V4CC43wAsRhksGJnSYoiSVAhaVgLqVFQTsqNHxmcg -3Y9AEBVzm3fZg6+DxAYu+amb+r8lk0Pp+N1t6rVbKXhkbAAxg0UDO3pY8Xcz0Y3g -Qdd5rnHh1rJrehku7zTHvQaXEddUWmCoUGIXJ+bt4VOhErL6s5/j8GSG0xmfxgSE -jnGj4Jwd0Vv19uZjsBDQ54R88GcA9YX8r48gr9JAwplrQ50m9KX6GwQhDRYKN/Dh -zOt9DCUkqMqdi5T4v2qNTfkL7iXBMhsSkeYUQ/tFLyv4QQyli5uTUZ5FNXohOVAx -TNyV9+gcV5WiBR0Aje6rwPW3oTkrPnVfZCdBwt/mZjPNMO5Se7D/lWE33yYu7bJ+ -gaxRNynhEOB7RaOePzDjn7LExahFmTFV0sgQxwQ2BYsfI22cdkAf6qOxdK/kqiQm -lgzRpDjyPIFhaCCHnXyJdSqcHmDrCjcg2P6AVCDJGdFOBvupeJ7Kg7WV5EY7G6AU -ng16tyumJSMWSzSks9M0Ikop6xhq3cV+Q0OArJoreQ6eonezXjM9Y865xjF80nJL -V4lcRxdXfoKpXJwzc++pgkY9t55J0+cEyBvIXfKud1/HHOhewhoy5ATyi9LLM91n -iW1DaQXlvHZgE7GFMSCVLxy6ZopBbm9tF0NQDFi8zUtGulD3Gkoc/Bp+DWb2vsX4 -S8W9vByNvIz/SWOGNbEs2irTRXccMAL7JHJ+74bwZZi5DRrqyQWHCn/3Ls2YPI6z -lnfl15EE4G7g3+nrvP2lZFBXjsdG/U3HYi+tAyHkRN3oXvgnt9N76PoY8dlsNf6c 
-RuNqgk31uO1sX/8du3Jxz87MlzWiG3kbAHMvbcoCgy/dW4JQcM3Sqg5PmF8i9wD1 -ZuqZ7zHpWILIWd13TM3UDolQZzl+GXEX62dPPL1vBtxHhDgQicdaWFXa6DX3dVwt -DToWaAqrAPIrgxvNk5FHNCTEVTQkmCIL5JoinZSk7BAl8b085CPM6F7OjB5CR4Ts -V+6UaTUZqk+z+raL+HJNW2ds1r7+t8Po5CydMBS4M/pE7b/laUnbRu7rO8cqKucn -n+eYimib/0YuqZj9u2RXso4kzdOyIxGSGHkmSzYuoNRx80r+jHtcBBTqXk37t0FY -X5O7QItCE+uwV1Sa12yg2dgJ6vKRPCEVyMoYUBwNbKEcw1pjG9Em7HwjOZK0UrO1 -yKRz6kxffVKN9Naf7lOnXooVuedY/jcaZ2zCZtASlOe8iiQK5prM4sbMixMp9ovL -tTxy9E9kgvaI/mkzarloKPQGsk0WzuH+i39M3DOXrMf5HwfE+A55u1gnrHsxQlxp -z5acwN42+4ln6axs4aweMGAhyEtBW8TdsNomwuPk+tpqZXHI2pqS4/aVOk8R8VE7 -IqtBx2QBMINT79PDPOn3K6v9HEt9fUHJ2TWJvKRKfsu5lECJPJSJA8OQ7zzw6zQt -NXw8UhZRmNW0+eI5dykg+XsII7+njYa33EJ1Sy1Ni8ZT/izKfrKCwEm44KVAyUG5 -qUjghPPMNQY3D0qOl54DRfGVOxbHztUooblW+DnlLlpOy/+/B+H9Dscxosdx2/Mo -RftJOMlLqK7AYIYAlw1zvqZo0pf7rCcLSLt+6FrPtNZe6ULFUacZ3RqyTZovsZi5 -Ucda3bLdOHX6tKL21bRfN7L0/BjF6BJETpG3p+rBYOyCwO6HvdenpMm6cT02nrfP -QJtImjeW1ov6Pw02zNlIZAXFir78Z6AcMhV2iKEJxc1RMFBcXmylNXJmGlKYB3lJ -jWo6qumLewTz5vzRu0vZCmOf+bKmuyVxckPbrzP+4OHKhpm95Kp6sUn2pvh0S8H3 -w1pjfZ9+sIaVgMspfRPgoWTyZ0flFvAX6DHWYVejMebwfAqZaa+UAJJ6jWQbMNzo -ZtOhzCjV+2ZBYHvSiY7dtfaLwQJeMWEKIw32kEYv/Ts33n7dD/pAzZu0WCyfoqsQ -MEXhbZYSCQTJ8/gqvdlurWOJL091z6Uw810YVt+wMqsBo5lnMsS3GqkzgM2PVzuV -taddovr5CrWfAjQaFG8wcETiKEQFWS9JctKo0F+gwLwkVyc4fBSkjVmIliw1jXGu -Enf2mBei+n8EaRB2nNa/CBVGQM24WEeMNq+TqaMvnEonvMtCIEpuJAO/NzJ1pxw0 -9S+LKq3lFoIQoON5glsjV82WseAbFXmynBmSbyUY/mZQpjuNSnwLfpz4630x5vuV -VNglsZ8lW9XtSPh6GkMj+lLOCqJ5aZ4UEXDSYW7IaH4sPuQ4eAAUsKx/XlbmaOad -hgK+3gHYi98fiGGQjt9OqKzQRxVFnHtoSwbMp/gjAWqjDCFdo7RkCqFjfB1DsSj0 -TrjZU1lVMrmdEhtUNjqfRpWN82f55fxZdrHEPUQIrOywdbRiNbONwm4AfSE8ViPz -+SltYpQfF6g+tfZMwsoPSevLjdcmb1k3n8/lsEL99wpMT3NbibaXCjeJCZbAYK05 -rUw5bFTVAuv6i3Bax3rx5DqyQANS3S8TBVYrdXf9x7RpQ8oeb4oo+qn293bP4n5m -nW/D/yvsAJYcm3lD7oW7D369nV/mwKPpNC4B9q6N1FiUndvdFSbyzfNfSF9LV0RU -A/4Qm05HtE3PAUFYfwwP8MDg0HdltMn83VfqrEi/d76xlcxfoIh2RQQgqxCIS6KE -AExIY/hPYDVxApznI39xNOp7IqdPEX3i7Cv7aHeFAwbhXYMNnkfFJJTkHRdcRiJ/ 
-RE1QPlC7ijH+IF02PE/seYg4GWrkeW3jvi+IKQ9BPBoYIx0P+7wHXf4ZGtZMourd -N4fdwzFCDMFkS7wQC/GOqZltzF/gz1fWEGXRTH3Lqx0iKyiiLs2trQhFOzNw3B7E -WxCIUjRMAAJ6vvUdvoFlMw8WfBkzCVple4yrCqIw6fJEq8v0q8EQ7qKDTfyPnFBt -CtQZuTozfdPDnVHGmGPQKUODH/6Vwl+9/l7HDvV8/D/HKDnP581ix1a3bdokNtSK -7rBfovpzYltYGpVxsC6MZByYEpvIh5nHQouLR4L3Je2wB3F9nBGjNhBvGDQlxcne -AAgywpOpQfvfsnYRWt2vlQzwhHUgWhJmGMhGMmn4oKc5su87G7yzFEnq/yIUMOm/ -X0Zof/Qm92KCJS7YkLzP1GDO9XPMe+ZHeHVNXhVNCRxGNbHCHB9+g9v090sLLmal -jpgrDks19uHv0yYiMqBdpstzxClRWxgHwrZO6jtbr5jeJuLVUxV0uuX76oeomUj2 -mAwoD5cB1U8W9Ew+cMjp5v6gg0LTk90HftjhrZmMA0Ll6TqFWjxge+jsswOY1SZi -peuQGIHFcuQ7SEcyIbqju3bmeEGZwTz51yo8x2WqpCwB1a4UTngWJgDCySAI58fM -eRL6r478CAZjk+fu9ZA85B7tFczl3lj0B4QHxkX370ZeCHy39qw8vMYIcPk3ytI0 -vmj5UCSeQDHHDcwo54wi83IFEWUFh18gP4ty5Tfvs6qv7qd455UQZTAO7lwpdBlp -MJGlMqBHjDLGyY80p+O4vdlQBZ1uMH+48u91mokUP8p+tVVKh7bAw/HPG+SQsuNR -DXF+gTm/hRuY7IYe3C7Myzc8bDTtFw6Es9BLAqzFFAMjzDVz7wY1rnZQq4mmLcKg -AAMJaqItipKAroYIntXXJ3U8fsUt03M= + MIIOgwYJKoZIhvcNAQcDoIIOdDCCDnACAQIxggEwMIIBLAIBAoAU08PI+CBUqOd4 +Nbte7MLw2qCYn1UwDQYJKoZIhvcNAQEBBQAEggEASTTfHNyY+9hdXd+Eqtqk+yPb +RA7rRXWR8tQAJsdy3zAlu8WHymq945fnsf0bAW4mODIPYhhevmdo5VaI54AzAWhk +EfJvtRQlZZEMGZVKgUSwP4AG6cFaSnJuAYbi27nffM45PgD26O2WjOhnmM7minEC +31/wUoxjxVOxIc8x+Ngo+TquyBeaK1iXcchwIUnbM0xRYMfccOAEhe/iytKFPzdg +DJbDk+KbVGaUuUfhF+o4mMyJNezMUFxWkePcUgP12li57GTJSIyi8OQaFUu1qh0L +KzQ2sYl8U0WmWQBhXqvuug47WI/6XrRDpKslIV1aV4XxD1Or6H3nf0fULjQZajCC +DTUGCSqGSIb3DQEHATAUBggqhkiG9w0DBwQI+4Ch/cEogOSAgg0QvlelG9yDK2GE +XX1wn8Xw0wCt+zIceXs8C6QuRSmZLEkZVv8Y+duMwi2A0tcg63HOmY2AfIPvTTt8 +eto3YwIklrfF20jBvCg/pT3kfm6TICWmMNd5XesTq8UNmkqzJQQ84L3Kbs/ix2pG +9RaeXkrg0VO7FBDVH8b+jIT9IVDAEXgBQVefcCImVZ9L2hQWNABFrFXAQSTKjfFJ +IEOfXUhTiH434V1RKJczhFiH5SNZ0kbaRjmaQkXqbXQ5kKoq8VNkmFc6vPCclTmq +QJFfIUTepljWW/HuVkUycNYQQkblmWNF9FEwSx++x3Tz1FLR3UlzOkJCqr+tS3jv +WFnI16VlOHaaHA++YKhW1PUujJcEdZaXBE0FC6JZF7IOAOjSdLSmRL9yU95erfgZ +hRo2FB8EWVZitIG+DPU9vU59chGpqXYzZU4/aTpedGeWSZ9GFXRqwb6htmajjTWu 
+l5fIME3hWt7kcejpuXCTDcdG4YcbngZu4hcepMrUhm9g2BdmIDb1YiB7290PMop8 +4nNo97tSBvhzk300cg6+pfxy1iAv3++g/ggOI+Y/gFmgN88mmBMWm0+mocJ0SZGY +3+8K/8pDpJpfAAXSjayl7T2UXUdJe8fpOtetiHUr2zIbZXlM4IQw+0UMAVjTiaRT +BIDGoPEcpCcxqPlSTTEie166uzzPXG9skVgennjN6YopwMC/WPaFRJu/eTlQOqlB +EqvK9TKJG8u2yp00J04MGYXluY4l/o3/KLpT0mCOeOJm3KerfwQ/jU2oHHmvIATN +XYy32ULqx/CjL+N3ax0Nu+UrgMQPcVhrTN/7lnZpFLYwXetGzH/4jdNfIfTc4yGn +0GlVT6cVgJyV8wyYpbqCxHtCW83II8vXLjTfeIffHBoJU0fMMPWEIxRuMQSksm0H +F1u/rfGVSXnueshbJUD3pnvTiLPuWcOexSxP+B8BCNfi21jX5Ha+U9RKrKbHc4h9 +PkiWxU6ZEqCBkdP9ssKnmMKMsrC7sZRoYziHNeqlZp/GFQmkI+DeFlqSPn3Lv9Or +HF3bZokZCf0RGEkZDPrigaiEoL7PH/TtVZF8miL4JCLB0FVB08vWeeP5zjQT4H6J +jSC2pw+5bA2UWGshgsKKAJJihYcOuybtzglh7nqmSSZcszz3GyuDhdR8KDrYwChU +Hn13+rSWAbbqtxSyPc5fd22Q4Do2aD6PVdRadHjG0qeE7Dq46YHT3Z9KF0nQTLk8 +uYq8hL5+jQEgTnUB0yJTKdEcg05TyrMfNHWuM1pru0bqpf25vpwP5t+Sd/vgWJNc +XtRLWrMdYBuSG9zOyLaH7bj0rcMhN3ULisKej9IT/xHOWSXXZjNoe1P3q9fvtMbg +ZXAale/xJ6rXq6mLvZXivJfQJkPbSV7fByPPKO6TMnHbNEgLOGO3XtHEwC24JKup +C0ohq03QqQHEisS9Mk5LvWmSchXR3/7vCtJFyOemQom7nCy8cx4Y1JGmZ4SGSaEs +QZs7GC7Ftb/X82LRuknvS19ApOVFEs4/8t+LviD3x7Z9quVv+fZvydhzNKGRR6kQ +fYZwK7rqqkvuFKgXqNbzlrtlUqOUPXJgdO7QHOtU8z+k2NzBWfOp6j+Ef8rc3GDU +HSVZZ/Lz0RWedxRC1zoZJSol7ckMxIGIpDhtb9xgDmaGKILWOR9k+wG6+7ywQ2LE +PB3myDOclvKUDyb/DqwRS9ch9yyYSmz8WXTgdSeyOjp8QT2JQuuOOhoooHuKSxAk ++7v/Fh5bNGtjHByuzMYSdLcWsLX+UohpDoc1heVgUA3R6EuIOJTA0nC653YmqIBp +R5rsT+esub/EndweZTacmc2nDJxTKdZgMvdwhnsOZZBGsOaD7MXAS5vCsze+PQmY +4+VqqWPASaclV6CygN4qSxmww6mVgmAgWVmJqfa6vOyb3zhx68TkNEp9rxJFcJSJ +NiTTvWe0nF+o2/a1HZ8rZFdf65KsqGSiqu/6HoUuFzWLxRCqSjB9RkfSqrDVAVim +pwL46zGRsqZV+5xrRQlxINNUbg/D11zcp1zdhQvhDrpBoLMjK7AaxA5msPYFy6Gm +KMRAG2kyi802W5CPZWkbiEoUA8vkiICuxN+Pdh146zk9Ngl4PC3YpNCMtXK11ifd +hYxmWqEuQ2AcdVTckosaWrFMn5MqEcR0aAXZbnjIMgTZ6SMYJBZMWjzJhe/UQjTo +vICK7KAH82chpW2hG2I67z7e1Nv930RyL6JbYI8mSqgccPBzOBUhpHvKDM59z8Nc +eStEYDdOcMz8P+c/H3Bh4WsyMWMOwWvjyy6GX5Bpl5z94tWFRn6W4FK5iDqp+HHm +v5W1+hlFBxXtuzBcSQntcj8LoExJ2mK6BhZkaeAESMqPvNeNFmhEVUGq0/+c7T4I 
+L+1YkQPcm/nIpwW/ITmkGmi5n5VsvbJFDbQe+h9LI2aqvWtzA0YT5Ed77Glbdbgq +qB8EyXdr1BsBb7s7bbXm4Wf8UJkCZESg8iQExkUk8HqMJRxjctjma0DyyKVi4j8Q ++BA1EYBEX37641S+ZR9fYmQeuULGkf3d+w/ttgvm6YDZivsZYWkTscX+lUtoHhWN +5EOAfllI0/DaGX15mGONMV8YA1PoCNEX3yKJ5tVGkxxUPK+Op7ZHvJmtb1fPMRRY +z+evQ+NTXTZZzdr3Kfs4yYbuXG4e1odm2v/zBKG7JF3yWPMtXZZiMks/BkaXTq1P +LrB0VxGcMsLeQ5HbbWJtchyCWyy63CNNbfYNohjxru52DjaAQlDKQT9pOiSmGJzb +7+hNnKYnOfo6Du2ljz7C9C4mxnRJsRA2O9Cw66J5XPy1W+2+RmvP72jXwoFWYzPq +jxNs2wxOYQjEDpXBTmCbW58F5cTbSTk3D15iCtYtf31tpuPpHEnz+2OvrX0WhygN +esZJnln2Tu2ut1pVhAuJDLZTj24Y4MP0nmDINuLDAkFji0CwjACvW7M9SbIOLLYU ++5JHHjB7wqaTXWFzpt/ZKXMXlwCzWjo3pDERbrpYbwS3GHqmtcyIZK4EA7Ulka5Y +7rLPWS5eKcjX3tp2FyX5pD52TpuUMPAk6vyefX+NznP7opvJpusHbkschojFVRDA +zHIpIGeWjYcWLk5YTPagzH8o+4ci1OEk+OMc8i6PxkQDeBw1RiCAFfBnKPCSEtFk +KJlw7fspk3/chA6mmvOHjkrQmUhUuDxAVGCVxl0K5LU3Y2IQxKGtCJk5YO4XD2e7 +5b0Ub+wy4Bb0l+z8HjuqEypFXDpQTd80NbhStZBgf2cB01elsqmKD9sT9wpFGKbC +VaatDLsLx4XrBG6ueoFKBgFL6l7afEPct8wuSoUrX5MAGlge5xzQYAD5spLlEa9G +Dt2KiPCsZcqWiaHiw5vk849FXUcfFfGl+0rEKhzcfUn3zkL1mGfqZ8Nf7qjMXdMy +dbUUQYMZXtMtK3fnYBnavgaUcu0bZ7Av+GVTQvDxfpzSeMW8lK7Ko6mINFQVC8dx +TEKWX+eApFUnTb11vNNxwxdOB2l5N+kfNLnVMhuYd7l8IHQxMMQTcf8hYu0owry6 +JkIdkhnF1kXVC2YWxo4VrDPwzkBWZE28ygBNhWgKCRhZnnbDEWPuqGP/IaLN4vww +1lqkZltqZDddXvOTXN/tZmkkQHt2uP264vqJB2BkGzxOll5UDQ8V3gXwheuUGxYc +gVL4ZJSKfHnUp6oRafIBnQs5RBvqdj2wewzT8AyPWImRG6fkYvsub8qIFqG6mu4Y +ixAQ9oTgg/KOXYNsfYuLGswu/aNnAqMEjfMerSx7dDu7teETkWb+IQJtodOdE/LI +yO/puds1M+V2H0TD36zXRyvEnpfm5BTURkxM8dI6meR37/JGtObtjg+Gzjpu6HGm +sIYyhG8bvV0Vkuip4bEgBB6T39dt/DeElHABthUmzFZe/QC8j7IJjyCz40JWDJSo +8wPtOoLnLeX0ynD8x8A5NsQk3W9fgEtv0WG6Uahs7P8GEZ5Uh9GPvWQpAkjKv7OZ +XVHJdTBMJICbB1Bzr8Nl0qPfQrhFzTNBMjBEwyaBpzRiV1hdTB2YPJPbjQQtQGkO +vT/EsAEWwSqDrQrDCfGRl7mhjdAsVFMjERdJE3/2TctY8VnLaRzUTSGkpCKxl+V4 +CLrBi96N80pxer5eKYtt5gtLFw0gZeeeqb2VDj6ChVnUjJ9r0TXzyy8ztwpB8X5Y +mZUDASD1acdZZOiEp69WA6juQR0EGKQT5phh+k0HbziW+bXMM+7YwiRJzwX4obnd +wgF+wyHht3Rzaptv5JSZMkc1RGSFIdWUwEp+3Ik6DGywiTcVkU65TQ7CsQJjmmkL 
+AChG7tUBI4KmolT9D0rj3A90//wl3ACkCFq94m0BZOFiimUXFjqux135P5i37XRJ +/8wgWZ0nzmXdFyTkEJEessAMbCkMiDHwaT7Lbs+S0qFeobh4DD3tkONnqSNa7md4 +945Z9MJiapzD3P33TvKhyQ0wHe5W0z4= - + \ No newline at end of file diff --git a/tests/data/wire/ext_conf-no_gs_metadata.xml b/tests/data/wire/ext_conf-no_gs_metadata.xml index 605e484254..ef5d3a1647 100644 --- a/tests/data/wire/ext_conf-no_gs_metadata.xml +++ b/tests/data/wire/ext_conf-no_gs_metadata.xml @@ -19,7 +19,7 @@ - {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} + {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} https://test.blob.core.windows.net/vhds/test-cs12.test-cs12.test-cs12.status?sr=b&sp=rw&se=9999-01-01&sk=key1&sv=2014-02-14&sig=hfRh7gzUE7sUtYwke78IOlZOrTRCYvkec4hGZ9zZzXo diff --git a/tests/data/wire/ext_conf.xml b/tests/data/wire/ext_conf.xml index 54d785159f..099ebacf30 100644 --- a/tests/data/wire/ext_conf.xml +++ b/tests/data/wire/ext_conf.xml @@ -19,7 +19,7 @@ - {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} + {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} https://test.blob.core.windows.net/vhds/test-cs12.test-cs12.test-cs12.status?sr=b&sp=rw&se=9999-01-01&sk=key1&sv=2014-02-14&sig=hfRh7gzUE7sUtYwke78IOlZOrTRCYvkec4hGZ9zZzXo diff --git a/tests/data/wire/ext_conf_additional_locations.xml b/tests/data/wire/ext_conf_additional_locations.xml index 8f5e746b06..20c7fb873d 100644 --- a/tests/data/wire/ext_conf_additional_locations.xml +++ 
b/tests/data/wire/ext_conf_additional_locations.xml @@ -24,7 +24,7 @@ - {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} + {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} https://test.blob.core.windows.net/vhds/test-cs12.test-cs12.test-cs12.status?sr=b&sp=rw&se=9999-01-01&sk=key1&sv=2014-02-14&sig=hfRh7gzUE7sUtYwke78IOlZOrTRCYvkec4hGZ9zZzXo diff --git a/tests/data/wire/ext_conf_autoupgrade.xml b/tests/data/wire/ext_conf_autoupgrade.xml index 77a201ad9c..74acf0af7e 100644 --- a/tests/data/wire/ext_conf_autoupgrade.xml +++ b/tests/data/wire/ext_conf_autoupgrade.xml @@ -21,7 +21,7 @@ - {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} + {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} https://test.blob.core.windows.net/vhds/test-cs12.test-cs12.test-cs12.status?sr=b&sp=rw&se=9999-01-01&sk=key1&sv=2014-02-14&sig=hfRh7gzUE7sUtYwke78IOlZOrTRCYvkec4hGZ9zZzXo diff --git a/tests/data/wire/ext_conf_autoupgrade_internalversion.xml b/tests/data/wire/ext_conf_autoupgrade_internalversion.xml index 44cad87819..afa27c6797 100644 --- a/tests/data/wire/ext_conf_autoupgrade_internalversion.xml +++ b/tests/data/wire/ext_conf_autoupgrade_internalversion.xml @@ -21,7 +21,7 @@ - {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} + 
{"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} https://test.blob.core.windows.net/vhds/test-cs12.test-cs12.test-cs12.status?sr=b&sp=rw&se=9999-01-01&sk=key1&sv=2014-02-14&sig=hfRh7gzUE7sUtYwke78IOlZOrTRCYvkec4hGZ9zZzXo diff --git a/tests/data/wire/ext_conf_dependencies_with_empty_settings.xml b/tests/data/wire/ext_conf_dependencies_with_empty_settings.xml index b26395ec23..f705c2f1b0 100644 --- a/tests/data/wire/ext_conf_dependencies_with_empty_settings.xml +++ b/tests/data/wire/ext_conf_dependencies_with_empty_settings.xml @@ -25,7 +25,7 @@ - {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} + {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} https://test.blob.core.windows.net/vhds/test-cs12.test-cs12.test-cs12.status?sr=b&sp=rw&se=9999-01-01&sk=key1&sv=2014-02-14&sig=hfRh7gzUE7sUtYwke78IOlZOrTRCYvkec4hGZ9zZzXo diff --git a/tests/data/wire/ext_conf_in_vm_artifacts_profile.xml b/tests/data/wire/ext_conf_in_vm_artifacts_profile.xml index a1af74f784..9575139a62 100644 --- a/tests/data/wire/ext_conf_in_vm_artifacts_profile.xml +++ b/tests/data/wire/ext_conf_in_vm_artifacts_profile.xml @@ -20,7 +20,7 @@ - {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} + {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} 
https://test.blob.core.windows.net/vhds/test-cs12.test-cs12.test-cs12.status?sr=b&sp=rw&se=9999-01-01&sk=key1&sv=2014-02-14&sig=hfRh7gzUE7sUtYwke78IOlZOrTRCYvkec4hGZ9zZzXo diff --git a/tests/data/wire/ext_conf_in_vm_empty_artifacts_profile.xml b/tests/data/wire/ext_conf_in_vm_empty_artifacts_profile.xml index cd5bb3d3e9..a0c87cfb6d 100644 --- a/tests/data/wire/ext_conf_in_vm_empty_artifacts_profile.xml +++ b/tests/data/wire/ext_conf_in_vm_empty_artifacts_profile.xml @@ -20,7 +20,7 @@ - {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} + {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} https://test.blob.core.windows.net/vhds/test-cs12.test-cs12.test-cs12.status?sr=b&sp=rw&se=9999-01-01&sk=key1&sv=2014-02-14&sig=hfRh7gzUE7sUtYwke78IOlZOrTRCYvkec4hGZ9zZzXo diff --git a/tests/data/wire/ext_conf_in_vm_metadata.xml b/tests/data/wire/ext_conf_in_vm_metadata.xml index 9a4f89cb81..ff5e92ae25 100644 --- a/tests/data/wire/ext_conf_in_vm_metadata.xml +++ b/tests/data/wire/ext_conf_in_vm_metadata.xml @@ -21,7 +21,7 @@ - {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} + {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} https://test.blob.core.windows.net/vhds/test-cs12.test-cs12.test-cs12.status?sr=b&sp=rw&se=9999-01-01&sk=key1&sv=2014-02-14&sig=hfRh7gzUE7sUtYwke78IOlZOrTRCYvkec4hGZ9zZzXo diff --git a/tests/data/wire/ext_conf_internalversion.xml b/tests/data/wire/ext_conf_internalversion.xml index 44cad87819..afa27c6797 100644 --- 
a/tests/data/wire/ext_conf_internalversion.xml +++ b/tests/data/wire/ext_conf_internalversion.xml @@ -21,7 +21,7 @@ - {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} + {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} https://test.blob.core.windows.net/vhds/test-cs12.test-cs12.test-cs12.status?sr=b&sp=rw&se=9999-01-01&sk=key1&sv=2014-02-14&sig=hfRh7gzUE7sUtYwke78IOlZOrTRCYvkec4hGZ9zZzXo diff --git a/tests/data/wire/ext_conf_invalid_and_valid_handlers.xml b/tests/data/wire/ext_conf_invalid_and_valid_handlers.xml index f9c95d694a..bede284e7e 100644 --- a/tests/data/wire/ext_conf_invalid_and_valid_handlers.xml +++ b/tests/data/wire/ext_conf_invalid_and_valid_handlers.xml @@ -22,11 +22,11 @@ - {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} - {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} + {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} + {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} - {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} + 
{"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} https://test.blob.core.windows.net/vhds/test-cs12.test-cs12.test-cs12.status?sr=b&sp=rw&se=9999-01-01&sk=key1&sv=2014-02-14&sig=hfRh7gzUE7sUtYwke78IOlZOrTRCYvkec4hGZ9zZzXo diff --git a/tests/data/wire/ext_conf_invalid_vm_metadata.xml b/tests/data/wire/ext_conf_invalid_vm_metadata.xml index 7c766220e5..4eb35e87ae 100644 --- a/tests/data/wire/ext_conf_invalid_vm_metadata.xml +++ b/tests/data/wire/ext_conf_invalid_vm_metadata.xml @@ -21,7 +21,7 @@ - {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} + {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} https://test.blob.core.windows.net/vhds/test-cs12.test-cs12.test-cs12.status?sr=b&sp=rw&se=9999-01-01&sk=key1&sv=2014-02-14&sig=hfRh7gzUE7sUtYwke78IOlZOrTRCYvkec4hGZ9zZzXo diff --git a/tests/data/wire/ext_conf_multiple_extensions.xml b/tests/data/wire/ext_conf_multiple_extensions.xml index 5845a179f5..bde568bd1b 100644 --- a/tests/data/wire/ext_conf_multiple_extensions.xml +++ b/tests/data/wire/ext_conf_multiple_extensions.xml @@ -25,22 +25,22 @@ - 
{"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F","protectedSettings":"MIIB4AYJKoZIhvcNAQcDoIIB0TCCAc0CAQAxggFpMIIBZQIBADBNMDkxNzA1BgoJkiaJk/IsZAEZFidXaW5kb3dzIEF6dXJlIENSUCBDZXJ0aWZpY2F0ZSBHZW5lcmF0b3ICEH3vWjYIrceWQigVQwoS8z0wDQYJKoZIhvcNAQEBBQAEggEANYey5W0qDqC6RHZlVnpLp2dWrMr1Rt5TCFkOjq1jU4y2y1FPtsTTKq9Z5pdGb/IHQo9VcT+OFglO3bChMbqc1vgmk4wkTQkgJVD3C8Rq4nv3uvQIux+g8zsa1MPKT5fTwG/dcrBp9xqySJLexUiuJljmNJgorGc0KtLwjnad4HTSKudDSo5DGskSDLxxLZYx0VVtQvgekOOwT/0C0pN4+JS/766jdUAnHR3oOuD5Dx7/c6EhFSoiYXMA0bUzH7VZeF8j/rkP1xscLQRrCScCNV2Ox424Y4RBbcbP/p69lDxGURcIKLKrIUhQdC8CfUMkQUEmFDLcOtxutCTFBZYMJzBbBgkqhkiG9w0BBwEwFAYIKoZIhvcNAwcECCuc0a4Gl8PAgDgcHekee/CivSTCXntJiCrltUDob8cX4YtIS6lq3H08Ar+2tKkpg5e3bOkdAo3q2GfIrGDm4MtVWw==","publicSettings":{"foo":"bar"}}}]} + {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9","protectedSettings":"MIIB4AYJKoZIhvcNAQcDoIIB0TCCAc0CAQAxggFpMIIBZQIBADBNMDkxNzA1BgoJkiaJk/IsZAEZFidXaW5kb3dzIEF6dXJlIENSUCBDZXJ0aWZpY2F0ZSBHZW5lcmF0b3ICEH3vWjYIrceWQigVQwoS8z0wDQYJKoZIhvcNAQEBBQAEggEANYey5W0qDqC6RHZlVnpLp2dWrMr1Rt5TCFkOjq1jU4y2y1FPtsTTKq9Z5pdGb/IHQo9VcT+OFglO3bChMbqc1vgmk4wkTQkgJVD3C8Rq4nv3uvQIux+g8zsa1MPKT5fTwG/dcrBp9xqySJLexUiuJljmNJgorGc0KtLwjnad4HTSKudDSo5DGskSDLxxLZYx0VVtQvgekOOwT/0C0pN4+JS/766jdUAnHR3oOuD5Dx7/c6EhFSoiYXMA0bUzH7VZeF8j/rkP1xscLQRrCScCNV2Ox424Y4RBbcbP/p69lDxGURcIKLKrIUhQdC8CfUMkQUEmFDLcOtxutCTFBZYMJzBbBgkqhkiG9w0BBwEwFAYIKoZIhvcNAwcECCuc0a4Gl8PAgDgcHekee/CivSTCXntJiCrltUDob8cX4YtIS6lq3H08Ar+2tKkpg5e3bOkdAo3q2GfIrGDm4MtVWw==","publicSettings":{"foo":"bar"}}}]} - 
{"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F","protectedSettings":"MIIBwAYJKoZIhvcNAQcDoIIBsTCCAa0CAQAxggFpMIIBZQIBADBNMDkxNzA1BgoJkiaJk/IsZAEZFidXaW5kb3dzIEF6dXJlIENSUCBDZXJ0aWZpY2F0ZSBHZW5lcmF0b3ICEH3vWjYIrceWQigVQwoS8z0wDQYJKoZIhvcNAQEBBQAEggEABILhQPoMx3NEbd/sS0xAAE4rJXwzJSE0bWr4OaKpcGS4ePtaNW8XWm+psYR9CBlXuGCuDVlFEdPmO2Ai8NX8TvT7RVYYc6yVQKpNQqO6Q9g9O52XXX4tBSFSCfoTzd1kbGC1c2wbXDyeROGCjraWuGHd4C9s9gytpgAlYicZjOqV3deo30F4vXZ+ZhCNpMkOvSXcsNpzTzQ/mskwNubN8MPkg/jEAzTHRpiJl3tjGtTqm00GHMqFF8/31jnoLQeQnWSmY+FBpiTUhPzyjufIcoZ+ueGXZiJ77xyH2Rghh5wvQM8oTVy2dwFQGeqjHOVgdgRNi/HgfZhcdltaQ8kjYDA7BgkqhkiG9w0BBwEwFAYIKoZIhvcNAwcECHPM0ZKBn+aWgBiVPT7zlkJA8eGuH7bNMTQCtGoJezToa24=","publicSettings":{"foo":"bar"}}}]} + {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9","protectedSettings":"MIIBwAYJKoZIhvcNAQcDoIIBsTCCAa0CAQAxggFpMIIBZQIBADBNMDkxNzA1BgoJkiaJk/IsZAEZFidXaW5kb3dzIEF6dXJlIENSUCBDZXJ0aWZpY2F0ZSBHZW5lcmF0b3ICEH3vWjYIrceWQigVQwoS8z0wDQYJKoZIhvcNAQEBBQAEggEABILhQPoMx3NEbd/sS0xAAE4rJXwzJSE0bWr4OaKpcGS4ePtaNW8XWm+psYR9CBlXuGCuDVlFEdPmO2Ai8NX8TvT7RVYYc6yVQKpNQqO6Q9g9O52XXX4tBSFSCfoTzd1kbGC1c2wbXDyeROGCjraWuGHd4C9s9gytpgAlYicZjOqV3deo30F4vXZ+ZhCNpMkOvSXcsNpzTzQ/mskwNubN8MPkg/jEAzTHRpiJl3tjGtTqm00GHMqFF8/31jnoLQeQnWSmY+FBpiTUhPzyjufIcoZ+ueGXZiJ77xyH2Rghh5wvQM8oTVy2dwFQGeqjHOVgdgRNi/HgfZhcdltaQ8kjYDA7BgkqhkiG9w0BBwEwFAYIKoZIhvcNAwcECHPM0ZKBn+aWgBiVPT7zlkJA8eGuH7bNMTQCtGoJezToa24=","publicSettings":{"foo":"bar"}}}]} - 
{"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F","protectedSettings":"MIIB4AYJKoZIhvcNAQcDoIIB0TCCAc0CAQAxggFpMIIBZQIBADBNMDkxNzA1BgoJkiaJk/IsZAEZFidXaW5kb3dzIEF6dXJlIENSUCBDZXJ0aWZpY2F0ZSBHZW5lcmF0b3ICEH3vWjYIrceWQigVQwoS8z0wDQYJKoZIhvcNAQEBBQAEggEAGSKUDRN64DIB7FS7yKXa07OXaFPhmdNnNDOAOD3/WVFb9fQ2bztV46waq7iRO+lpz7LSerRzIe6Kod9zCfK7ryukRomVHIfTIBwPjQ+Otn8ZD2aVcrxR0EI95x/SGyiESJRQnOMbpoVSWSu2KJUCPfycQ4ODbaazDc61k0JCmmRy12rQ4ttyWKhYwpwI2OYFHGr39N/YYq6H8skHj5ve1605i4P9XpfEyIwF5BbX59tDOAFFQtX7jzQcz//LtaHHjwLmysmD9OG5XyvfbBICwSYJfMX9Jh1aahLwcjL8Bd0vYyGL1ItMQF5KfDwog4+HLcRGx+S02Yngm3/YKS9DmzBbBgkqhkiG9w0BBwEwFAYIKoZIhvcNAwcECFGLNfK0bO5OgDgH90bRzqfgKK6EEh52XJfHz9G/ZL1mqP/ueWqo95PtEFo1gvI7z25V/pT0tBGibXgRhQXLFmwVTA==","publicSettings":{"foo":"bar"}}}]} + {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9","protectedSettings":"MIIB4AYJKoZIhvcNAQcDoIIB0TCCAc0CAQAxggFpMIIBZQIBADBNMDkxNzA1BgoJkiaJk/IsZAEZFidXaW5kb3dzIEF6dXJlIENSUCBDZXJ0aWZpY2F0ZSBHZW5lcmF0b3ICEH3vWjYIrceWQigVQwoS8z0wDQYJKoZIhvcNAQEBBQAEggEAGSKUDRN64DIB7FS7yKXa07OXaFPhmdNnNDOAOD3/WVFb9fQ2bztV46waq7iRO+lpz7LSerRzIe6Kod9zCfK7ryukRomVHIfTIBwPjQ+Otn8ZD2aVcrxR0EI95x/SGyiESJRQnOMbpoVSWSu2KJUCPfycQ4ODbaazDc61k0JCmmRy12rQ4ttyWKhYwpwI2OYFHGr39N/YYq6H8skHj5ve1605i4P9XpfEyIwF5BbX59tDOAFFQtX7jzQcz//LtaHHjwLmysmD9OG5XyvfbBICwSYJfMX9Jh1aahLwcjL8Bd0vYyGL1ItMQF5KfDwog4+HLcRGx+S02Yngm3/YKS9DmzBbBgkqhkiG9w0BBwEwFAYIKoZIhvcNAwcECFGLNfK0bO5OgDgH90bRzqfgKK6EEh52XJfHz9G/ZL1mqP/ueWqo95PtEFo1gvI7z25V/pT0tBGibXgRhQXLFmwVTA==","publicSettings":{"foo":"bar"}}}]} - 
{"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F","protectedSettings":"MIIEzAYJKoZIhvcNAQcDoIIEvTCCBLkCAQAxggFpMIIBZQIBADBNMDkxNzA1BgoJkiaJk/IsZAEZFidXaW5kb3dzIEF6dXJlIENSUCBDZXJ0aWZpY2F0ZSBHZW5lcmF0b3ICEH3vWjYIrceWQigVQwoS8z0wDQYJKoZIhvcNAQEBBQAEggEAFqLDBFGeuglluYmZb0Zw+ZlMiMIws9/LgmurVSRUTU/nSleIc9vOLcukfMeCpMativzHe23iDFy6p3XDkViNcuzqbhlPq5LQsXXg+xaUrrg8Xy+q7KUQdxzPdNBdpgkUh6yE2EFbqVLQ/7x+TkkSsw35uPT0nEqSj3yYFGH7X/NJ49fKU+ZvFDp/N+o54UbE6ZdxlHFtz6NJFxx5w4z5adQ8DgnUyS0bJ2denolknODfSW2D2alm00SXlI88CAjeHgEDkoLCduwkrDkSFAODcAiEHHX8oYCnfanatpjm7ZgSutS9y7+XUnGWxDYoujHDI9bbV0WpyDcx/DIrlZ+WcTCCA0UGCSqGSIb3DQEHATAUBggqhkiG9w0DBwQIrL18Lbp1qU6AggMgGklvozqr8HqYP+DwkvxdwHSpo+23QFxh70os+NJRtVgBv5NjPEziXo3FpXHMPvt0kp0IwXbwyy5vwnjCTA2sQOYgj77X6RmwF6+1gt2DIHDN1Q6jWzdcXZVHykSiF3gshbebRKO0hydfCaCyYL36HOZ8ugyCctOon5EflrnoOYDDHRbsr30DAxZCAwGOGZEeoU2+U+YdhuMvplnMryD1f6b8FQ7jXihe/zczAibX5/22NxhsVgALdsV5h6hwuTbspDt3V15/VU8ak7a4xxdBfXOX0HcQI86oqsFr7S7zIveoQHsW+wzlyMjwi6DRPFpz2wFkv5ivgFEvtCzDQP4aCqGI8VdqzR7aUDnuqiSCe/cbmv5mSmTYlDPTR03WS0IvgyeoNAzqCbYQe44AUBEZb/yT8Z3XxwW0GzcPMZQ0XjpcZiaKAueN9V8nJgNCEDPTJqpSjy+tEHmSgxn70+E57F0vzPvdQ3vOEeRj8zlBblHd4uVrhxdBMUuQ73JEQEha5rz0qcUy04Wmjld1rBuX6pdOqrArAYzTLJbIuLqDjlnYFsHLs9QBGvIEb9VFOlAm5JW8npBbIRHXqPfwZWs60+uNksTtsN3MxBxUWJPOByb4xRNx+nRpTOvfKKFlgq1ReK5bGSTCB7x0Ft3+T42LOQDrBPyxxtGzWs+aq05qFgI4n0h8X82wxJflK+kUdwvvG/ZY5MM+/le2zOrUeyzvxXsHoRetgg+DOk7v+v7VsuT1KuvTXvgzxoOFF3/T2pNPpE3h6bbP2BUqZ2yzPNziGFslywDLZ8W3OUZoQejGqobRePdgUoBi5q2um/sPnq81kOJ/qhIOVq581ZD4IQWLot8eK8vX0G/y7y71YelRR51cUfgR5WvZZf6LvYw+GpwOtSViugl9QxGCviSLgHTJSSEm0ijtbzKhwP4vEyydNDrz8+WYB8DNIV7K2Pc8JyxAM03FYX30CaaJ40pbEUuVQVEnkAD2E//29/ZzgNTf/LBMzMEP5j7wlL+QQpmPAtL/FlBrOJ4nDEqsOOhWzI1MN51xRZuv3e2RqzVPiSmrKtk=","publicSettings":{"foo":"bar"}}}]} + 
{"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9","protectedSettings":"MIIEzAYJKoZIhvcNAQcDoIIEvTCCBLkCAQAxggFpMIIBZQIBADBNMDkxNzA1BgoJkiaJk/IsZAEZFidXaW5kb3dzIEF6dXJlIENSUCBDZXJ0aWZpY2F0ZSBHZW5lcmF0b3ICEH3vWjYIrceWQigVQwoS8z0wDQYJKoZIhvcNAQEBBQAEggEAFqLDBFGeuglluYmZb0Zw+ZlMiMIws9/LgmurVSRUTU/nSleIc9vOLcukfMeCpMativzHe23iDFy6p3XDkViNcuzqbhlPq5LQsXXg+xaUrrg8Xy+q7KUQdxzPdNBdpgkUh6yE2EFbqVLQ/7x+TkkSsw35uPT0nEqSj3yYFGH7X/NJ49fKU+ZvFDp/N+o54UbE6ZdxlHFtz6NJFxx5w4z5adQ8DgnUyS0bJ2denolknODfSW2D2alm00SXlI88CAjeHgEDkoLCduwkrDkSFAODcAiEHHX8oYCnfanatpjm7ZgSutS9y7+XUnGWxDYoujHDI9bbV0WpyDcx/DIrlZ+WcTCCA0UGCSqGSIb3DQEHATAUBggqhkiG9w0DBwQIrL18Lbp1qU6AggMgGklvozqr8HqYP+DwkvxdwHSpo+23QFxh70os+NJRtVgBv5NjPEziXo3FpXHMPvt0kp0IwXbwyy5vwnjCTA2sQOYgj77X6RmwF6+1gt2DIHDN1Q6jWzdcXZVHykSiF3gshbebRKO0hydfCaCyYL36HOZ8ugyCctOon5EflrnoOYDDHRbsr30DAxZCAwGOGZEeoU2+U+YdhuMvplnMryD1f6b8FQ7jXihe/zczAibX5/22NxhsVgALdsV5h6hwuTbspDt3V15/VU8ak7a4xxdBfXOX0HcQI86oqsFr7S7zIveoQHsW+wzlyMjwi6DRPFpz2wFkv5ivgFEvtCzDQP4aCqGI8VdqzR7aUDnuqiSCe/cbmv5mSmTYlDPTR03WS0IvgyeoNAzqCbYQe44AUBEZb/yT8Z3XxwW0GzcPMZQ0XjpcZiaKAueN9V8nJgNCEDPTJqpSjy+tEHmSgxn70+E57F0vzPvdQ3vOEeRj8zlBblHd4uVrhxdBMUuQ73JEQEha5rz0qcUy04Wmjld1rBuX6pdOqrArAYzTLJbIuLqDjlnYFsHLs9QBGvIEb9VFOlAm5JW8npBbIRHXqPfwZWs60+uNksTtsN3MxBxUWJPOByb4xRNx+nRpTOvfKKFlgq1ReK5bGSTCB7x0Ft3+T42LOQDrBPyxxtGzWs+aq05qFgI4n0h8X82wxJflK+kUdwvvG/ZY5MM+/le2zOrUeyzvxXsHoRetgg+DOk7v+v7VsuT1KuvTXvgzxoOFF3/T2pNPpE3h6bbP2BUqZ2yzPNziGFslywDLZ8W3OUZoQejGqobRePdgUoBi5q2um/sPnq81kOJ/qhIOVq581ZD4IQWLot8eK8vX0G/y7y71YelRR51cUfgR5WvZZf6LvYw+GpwOtSViugl9QxGCviSLgHTJSSEm0ijtbzKhwP4vEyydNDrz8+WYB8DNIV7K2Pc8JyxAM03FYX30CaaJ40pbEUuVQVEnkAD2E//29/ZzgNTf/LBMzMEP5j7wlL+QQpmPAtL/FlBrOJ4nDEqsOOhWzI1MN51xRZuv3e2RqzVPiSmrKtk=","publicSettings":{"foo":"bar"}}}]} diff --git a/tests/data/wire/ext_conf_no_public.xml b/tests/data/wire/ext_conf_no_public.xml index 63e7013cc0..5ee9635cca 100644 --- a/tests/data/wire/ext_conf_no_public.xml +++ 
b/tests/data/wire/ext_conf_no_public.xml @@ -39,7 +39,7 @@ - {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F","protectedSettings":"MIICWgYJK"}}]} + {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9","protectedSettings":"MIICWgYJK"}}]} diff --git a/tests/data/wire/ext_conf_required_features.xml b/tests/data/wire/ext_conf_required_features.xml index 798ba5c52d..2dedcdbab2 100644 --- a/tests/data/wire/ext_conf_required_features.xml +++ b/tests/data/wire/ext_conf_required_features.xml @@ -32,7 +32,7 @@ - {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} + {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} https://test.blob.core.windows.net/vhds/test-cs12.test-cs12.test-cs12.status?sr=b&sp=rw&se=9999-01-01&sk=key1&sv=2014-02-14&sig=hfRh7gzUE7sUtYwke78IOlZOrTRCYvkec4hGZ9zZzXo diff --git a/tests/data/wire/ext_conf_rsm_version.xml b/tests/data/wire/ext_conf_rsm_version.xml index 806063541a..d76ac6453c 100644 --- a/tests/data/wire/ext_conf_rsm_version.xml +++ b/tests/data/wire/ext_conf_rsm_version.xml @@ -25,7 +25,7 @@ - {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} + {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} diff --git a/tests/data/wire/ext_conf_sequencing.xml b/tests/data/wire/ext_conf_sequencing.xml index 3c9a2ddd79..99ffd402c3 100644 --- a/tests/data/wire/ext_conf_sequencing.xml +++ 
b/tests/data/wire/ext_conf_sequencing.xml @@ -23,12 +23,12 @@ - {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} + {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} - {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} + {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} https://test.blob.core.windows.net/vhds/test-cs12.test-cs12.test-cs12.status?sr=b&sp=rw&se=9999-01-01&sk=key1&sv=2014-02-14&sig=hfRh7gzUE7sUtYwke78IOlZOrTRCYvkec4hGZ9zZzXo diff --git a/tests/data/wire/ext_conf_settings_case_mismatch.xml b/tests/data/wire/ext_conf_settings_case_mismatch.xml index 71286c5bf5..cb7c82d73b 100644 --- a/tests/data/wire/ext_conf_settings_case_mismatch.xml +++ b/tests/data/wire/ext_conf_settings_case_mismatch.xml @@ -25,27 +25,27 @@ - {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} + {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} - {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} + 
{"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} - {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} + {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} - {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} + {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} - {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} + {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} diff --git a/tests/data/wire/ext_conf_upgradeguid.xml b/tests/data/wire/ext_conf_upgradeguid.xml index 2ec7147bb8..7cd013b5b5 100644 --- a/tests/data/wire/ext_conf_upgradeguid.xml +++ b/tests/data/wire/ext_conf_upgradeguid.xml @@ -19,7 +19,7 @@ - {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} + {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} 
https://test.blob.core.windows.net/vhds/test-cs12.test-cs12.test-cs12.status?sr=b&sp=rw&se=9999-01-01&sk=key1&sv=2014-02-14&sig=hfRh7gzUE7sUtYwke78IOlZOrTRCYvkec4hGZ9zZzXo diff --git a/tests/data/wire/ext_conf_version_missing_in_agent_family.xml b/tests/data/wire/ext_conf_version_missing_in_agent_family.xml index 3f81ed1195..eee17a4ef1 100644 --- a/tests/data/wire/ext_conf_version_missing_in_agent_family.xml +++ b/tests/data/wire/ext_conf_version_missing_in_agent_family.xml @@ -23,7 +23,7 @@ - {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} + {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} diff --git a/tests/data/wire/ext_conf_version_missing_in_manifest.xml b/tests/data/wire/ext_conf_version_missing_in_manifest.xml index c750d5d3a2..4d3ebd70ce 100644 --- a/tests/data/wire/ext_conf_version_missing_in_manifest.xml +++ b/tests/data/wire/ext_conf_version_missing_in_manifest.xml @@ -31,7 +31,7 @@ - {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} + {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} diff --git a/tests/data/wire/ext_conf_version_not_from_rsm.xml b/tests/data/wire/ext_conf_version_not_from_rsm.xml index 9da8f5da72..9636c80d43 100644 --- a/tests/data/wire/ext_conf_version_not_from_rsm.xml +++ b/tests/data/wire/ext_conf_version_not_from_rsm.xml @@ -25,7 +25,7 @@ - 
{"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} + {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} diff --git a/tests/data/wire/ext_conf_vm_not_enabled_for_rsm_upgrades.xml b/tests/data/wire/ext_conf_vm_not_enabled_for_rsm_upgrades.xml index 384723f461..e7017c4ce3 100644 --- a/tests/data/wire/ext_conf_vm_not_enabled_for_rsm_upgrades.xml +++ b/tests/data/wire/ext_conf_vm_not_enabled_for_rsm_upgrades.xml @@ -25,7 +25,7 @@ - {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} + {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} diff --git a/tests/data/wire/invalid_config/ext_conf_multiple_depends_on_for_single_handler.xml b/tests/data/wire/invalid_config/ext_conf_multiple_depends_on_for_single_handler.xml index 8d76b732c9..a9aa7c49a5 100644 --- a/tests/data/wire/invalid_config/ext_conf_multiple_depends_on_for_single_handler.xml +++ b/tests/data/wire/invalid_config/ext_conf_multiple_depends_on_for_single_handler.xml @@ -28,16 +28,16 @@ - {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} + {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} - 
{"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} + {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} - {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} + {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} https://test.blob.core.windows.net/vhds/test-cs12.test-cs12.test-cs12.status?sr=b&sp=rw&se=9999-01-01&sk=key1&sv=2014-02-14&sig=hfRh7gzUE7sUtYwke78IOlZOrTRCYvkec4hGZ9zZzXo diff --git a/tests/data/wire/invalid_config/ext_conf_multiple_runtime_settings_same_plugin.xml b/tests/data/wire/invalid_config/ext_conf_multiple_runtime_settings_same_plugin.xml index 43e1e02819..4de9a4cebb 100644 --- a/tests/data/wire/invalid_config/ext_conf_multiple_runtime_settings_same_plugin.xml +++ b/tests/data/wire/invalid_config/ext_conf_multiple_runtime_settings_same_plugin.xml @@ -21,8 +21,8 @@ - {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} - {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} + {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} + 
{"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} https://test.blob.core.windows.net/vhds/test-cs12.test-cs12.test-cs12.status?sr=b&sp=rw&se=9999-01-01&sk=key1&sv=2014-02-14&sig=hfRh7gzUE7sUtYwke78IOlZOrTRCYvkec4hGZ9zZzXo diff --git a/tests/data/wire/invalid_config/ext_conf_multiple_settings_for_same_handler.xml b/tests/data/wire/invalid_config/ext_conf_multiple_settings_for_same_handler.xml index 7351c8bf56..a1cc86381b 100644 --- a/tests/data/wire/invalid_config/ext_conf_multiple_settings_for_same_handler.xml +++ b/tests/data/wire/invalid_config/ext_conf_multiple_settings_for_same_handler.xml @@ -21,10 +21,10 @@ - {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} + {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} - {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} + {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} https://test.blob.core.windows.net/vhds/test-cs12.test-cs12.test-cs12.status?sr=b&sp=rw&se=9999-01-01&sk=key1&sv=2014-02-14&sig=hfRh7gzUE7sUtYwke78IOlZOrTRCYvkec4hGZ9zZzXo diff --git a/tests/data/wire/invalid_config/ext_conf_plugin_settings_version_mismatch.xml b/tests/data/wire/invalid_config/ext_conf_plugin_settings_version_mismatch.xml index dcf1014641..7220b59c72 100644 --- a/tests/data/wire/invalid_config/ext_conf_plugin_settings_version_mismatch.xml +++ 
b/tests/data/wire/invalid_config/ext_conf_plugin_settings_version_mismatch.xml @@ -19,10 +19,10 @@ - {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} + {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} - {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} + {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} https://test.blob.core.windows.net/vhds/test-cs12.test-cs12.test-cs12.status?sr=b&sp=rw&se=9999-01-01&sk=key1&sv=2014-02-14&sig=hfRh7gzUE7sUtYwke78IOlZOrTRCYvkec4hGZ9zZzXo diff --git a/tests/data/wire/invalid_config/ext_conf_single_and_multi_config_settings_same_plugin.xml b/tests/data/wire/invalid_config/ext_conf_single_and_multi_config_settings_same_plugin.xml index 8a30ddbaf2..899d23398a 100644 --- a/tests/data/wire/invalid_config/ext_conf_single_and_multi_config_settings_same_plugin.xml +++ b/tests/data/wire/invalid_config/ext_conf_single_and_multi_config_settings_same_plugin.xml @@ -21,8 +21,8 @@ - {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} - {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} + 
{"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} + {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]} https://test.blob.core.windows.net/vhds/test-cs12.test-cs12.test-cs12.status?sr=b&sp=rw&se=9999-01-01&sk=key1&sv=2014-02-14&sig=hfRh7gzUE7sUtYwke78IOlZOrTRCYvkec4hGZ9zZzXo diff --git a/tests/data/wire/trans_cert b/tests/data/wire/trans_cert index 35793e019f..c522a2f519 100644 --- a/tests/data/wire/trans_cert +++ b/tests/data/wire/trans_cert @@ -1,19 +1,19 @@ -----BEGIN CERTIFICATE----- -MIIDEzCCAfugAwIBAgIUDcHXiRT74wOkLZYnyoZibT9+2G8wDQYJKoZIhvcNAQEL -BQAwGTEXMBUGA1UEAwwOTGludXhUcmFuc3BvcnQwHhcNMjIwODEyMTgzMTM5WhcN -MjQwODExMTgzMTM5WjAZMRcwFQYDVQQDDA5MaW51eFRyYW5zcG9ydDCCASIwDQYJ -KoZIhvcNAQEBBQADggEPADCCAQoCggEBAK/XWh+Djc2WYoJ/8FkZd8OV3V47fID5 -WV8hSBz/i/hVUKHhCWTQfE4VcQBGYFyK8lMKIBV7t6Bq05TQGuB8148HSjIboDx3 -Ndd0C/+lYcBE1izMrHKZYhcy7lSlEUk+y5iye0cA5k/dlJhfwoxWolw0E2dMOjlY -qzkEGJdyS6+hFddo696HzD7OYhxh1r50aHPWqY8NnC51487loOtPs4LYA2bd3HSg -ECpOtKzyJW+GP0H2vBa7MrXrZOnD1K2j2xb8nTnYnpNtlmnZPj7VYFsLOlsq547X -nFiSptPWslbVogkUVkCZlAqkMcJ/OtH70ZVjLyjFd6j7J/Wy8MrA7pECAwEAAaNT -MFEwHQYDVR0OBBYEFGXBvV/uWivFWRWPHiVfY/kSJqufMB8GA1UdIwQYMBaAFGXB -vV/uWivFWRWPHiVfY/kSJqufMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEL -BQADggEBABjatix/q90u6X/Jar/UkKiL2zx36s4huPU9KG8F51g48cYRjrvpK4+H -K6avCGArl7h1gczaGS7LTOHFUU25eg/BBcKcXEO3aryQph2A167ip89UM55LxlnC -QVVV9HAnEw5qAoh0wlZ65fVN+SE8FdasYlbbbp7c4At/LZruSj+IIapZDwwJxcBk -YlSOa34v1Uay09+Hgu95dYQjI9txJW1ViRVlDpKbieGTzROI6s3uk+3rhxxlH2Zi -Z9UqNmPfH9UE1xgSk/wkMWW22h/x51qIRKAZ4EzmdHVXdT/BarIuHxtHH8hIPNSL -FjetCMVZNBej2HXL9cY5UVFYCG6JG0Q= +MIIDEzCCAfugAwIBAgIUToMqRt0z6FfqfiJhS1Hh+u2j3VEwDQYJKoZIhvcNAQEL +BQAwGTEXMBUGA1UEAwwOTGludXhUcmFuc3BvcnQwHhcNMjQwODAxMTYwOTU2WhcN 
+MjYwODAxMTYwOTU2WjAZMRcwFQYDVQQDDA5MaW51eFRyYW5zcG9ydDCCASIwDQYJ +KoZIhvcNAQEBBQADggEPADCCAQoCggEBAMs8jttzIHATj1BNs3r4cCOAMuVaM1b7 +Aw8D7Lz3rTxFieQCh1vLSFl1l9SQmO7rmh0OfEzIKK8jAU4wkLclgospKuYpB9ME +5QnXbLpXWYfW99V4safGvv9lGZztGKMd4ZT2it9QcpKEFFi6W7cjIyiUuyYMB0uI +IvA6s6tGs8LgN89Lx7HSTSR86QNPvRtTw0jlrr8nfM7EkaT9Q6xu6GjCp89wCx+h +IwcPtstSgfMo5P+3IO30L1wSM+CF1n+nD9M8E4wtcxhoWLuyAPhDsw5f7jKyHmRo +Nm9RxToM0ON67SmN2906i0NxzXWtuttww6KE/O6BEZKNlnp9ja3bnM8CAwEAAaNT +MFEwHQYDVR0OBBYEFNPDyPggVKjneDW7XuzC8NqgmJ9VMB8GA1UdIwQYMBaAFNPD +yPggVKjneDW7XuzC8NqgmJ9VMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEL +BQADggEBAFuVgcimwPxgpwKNvyUKMY9VFa6UVZs/ky6FEEaxrKVAl2GZF9MoSTO5 +vXMdWYHtSF+RWYxCz5pt7Bv97zuEXvbino/JvsLrE8f265Woe2CdDOPiBCHWBOlH ++wM71Hoh0TX7V2TSumona6e0cqUPT7fbNdaNZm8ZHoUscbbPmamERH9Z9zUXWPLk +mtjwz17bvRriAMrglA/Dm3xHiEYBJv3+4FnOqPGfg9vZH6xfmrRwrF1Moj5jEZz5 +cN2N+vO8HCEqGMBCpSlsWq1c2r3NwLH0J3b6EL7X4jcVvpykKg3WmOZGdataYDk9 +0IHy8VyGiX7g3EJOAbbf12FjgLAt4NM= -----END CERTIFICATE----- diff --git a/tests/data/wire/trans_prv b/tests/data/wire/trans_prv index 17bdb07c65..876b8351b2 100644 --- a/tests/data/wire/trans_prv +++ b/tests/data/wire/trans_prv @@ -1,28 +1,28 @@ -----BEGIN PRIVATE KEY----- -MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCv11ofg43NlmKC -f/BZGXfDld1eO3yA+VlfIUgc/4v4VVCh4Qlk0HxOFXEARmBcivJTCiAVe7egatOU -0BrgfNePB0oyG6A8dzXXdAv/pWHARNYszKxymWIXMu5UpRFJPsuYsntHAOZP3ZSY -X8KMVqJcNBNnTDo5WKs5BBiXckuvoRXXaOveh8w+zmIcYda+dGhz1qmPDZwudePO -5aDrT7OC2ANm3dx0oBAqTrSs8iVvhj9B9rwWuzK162Tpw9Sto9sW/J052J6TbZZp -2T4+1WBbCzpbKueO15xYkqbT1rJW1aIJFFZAmZQKpDHCfzrR+9GVYy8oxXeo+yf1 -svDKwO6RAgMBAAECggEAEwBogsNKjY7Usll09Yvk/0OwmkA/YgiP+dG04z1SONGv -Vu7kfvpwlFeI0IjKXPW+3e5YLTojS7h/iLM8VEnpWVFmWSfXFvGi5ddqfIO4nnhR -1KGBeRjOGsesLYVw6sNYaPXQkImuWa8OIbEnatbp0KDn/9+i4xOL3StuJN97Ak1u -Giq4gwFbag4/QctBZ+5P0t77W+uzWcvEyNgK6rndfPWxqwmJSBFchY6O3s1l6NY8 -vSmyYhYRgFXEgX0nDumGfEXsF1Cj9tzYT2DUZc2f6+UCtXCD49qnoKawLhCrl5Uh -QGs82TR5FSn7zLW4MbFody6p8UDw6pYiWlPPR7fmgQKBgQDO3j5RCXf0276K56BA 
-rFpOOmivq3fxElRVCSRRRVPKHDYKQiPKnNXoa/pSl8a6CfjJaJzkNj+wTEFdQRGm -Ia123kR/1S21/zgGZNmbUGby+A4fKxBY101/JQweucRN7aw3XLKPXhOL1NPyKdWh -dARvjZvEl1qR6s07Y6jZgpkGqQKBgQDZmqVWvUgACdxkCYEzDf3Fc2G/8oL4VxWJ -HHr5zib+DDhTfKrgQyA9CZ97stZfrR7KYnsLJH8jnj/w/CNOI0G+41KroICRsnjT -5bm7/sT5uwLwu+FAQzITiehj7Te1lwsqtS8yOnXBTQ3hzaw9yhAsuhefx+WT2UCd -Y8Od13nhqQKBgQCR2LR8s71D/81F52nfTuRYNOvrtmtYpkCYt1pIhiU94EflUZ4k -UhCpzb7tjh5IuZEShtPePbUHWavX0HFd/G5s2OXYbnbM0oQwVdfpnXUHpgVmyhi7 -WghENN1nqDcTbha17X/ifkQvmLxZBk+chcw+zcrdfowXRkCtt2Sq/V1gCQKBgH/w -UK3C9AYxxgZ7IB9oZoAk6p/0cdSZPuwydotRDdPoU2WissTQMrAwbDhKWYg/PQ84 -/6b5elbywB1r4UYbrJgTB5Qo9e6zxB6xvpYtoJpDveLUVAd4eoTKXHwECPEXMVWW -2XzqqjlQmIzeZBqgJwplD2a+HNjkrvzanzS6b8qhAoGBAIun0EEc/Zc0ZxzgDPen -A9/7jV++QCrNsevxGH8yrhPP4UqTVSHGR9H+RAif7zTBTn0OwzSBz6hFbPmxum3m -cKabsKVN3poz3TBvfyhgjYosMWvCHpNhif09lyd/s2FezPGyK1Nyf5cKNEWjFGKw -+fCPJ/Ihp4iwacNU1Pu9m050 +MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQDLPI7bcyBwE49Q +TbN6+HAjgDLlWjNW+wMPA+y89608RYnkAodby0hZdZfUkJju65odDnxMyCivIwFO +MJC3JYKLKSrmKQfTBOUJ12y6V1mH1vfVeLGnxr7/ZRmc7RijHeGU9orfUHKShBRY +ulu3IyMolLsmDAdLiCLwOrOrRrPC4DfPS8ex0k0kfOkDT70bU8NI5a6/J3zOxJGk +/UOsbuhowqfPcAsfoSMHD7bLUoHzKOT/tyDt9C9cEjPghdZ/pw/TPBOMLXMYaFi7 +sgD4Q7MOX+4ysh5kaDZvUcU6DNDjeu0pjdvdOotDcc11rbrbcMOihPzugRGSjZZ6 +fY2t25zPAgMBAAECggEAE9CAJxIW4AZwKwagUIVnPXbSv3ynU7weRLj/vD6zg5RO +CM5cTw1HLP2jg2RjnKuYt2uBn+TF3qldh7eBbHG6RAIL/iuS6TZpdCeuII7CmlVR +jVz6iR594Z2EPUH6bHDN3P2adYI84V8CMtJcfcLtuxehFWkHzwvjSCOY/8JhZUbV +ebXXc3zPdSu+WmeManXnzs4VgE6QnSNdyk67fvE1Qxi18s49XXWBPTg01hn+v2yJ +QVuv36UP2MgIRZJE/PI9NL6tqqiHmY5sCIJ41hQLRxd/mnRC8hdHrfNNhqHVlC9g +JoQQwn/dD12EZwyiQyJyGZOmFDrfv7G3d2QQVJ4OLQKBgQDrxf3nRK28CWaV2evS +J4MZjTWmZGiNzMiqEtfTgd0v3+rs73WYaNfQ79Iejj6KJfJq7vtdawqGW1bPNfgF +KJCdr3yxjpv5GsHF7fiE8ZWcQ6d6FTWNuayLOEbHnPemYTqg5pd1wsPgIBoE9Zqm +zo1iuGxmwHos2yQgif9vEU99wwKBgQDcq/+aDscOO1oimJjAbBl95I8bOtSxR0Ip +pv/iaB8+rrS18jiAygXuo34tq+L0HmoniMCuuVg4zhgAxzgnohTlsJpyGnzkdkmo 
+TTan76WkFAedmurzQSu96p5F9HOc0MgluQHtPhO5SsjWhUgXxAU0Zoe+JnTVq0X+ +//8z1s64BQKBgEbanl4U7p0WuiSIc+0ZALX6EMhrXlxW0WsC9KdUXJNZmHER2WYv +A8R/fca++p5rnvlxzkqZs3UDGAh3cIykTymEJlX5xHfNCbSgulHBhDOMxVTT8N8h +kG/aPrMYQfhXOdZG1feGy3ScURVydcJxSl4DjFgouc6nIKlCr2fCbQAfAoGAVpez +3EtSNzZ5HzxMLK3+rtUihufmEI7K2rdqj/iV0i4SQZeELp2YCFXlrJxXmb3ZoBvc +qHOYt+m/p4aFdZ/3nU5YvM/CFJCKRN3PxcSXdjRZ7LGe4se/F25an07Wk0GmWI8p +v2Ptr3c2Kl/ws0q7VB2rxKUokbP86pygE0KGqdUCgYAf8G1QLDZMq57XsNBpiITY +xmS/vnmu2jj/DaTAiJ/gPkUaemoJ4xqhuIko7KqaNOBYoOMrOadldygNtrH1c5YE +LKdPYQ9/bASF59DnBotKAv79n2svHFHNXkpZA+kIoH7QwhgKpwo3vNwcJcKRIBB9 +MjMnBzho1vIbdhoIHJ+Egw== -----END PRIVATE KEY----- diff --git a/tests/data/wire/trans_pub b/tests/data/wire/trans_pub index 330ff42712..b090a7817d 100644 --- a/tests/data/wire/trans_pub +++ b/tests/data/wire/trans_pub @@ -1,9 +1,9 @@ -----BEGIN PUBLIC KEY----- -MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAr9daH4ONzZZign/wWRl3 -w5XdXjt8gPlZXyFIHP+L+FVQoeEJZNB8ThVxAEZgXIryUwogFXu3oGrTlNAa4HzX -jwdKMhugPHc113QL/6VhwETWLMyscpliFzLuVKURST7LmLJ7RwDmT92UmF/CjFai -XDQTZ0w6OVirOQQYl3JLr6EV12jr3ofMPs5iHGHWvnRoc9apjw2cLnXjzuWg60+z -gtgDZt3cdKAQKk60rPIlb4Y/Qfa8Frsytetk6cPUraPbFvydOdiek22Wadk+PtVg -Wws6WyrnjtecWJKm09ayVtWiCRRWQJmUCqQxwn860fvRlWMvKMV3qPsn9bLwysDu -kQIDAQAB +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAyzyO23MgcBOPUE2zevhw +I4Ay5VozVvsDDwPsvPetPEWJ5AKHW8tIWXWX1JCY7uuaHQ58TMgoryMBTjCQtyWC +iykq5ikH0wTlCddsuldZh9b31Xixp8a+/2UZnO0Yox3hlPaK31BykoQUWLpbtyMj +KJS7JgwHS4gi8Dqzq0azwuA3z0vHsdJNJHzpA0+9G1PDSOWuvyd8zsSRpP1DrG7o +aMKnz3ALH6EjBw+2y1KB8yjk/7cg7fQvXBIz4IXWf6cP0zwTjC1zGGhYu7IA+EOz +Dl/uMrIeZGg2b1HFOgzQ43rtKY3b3TqLQ3HNda2623DDooT87oERko2Wen2Nrduc +zwIDAQAB -----END PUBLIC KEY----- diff --git a/tests/ga/test_agent_update_handler.py b/tests/ga/test_agent_update_handler.py index c6e41469f3..c2d01a424a 100644 --- a/tests/ga/test_agent_update_handler.py +++ b/tests/ga/test_agent_update_handler.py @@ -10,7 +10,8 @@ from azurelinuxagent.common.protocol.util import ProtocolUtil from 
azurelinuxagent.common.version import CURRENT_VERSION, AGENT_NAME -from azurelinuxagent.ga.agent_update_handler import get_agent_update_handler +from azurelinuxagent.ga.agent_update_handler import get_agent_update_handler, INITIAL_UPDATE_STATE_FILE, \ + RSM_UPDATE_STATE_FILE from azurelinuxagent.ga.guestagent import GuestAgent from tests.ga.test_update import UpdateTestCase from tests.lib.http_request_predicates import HttpRequestPredicates @@ -28,7 +29,7 @@ def setUp(self): clear_singleton_instances(ProtocolUtil) @contextlib.contextmanager - def _get_agent_update_handler(self, test_data=None, autoupdate_frequency=0.001, autoupdate_enabled=True, protocol_get_error=False, mock_get_header=None, mock_put_header=None): + def _get_agent_update_handler(self, test_data=None, autoupdate_frequency=0.001, autoupdate_enabled=True, initial_update_attempted=True, protocol_get_error=False, mock_get_header=None, mock_put_header=None): # Default to DATA_FILE of test_data parameter raises the pylint warning # W0102: Dangerous default value DATA_FILE (builtins.dict) as argument (dangerous-default-value) test_data = DATA_FILE if test_data is None else test_data @@ -57,6 +58,9 @@ def put_handler(url, *args, **_): protocol.set_http_handlers(http_get_handler=http_get_handler, http_put_handler=http_put_handler) + if initial_update_attempted: + open(os.path.join(conf.get_lib_dir(), INITIAL_UPDATE_STATE_FILE), "a").close() + with patch("azurelinuxagent.common.conf.get_autoupdate_enabled", return_value=autoupdate_enabled): with patch("azurelinuxagent.common.conf.get_autoupdate_frequency", return_value=autoupdate_frequency): with patch("azurelinuxagent.common.conf.get_autoupdate_gafamily", return_value="Prod"): @@ -407,6 +411,29 @@ def test_it_should_report_update_status_with_error_on_download_fail(self): self.assertEqual("9.9.9.10", vm_agent_update_status.expected_version) self.assertIn("Failed to download agent package from all URIs", vm_agent_update_status.message) + def 
test_it_should_not_report_error_status_if_new_rsm_version_is_same_as_current_after_last_update_attempt_failed(self): + data_file = DATA_FILE.copy() + data_file["ext_conf"] = "wire/ext_conf_rsm_version.xml" + + with self._get_agent_update_handler(test_data=data_file, protocol_get_error=True) as (agent_update_handler, _): + agent_update_handler.run(agent_update_handler._protocol.get_goal_state(), True) + vm_agent_update_status = agent_update_handler.get_vmagent_update_status() + self.assertEqual(VMAgentUpdateStatuses.Error, vm_agent_update_status.status) + self.assertEqual(1, vm_agent_update_status.code) + self.assertEqual("9.9.9.10", vm_agent_update_status.expected_version) + self.assertIn("Failed to download agent package from all URIs", vm_agent_update_status.message) + + # Send same version GS after last update attempt failed + agent_update_handler._protocol.mock_wire_data.set_version_in_agent_family( + str(CURRENT_VERSION)) + agent_update_handler._protocol.mock_wire_data.set_incarnation(2) + agent_update_handler._protocol.client.update_goal_state() + agent_update_handler.run(agent_update_handler._protocol.get_goal_state(), True) + vm_agent_update_status = agent_update_handler.get_vmagent_update_status() + self.assertEqual(VMAgentUpdateStatuses.Success, vm_agent_update_status.status) + self.assertEqual(0, vm_agent_update_status.code) + self.assertEqual(str(CURRENT_VERSION), vm_agent_update_status.expected_version) + def test_it_should_report_update_status_with_missing_rsm_version_error(self): data_file = DATA_FILE.copy() data_file['ext_conf'] = "wire/ext_conf_version_missing_in_agent_family.xml" @@ -452,7 +479,7 @@ def test_it_should_save_rsm_state_of_the_most_recent_goal_state(self): with self.assertRaises(AgentUpgradeExitException): agent_update_handler.run(agent_update_handler._protocol.get_goal_state(), True) - state_file = os.path.join(conf.get_lib_dir(), "rsm_update.json") + state_file = os.path.join(conf.get_lib_dir(), RSM_UPDATE_STATE_FILE) 
self.assertTrue(os.path.exists(state_file), "The rsm state file was not saved (can't find {0})".format(state_file)) # check if state gets updated if most recent goal state has different values @@ -535,3 +562,36 @@ def http_get_handler(uri, *_, **__): self.assertEqual(1, len([kwarg['message'] for _, kwarg in mock_telemetry.call_args_list if "Downloaded agent package: WALinuxAgent-9.9.9.10 is missing agent handler manifest file" in kwarg['message'] and kwarg[ 'op'] == WALAEventOperation.AgentUpgrade]), "Agent update should fail") + + def test_it_should_use_self_update_for_first_update_always(self): + self.prepare_agents(count=1) + + # mock the goal state as vm enrolled into RSM + data_file = DATA_FILE.copy() + data_file['ext_conf'] = "wire/ext_conf_rsm_version.xml" + with self._get_agent_update_handler(test_data=data_file, initial_update_attempted=False) as (agent_update_handler, mock_telemetry): + with self.assertRaises(AgentUpgradeExitException) as context: + agent_update_handler.run(agent_update_handler._protocol.get_goal_state(), True) + # Verifying agent used self-update for initial update + self._assert_update_discovered_from_agent_manifest(mock_telemetry, version="99999.0.0.0") + self._assert_agent_directories_exist_and_others_dont_exist(versions=[str(CURRENT_VERSION), "99999.0.0.0"]) + self._assert_agent_exit_process_telemetry_emitted(ustr(context.exception.reason)) + + state_file = os.path.join(conf.get_lib_dir(), INITIAL_UPDATE_STATE_FILE) + self.assertTrue(os.path.exists(state_file), + "The first update state file was not saved (can't find {0})".format(state_file)) + + def test_it_should_honor_any_update_type_after_first_update(self): + self.prepare_agents(count=1) + + data_file = DATA_FILE.copy() + data_file['ext_conf'] = "wire/ext_conf_rsm_version.xml" + # mocking initial update attempt as true + with self._get_agent_update_handler(test_data=data_file, initial_update_attempted=True) as (agent_update_handler, mock_telemetry): + with 
self.assertRaises(AgentUpgradeExitException) as context: + agent_update_handler.run(agent_update_handler._protocol.get_goal_state(), True) + + # Verifying agent honored RSM update + self._assert_agent_rsm_version_in_goal_state(mock_telemetry, version="9.9.9.10") + self._assert_agent_directories_exist_and_others_dont_exist(versions=["9.9.9.10", str(CURRENT_VERSION)]) + self._assert_agent_exit_process_telemetry_emitted(ustr(context.exception.reason)) diff --git a/tests/ga/test_cgroupapi.py b/tests/ga/test_cgroupapi.py index ad8ef80c2c..ae091ed9de 100644 --- a/tests/ga/test_cgroupapi.py +++ b/tests/ga/test_cgroupapi.py @@ -22,14 +22,21 @@ import subprocess import tempfile -from azurelinuxagent.ga.cgroupapi import CGroupsApi, SystemdCgroupsApi +from azurelinuxagent.common.exception import CGroupsException +from azurelinuxagent.ga.cgroupapi import SystemdCgroupApiv1, SystemdCgroupApiv2, CGroupUtil, get_cgroup_api, \ + InvalidCgroupMountpointException, CgroupV1, CgroupV2 from azurelinuxagent.ga.cgroupstelemetry import CGroupsTelemetry from azurelinuxagent.common.osutil import systemd from azurelinuxagent.common.utils import fileutil -from tests.lib.mock_cgroup_environment import mock_cgroup_environment +from azurelinuxagent.ga.cpucontroller import CpuControllerV1, CpuControllerV2 +from azurelinuxagent.ga.memorycontroller import MemoryControllerV1, MemoryControllerV2 +from tests.lib.mock_cgroup_environment import mock_cgroup_v1_environment, mock_cgroup_v2_environment, \ + mock_cgroup_hybrid_environment +from tests.lib.mock_environment import MockCommand from tests.lib.tools import AgentTestCase, patch, mock_sleep from tests.lib.cgroups_tools import CGroupsTools + class _MockedFileSystemTestCase(AgentTestCase): def setUp(self): AgentTestCase.setUp(self) @@ -39,7 +46,7 @@ def setUp(self): os.mkdir(os.path.join(self.cgroups_file_system_root, "cpu")) os.mkdir(os.path.join(self.cgroups_file_system_root, "memory")) - self.mock_cgroups_file_system_root = 
patch("azurelinuxagent.ga.cgroupapi.CGROUPS_FILE_SYSTEM_ROOT", self.cgroups_file_system_root) + self.mock_cgroups_file_system_root = patch("azurelinuxagent.ga.cgroupapi.CGROUP_FILE_SYSTEM_ROOT", self.cgroups_file_system_root) self.mock_cgroups_file_system_root.start() def tearDown(self): @@ -47,7 +54,7 @@ def tearDown(self): AgentTestCase.tearDown(self) -class CGroupsApiTestCase(_MockedFileSystemTestCase): +class CGroupUtilTestCase(AgentTestCase): def test_cgroups_should_be_supported_only_on_ubuntu16_centos7dot4_redhat7dot4_and_later_versions(self): test_cases = [ (['ubuntu', '16.04', 'xenial'], True), @@ -76,84 +83,228 @@ def test_cgroups_should_be_supported_only_on_ubuntu16_centos7dot4_redhat7dot4_an for (distro, supported) in test_cases: with patch("azurelinuxagent.ga.cgroupapi.get_distro", return_value=distro): - self.assertEqual(CGroupsApi.cgroups_supported(), supported, "cgroups_supported() failed on {0}".format(distro)) + self.assertEqual(CGroupUtil.cgroups_supported(), supported, "cgroups_supported() failed on {0}".format(distro)) class SystemdCgroupsApiTestCase(AgentTestCase): + def test_get_cgroup_api_raises_exception_when_systemd_mountpoint_does_not_exist(self): + with mock_cgroup_v1_environment(self.tmp_dir): + # Mock os.path.exists to return False for the os.path.exists(CGROUP_FILE_SYSTEM_ROOT) check + with patch("os.path.exists", return_value=False): + with self.assertRaises(InvalidCgroupMountpointException) as context: + get_cgroup_api() + self.assertTrue("Expected cgroup filesystem to be mounted at '/sys/fs/cgroup', but it is not" in str(context.exception)) + + def test_get_cgroup_api_is_v2_when_v2_in_use(self): + with mock_cgroup_v2_environment(self.tmp_dir): + self.assertIsInstance(get_cgroup_api(), SystemdCgroupApiv2) + + def test_get_cgroup_api_raises_exception_when_hybrid_in_use_and_controllers_available_in_unified_hierarchy(self): + with mock_cgroup_hybrid_environment(self.tmp_dir): + # Mock /sys/fs/cgroup/unified/cgroup.controllers file to 
have available controllers + with patch("os.path.exists", return_value=True): + with patch('azurelinuxagent.common.utils.fileutil.read_file', return_value="cpu memory"): + with self.assertRaises(CGroupsException) as context: + get_cgroup_api() + self.assertTrue("Detected hybrid cgroup mode, but there are controllers available to be enabled in unified hierarchy: cpu memory" in str(context.exception)) + + def test_get_cgroup_api_raises_exception_when_v1_in_use_and_controllers_have_non_sytemd_mountpoints(self): + with mock_cgroup_v1_environment(self.tmp_dir): + # Mock /sys/fs/cgroup/unified/cgroup.controllers file to have available controllers + with patch('azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv1.are_mountpoints_systemd_created', return_value=False): + with self.assertRaises(InvalidCgroupMountpointException) as context: + get_cgroup_api() + self.assertTrue("Expected cgroup controllers to be mounted at '/sys/fs/cgroup', but at least one is not." in str(context.exception)) + + def test_get_cgroup_api_is_v1_when_v1_in_use(self): + with mock_cgroup_v1_environment(self.tmp_dir): + self.assertIsInstance(get_cgroup_api(), SystemdCgroupApiv1) + + def test_get_cgroup_api_is_v1_when_hybrid_in_use(self): + with mock_cgroup_hybrid_environment(self.tmp_dir): + # Mock os.path.exists to return True for the os.path.exists('/sys/fs/cgroup/cgroup.controllers') check + with patch("os.path.exists", return_value=True): + self.assertIsInstance(get_cgroup_api(), SystemdCgroupApiv1) + + def test_get_cgroup_api_raises_exception_when_cgroup_mode_cannot_be_determined(self): + unknown_cgroup_type = "unknown_cgroup_type" + with patch('azurelinuxagent.common.utils.shellutil.run_command', return_value=unknown_cgroup_type): + with self.assertRaises(CGroupsException) as context: + get_cgroup_api() + self.assertTrue("/sys/fs/cgroup has an unexpected file type: {0}".format(unknown_cgroup_type) in str(context.exception)) + def test_get_systemd_version_should_return_a_version_number(self): - with 
mock_cgroup_environment(self.tmp_dir): - version_info = systemd.get_version() - found = re.search(r"systemd \d+", version_info) is not None - self.assertTrue(found, "Could not determine the systemd version: {0}".format(version_info)) - - def test_get_cpu_and_memory_mount_points_should_return_the_cgroup_mount_points(self): - with mock_cgroup_environment(self.tmp_dir): - cpu, memory = SystemdCgroupsApi().get_cgroup_mount_points() - self.assertEqual(cpu, '/sys/fs/cgroup/cpu,cpuacct', "The mount point for the CPU controller is incorrect") - self.assertEqual(memory, '/sys/fs/cgroup/memory', "The mount point for the memory controller is incorrect") - - def test_get_service_cgroup_paths_should_return_the_cgroup_mount_points(self): - with mock_cgroup_environment(self.tmp_dir): - cpu, memory = SystemdCgroupsApi().get_unit_cgroup_paths("extension.service") - self.assertIn(cpu, '/sys/fs/cgroup/cpu,cpuacct/system.slice/extension.service', - "The mount point for the CPU controller is incorrect") - self.assertIn(memory, '/sys/fs/cgroup/memory/system.slice/extension.service', - "The mount point for the memory controller is incorrect") - - def test_get_cpu_and_memory_cgroup_relative_paths_for_process_should_return_the_cgroup_relative_paths(self): - with mock_cgroup_environment(self.tmp_dir): - cpu, memory = SystemdCgroupsApi.get_process_cgroup_relative_paths('self') - self.assertEqual(cpu, "system.slice/walinuxagent.service", "The relative path for the CPU cgroup is incorrect") - self.assertEqual(memory, "system.slice/walinuxagent.service", "The relative memory for the CPU cgroup is incorrect") - - def test_get_cgroup2_controllers_should_return_the_v2_cgroup_controllers(self): - with mock_cgroup_environment(self.tmp_dir): - mount_point, controllers = SystemdCgroupsApi.get_cgroup2_controllers() - - self.assertEqual(mount_point, "/sys/fs/cgroup/unified", "Invalid mount point for V2 cgroups") - self.assertIn("cpu", controllers, "The CPU controller is not in the list of V2 
controllers") - self.assertIn("memory", controllers, "The memory controller is not in the list of V2 controllers") + # We expect same behavior for v1 and v2 + mock_envs = [mock_cgroup_v1_environment(self.tmp_dir), mock_cgroup_v2_environment(self.tmp_dir)] + for env in mock_envs: + with env: + version_info = systemd.get_version() + found = re.search(r"systemd \d+", version_info) is not None + self.assertTrue(found, "Could not determine the systemd version: {0}".format(version_info)) def test_get_unit_property_should_return_the_value_of_the_given_property(self): - with mock_cgroup_environment(self.tmp_dir): - cpu_accounting = systemd.get_unit_property("walinuxagent.service", "CPUAccounting") - - self.assertEqual(cpu_accounting, "no", "Property {0} of {1} is incorrect".format("CPUAccounting", "walinuxagent.service")) - - def assert_cgroups_created(self, extension_cgroups): - self.assertEqual(len(extension_cgroups), 2, - 'start_extension_command did not return the expected number of cgroups') - - cpu_found = memory_found = False - - for cgroup in extension_cgroups: - match = re.match( - r'^/sys/fs/cgroup/(cpu|memory)/system.slice/Microsoft.Compute.TestExtension_1\.2\.3\_([a-f0-9-]+)\.scope$', - cgroup.path) - - self.assertTrue(match is not None, "Unexpected path for cgroup: {0}".format(cgroup.path)) - - if match.group(1) == 'cpu': - cpu_found = True - if match.group(1) == 'memory': - memory_found = True - - self.assertTrue(cpu_found, 'start_extension_command did not return a cpu cgroup') - self.assertTrue(memory_found, 'start_extension_command did not return a memory cgroup') + # We expect same behavior for v1 and v2 + mock_envs = [mock_cgroup_v1_environment(self.tmp_dir), mock_cgroup_v2_environment(self.tmp_dir)] + for env in mock_envs: + with env: + cpu_accounting = systemd.get_unit_property("walinuxagent.service", "CPUAccounting") + + self.assertEqual(cpu_accounting, "no", "Property {0} of {1} is incorrect".format("CPUAccounting", "walinuxagent.service")) + + +class 
SystemdCgroupsApiv1TestCase(AgentTestCase): + def test_get_controller_mountpoints_should_return_only_supported_controllers(self): + with mock_cgroup_v1_environment(self.tmp_dir): + cgroup_api = get_cgroup_api() + # Expected value comes from findmnt output in the mocked environment + self.assertEqual(cgroup_api._get_controller_mountpoints(), { + 'cpu,cpuacct': '/sys/fs/cgroup/cpu,cpuacct', + 'memory': '/sys/fs/cgroup/memory' + }, "The controller mountpoints are not correct") + + def test_are_mountpoints_systemd_created_should_return_False_if_mountpoints_are_not_systemd(self): + with mock_cgroup_v1_environment(self.tmp_dir): + with patch('azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv1._get_controller_mountpoints', return_value={'cpu,cpuacct': '/custom/mountpoint/path', 'memory': '/custom/mountpoint/path'}): + self.assertFalse(SystemdCgroupApiv1().are_mountpoints_systemd_created()) + + with patch('azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv1._get_controller_mountpoints', return_value={'cpu,cpuacct': '/custom/mountpoint/path'}): + self.assertFalse(SystemdCgroupApiv1().are_mountpoints_systemd_created()) + + with patch('azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv1._get_controller_mountpoints', return_value={'memory': '/custom/mountpoint/path'}): + self.assertFalse(SystemdCgroupApiv1().are_mountpoints_systemd_created()) + + def test_are_mountpoints_systemd_created_should_return_True_if_mountpoints_are_systemd(self): + with mock_cgroup_v1_environment(self.tmp_dir): + with patch('azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv1._get_controller_mountpoints', return_value={'cpu,cpuacct': '/sys/fs/cgroup/cpu,cpuacct', 'memory': '/sys/fs/cgroup/memory'}): + self.assertTrue(SystemdCgroupApiv1().are_mountpoints_systemd_created()) + + # are_mountpoints_systemd_created should only check controllers which are mounted + with patch('azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv1._get_controller_mountpoints', return_value={'cpu,cpuacct': '/sys/fs/cgroup/cpu,cpuacct'}): + 
self.assertTrue(SystemdCgroupApiv1().are_mountpoints_systemd_created()) + + with patch('azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv1._get_controller_mountpoints', return_value={'memory': '/sys/fs/cgroup/memory'}): + self.assertTrue(SystemdCgroupApiv1().are_mountpoints_systemd_created()) + + with patch('azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv1._get_controller_mountpoints', return_value={}): + self.assertTrue(SystemdCgroupApiv1().are_mountpoints_systemd_created()) + + def test_get_relative_paths_for_process_should_return_the_cgroup_v1_relative_paths(self): + with mock_cgroup_v1_environment(self.tmp_dir): + relative_paths = get_cgroup_api()._get_process_relative_controller_paths('self') + self.assertEqual(len(relative_paths), 2) + self.assertEqual(relative_paths.get('cpu,cpuacct'), "system.slice/walinuxagent.service", "The relative path for the CPU cgroup is incorrect") + self.assertEqual(relative_paths.get('memory'), "system.slice/walinuxagent.service", "The relative memory for the memory cgroup is incorrect") + + def test_get_unit_cgroup_should_return_correct_paths_for_cgroup_v1(self): + with mock_cgroup_v1_environment(self.tmp_dir): + cgroup = get_cgroup_api().get_unit_cgroup(unit_name="extension.service", cgroup_name="extension") + self.assertIsInstance(cgroup, CgroupV1) + self.assertEqual(cgroup._cgroup_name, "extension") + self.assertEqual(cgroup._controller_mountpoints, {'cpu,cpuacct':'/sys/fs/cgroup/cpu,cpuacct', 'memory':'/sys/fs/cgroup/memory'}) + self.assertEqual(cgroup._controller_paths, {'cpu,cpuacct': '/sys/fs/cgroup/cpu,cpuacct/system.slice/extension.service', 'memory': '/sys/fs/cgroup/memory/system.slice/extension.service'}) + + def test_get_unit_cgroup_should_return_only_mounted_controllers_v1(self): + with mock_cgroup_v1_environment(self.tmp_dir): + with patch('azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv1._get_controller_mountpoints', return_value={'cpu,cpuacct': '/sys/fs/cgroup/cpu,cpuacct'}): + cgroup = 
get_cgroup_api().get_unit_cgroup(unit_name="extension.service", cgroup_name="extension") + self.assertIsInstance(cgroup, CgroupV1) + self.assertEqual(cgroup._cgroup_name, "extension") + self.assertEqual(cgroup._controller_mountpoints, {'cpu,cpuacct':'/sys/fs/cgroup/cpu,cpuacct'}) + self.assertEqual(cgroup._controller_paths, {'cpu,cpuacct': '/sys/fs/cgroup/cpu,cpuacct/system.slice/extension.service'}) + + with patch('azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv1._get_controller_mountpoints', return_value={}): + cgroup = get_cgroup_api().get_unit_cgroup(unit_name="extension.service", cgroup_name="extension") + self.assertIsInstance(cgroup, CgroupV1) + self.assertEqual(cgroup._cgroup_name, "extension") + self.assertEqual(cgroup._controller_mountpoints, {}) + self.assertEqual(cgroup._controller_paths, {}) + + def test_get_cgroup_from_relative_path_should_return_the_correct_paths_for_cgroup_v1(self): + with mock_cgroup_v1_environment(self.tmp_dir): + cgroup = get_cgroup_api().get_cgroup_from_relative_path(relative_path="some/relative/path", cgroup_name="test_cgroup") + self.assertIsInstance(cgroup, CgroupV1) + self.assertEqual(cgroup._cgroup_name, "test_cgroup") + self.assertEqual(cgroup._controller_mountpoints, + {'cpu,cpuacct': '/sys/fs/cgroup/cpu,cpuacct', 'memory': '/sys/fs/cgroup/memory'}) + self.assertEqual(cgroup._controller_paths, + {'cpu,cpuacct': '/sys/fs/cgroup/cpu,cpuacct/some/relative/path', + 'memory': '/sys/fs/cgroup/memory/some/relative/path'}) + + def test_get_cgroup_from_relative_path_should_return_only_mounted_controllers_v1(self): + with mock_cgroup_v1_environment(self.tmp_dir): + with patch('azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv1._get_controller_mountpoints', return_value={'cpu,cpuacct': '/sys/fs/cgroup/cpu,cpuacct'}): + cgroup = get_cgroup_api().get_cgroup_from_relative_path(relative_path="some/relative/path", cgroup_name="test_cgroup") + self.assertIsInstance(cgroup, CgroupV1) + self.assertEqual(cgroup._cgroup_name, "test_cgroup") + 
self.assertEqual(cgroup._controller_mountpoints, + {'cpu,cpuacct': '/sys/fs/cgroup/cpu,cpuacct'}) + self.assertEqual(cgroup._controller_paths, + {'cpu,cpuacct': '/sys/fs/cgroup/cpu,cpuacct/some/relative/path'}) + + with patch('azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv1._get_controller_mountpoints', return_value={}): + cgroup = get_cgroup_api().get_cgroup_from_relative_path(relative_path="some/relative/path", cgroup_name="test_cgroup") + self.assertIsInstance(cgroup, CgroupV1) + self.assertEqual(cgroup._cgroup_name, "test_cgroup") + self.assertEqual(cgroup._controller_mountpoints, {}) + self.assertEqual(cgroup._controller_paths, {}) + + def test_get_process_cgroup_should_return_the_correct_paths_for_cgroup_v1(self): + with mock_cgroup_v1_environment(self.tmp_dir): + cgroup = get_cgroup_api().get_process_cgroup(process_id="self", cgroup_name="walinuxagent") + self.assertIsInstance(cgroup, CgroupV1) + self.assertEqual(cgroup._cgroup_name, "walinuxagent") + self.assertEqual(cgroup._controller_mountpoints, + {'cpu,cpuacct': '/sys/fs/cgroup/cpu,cpuacct', 'memory': '/sys/fs/cgroup/memory'}) + self.assertEqual(cgroup._controller_paths, + {'cpu,cpuacct': '/sys/fs/cgroup/cpu,cpuacct/system.slice/walinuxagent.service', + 'memory': '/sys/fs/cgroup/memory/system.slice/walinuxagent.service'}) + + def test_get_process_cgroup_should_return_only_mounted_controllers_v1(self): + with mock_cgroup_v1_environment(self.tmp_dir): + with patch('azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv1._get_controller_mountpoints', return_value={'cpu,cpuacct': '/sys/fs/cgroup/cpu,cpuacct'}): + cgroup = get_cgroup_api().get_process_cgroup(process_id="self", cgroup_name="walinuxagent") + self.assertIsInstance(cgroup, CgroupV1) + self.assertEqual(cgroup._cgroup_name, "walinuxagent") + self.assertEqual(cgroup._controller_mountpoints, {'cpu,cpuacct': '/sys/fs/cgroup/cpu,cpuacct'}) + self.assertEqual(cgroup._controller_paths, {'cpu,cpuacct': 
'/sys/fs/cgroup/cpu,cpuacct/system.slice/walinuxagent.service'}) + + with patch('azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv1._get_controller_mountpoints', return_value={}): + cgroup = get_cgroup_api().get_process_cgroup(process_id="self", cgroup_name="walinuxagent") + self.assertIsInstance(cgroup, CgroupV1) + self.assertEqual(cgroup._cgroup_name, "walinuxagent") + self.assertEqual(cgroup._controller_mountpoints, {}) + self.assertEqual(cgroup._controller_paths, {}) + + def test_get_process_cgroup_should_return_only_mounted_process_controllers_v1(self): + with mock_cgroup_v1_environment(self.tmp_dir): + with patch('azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv1._get_process_relative_controller_paths', return_value={'cpu,cpuacct': 'relative/path'}): + cgroup = get_cgroup_api().get_process_cgroup(process_id="self", cgroup_name="walinuxagent") + self.assertIsInstance(cgroup, CgroupV1) + self.assertEqual(cgroup._cgroup_name, "walinuxagent") + self.assertEqual(cgroup._controller_mountpoints, {'cpu,cpuacct': '/sys/fs/cgroup/cpu,cpuacct', 'memory':'/sys/fs/cgroup/memory'}) + self.assertEqual(cgroup._controller_paths, {'cpu,cpuacct': '/sys/fs/cgroup/cpu,cpuacct/relative/path'}) + + with patch('azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv1._get_process_relative_controller_paths', return_value={}): + cgroup = get_cgroup_api().get_process_cgroup(process_id="self", cgroup_name="walinuxagent") + self.assertIsInstance(cgroup, CgroupV1) + self.assertEqual(cgroup._cgroup_name, "walinuxagent") + self.assertEqual(cgroup._controller_mountpoints, {'cpu,cpuacct': '/sys/fs/cgroup/cpu,cpuacct', 'memory':'/sys/fs/cgroup/memory'}) + self.assertEqual(cgroup._controller_paths, {}) @patch('time.sleep', side_effect=lambda _: mock_sleep()) - def test_start_extension_command_should_return_the_command_output(self, _): - original_popen = subprocess.Popen + def test_start_extension_cgroups_v1_command_should_return_the_command_output(self, _): + with mock_cgroup_v1_environment(self.tmp_dir): + 
original_popen = subprocess.Popen - def mock_popen(command, *args, **kwargs): - if command.startswith('systemd-run --property'): - command = "echo TEST_OUTPUT" - return original_popen(command, *args, **kwargs) + def mock_popen(command, *args, **kwargs): + if isinstance(command, str) and command.startswith('systemd-run --property'): + command = "echo TEST_OUTPUT" + return original_popen(command, *args, **kwargs) - with mock_cgroup_environment(self.tmp_dir): with tempfile.TemporaryFile(dir=self.tmp_dir, mode="w+b") as output_file: - with patch("subprocess.Popen", side_effect=mock_popen) as popen_patch: # pylint: disable=unused-variable - command_output = SystemdCgroupsApi().start_extension_command( + with patch("subprocess.Popen", + side_effect=mock_popen) as popen_patch: # pylint: disable=unused-variable + command_output = get_cgroup_api().start_extension_command( extension_name="Microsoft.Compute.TestExtension-1.2.3", command="A_TEST_COMMAND", cmd_name="test", @@ -167,9 +318,9 @@ def mock_popen(command, *args, **kwargs): self.assertIn("[stdout]\nTEST_OUTPUT\n", command_output, "The test output was not captured") @patch('time.sleep', side_effect=lambda _: mock_sleep()) - def test_start_extension_command_should_execute_the_command_in_a_cgroup(self, _): - with mock_cgroup_environment(self.tmp_dir): - SystemdCgroupsApi().start_extension_command( + def test_start_extension_cgroups_v1_command_should_execute_the_command_in_a_cgroup(self, _): + with mock_cgroup_v1_environment(self.tmp_dir): + get_cgroup_api().start_extension_command( extension_name="Microsoft.Compute.TestExtension-1.2.3", command="test command", cmd_name="test", @@ -183,18 +334,20 @@ def test_start_extension_command_should_execute_the_command_in_a_cgroup(self, _) tracked = CGroupsTelemetry._tracked self.assertTrue( - any(cg for cg in tracked.values() if cg.name == 'Microsoft.Compute.TestExtension-1.2.3' and 'cpu' in cg.path), + any(cg for cg in tracked.values() if + cg.name == 
'Microsoft.Compute.TestExtension-1.2.3' and 'cpu' in cg.path), "The extension's CPU is not being tracked") self.assertTrue( - any(cg for cg in tracked.values() if cg.name == 'Microsoft.Compute.TestExtension-1.2.3' and 'memory' in cg.path), + any(cg for cg in tracked.values() if + cg.name == 'Microsoft.Compute.TestExtension-1.2.3' and 'memory' in cg.path), "The extension's Memory is not being tracked") @patch('time.sleep', side_effect=lambda _: mock_sleep()) - def test_start_extension_command_should_use_systemd_to_execute_the_command(self, _): - with mock_cgroup_environment(self.tmp_dir): + def test_start_extension_cgroups_v1_command_should_use_systemd_to_execute_the_command(self, _): + with mock_cgroup_v1_environment(self.tmp_dir): with patch("subprocess.Popen", wraps=subprocess.Popen) as popen_patch: - SystemdCgroupsApi().start_extension_command( + get_cgroup_api().start_extension_command( extension_name="Microsoft.Compute.TestExtension-1.2.3", command="the-test-extension-command", cmd_name="test", @@ -205,12 +358,132 @@ def test_start_extension_command_should_use_systemd_to_execute_the_command(self, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - extension_calls = [args[0] for (args, _) in popen_patch.call_args_list if "the-test-extension-command" in args[0]] + extension_calls = [args[0] for (args, _) in popen_patch.call_args_list if + "the-test-extension-command" in args[0]] self.assertEqual(1, len(extension_calls), "The extension should have been invoked exactly once") self.assertIn("systemd-run", extension_calls[0], "The extension should have been invoked using systemd") +class SystemdCgroupsApiv2TestCase(AgentTestCase): + def test_get_root_cgroup_path_should_return_v2_cgroup_root(self): + with mock_cgroup_v2_environment(self.tmp_dir): + cgroup_api = get_cgroup_api() + self.assertEqual(cgroup_api._get_root_cgroup_path(), '/sys/fs/cgroup') + + def test_get_root_cgroup_path_should_only_match_systemd_mountpoint(self): + with 
mock_cgroup_v2_environment(self.tmp_dir) as env: + # Mock an environment which has multiple v2 mountpoints + env.add_command(MockCommand(r"^findmnt -t cgroup2 --noheadings$", + '''/custom/mountpoint/path1 cgroup2 cgroup2 rw,relatime + /sys/fs/cgroup cgroup2 cgroup2 rw,nosuid,nodev,noexec,relatime + /custom/mountpoint/path2 none cgroup2 rw,relatime + ''')) + cgroup_api = get_cgroup_api() + self.assertEqual(cgroup_api._get_root_cgroup_path(), '/sys/fs/cgroup') + + def test_get_controllers_enabled_at_root_should_return_list_of_agent_supported_and_enabled_controllers(self): + with mock_cgroup_v2_environment(self.tmp_dir): + cgroup_api = get_cgroup_api() + enabled_controllers = cgroup_api._get_controllers_enabled_at_root('/sys/fs/cgroup') + self.assertEqual(len(enabled_controllers), 2) + self.assertIn('cpu', enabled_controllers) + self.assertIn('memory', enabled_controllers) + + def test_get_controllers_enabled_at_root_should_return_empty_list_if_root_cgroup_path_is_empty(self): + with mock_cgroup_v2_environment(self.tmp_dir): + with patch('azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv2._get_root_cgroup_path', return_value=""): + cgroup_api = get_cgroup_api() + self.assertEqual(cgroup_api._controllers_enabled_at_root, []) + + def test_get_process_relative_cgroup_path_should_return_relative_path(self): + with mock_cgroup_v2_environment(self.tmp_dir): + cgroup_api = get_cgroup_api() + self.assertEqual(cgroup_api._get_process_relative_cgroup_path(process_id="self"), "system.slice/walinuxagent.service") + + def test_get_unit_cgroup_should_return_correct_paths_for_cgroup_v2(self): + with mock_cgroup_v2_environment(self.tmp_dir): + cgroup = get_cgroup_api().get_unit_cgroup(unit_name="extension.service", cgroup_name="extension") + self.assertIsInstance(cgroup, CgroupV2) + self.assertEqual(cgroup._cgroup_name, "extension") + self.assertEqual(cgroup._root_cgroup_path, "/sys/fs/cgroup") + self.assertEqual(cgroup._cgroup_path, "/sys/fs/cgroup/system.slice/extension.service") + 
self.assertEqual(len(cgroup._enabled_controllers), 2) + self.assertIn('cpu', cgroup._enabled_controllers) + self.assertIn('memory', cgroup._enabled_controllers) + + def test_get_unit_cgroup_should_return_empty_paths_if_root_path_empty_v2(self): + with mock_cgroup_v2_environment(self.tmp_dir): + with patch('azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv2._get_root_cgroup_path', return_value=""): + cgroup = get_cgroup_api().get_unit_cgroup(unit_name="extension.service", cgroup_name="extension") + self.assertIsInstance(cgroup, CgroupV2) + self.assertEqual(cgroup._cgroup_name, "extension") + self.assertEqual(cgroup._root_cgroup_path, "") + self.assertEqual(cgroup._cgroup_path, "") + self.assertEqual(len(cgroup._enabled_controllers), 0) + + def test_get_unit_cgroup_should_return_only_enabled_controllers_v2(self): + with mock_cgroup_v2_environment(self.tmp_dir): + with patch('azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv2._get_controllers_enabled_at_root', return_value=['cpu']): + cgroup = get_cgroup_api().get_unit_cgroup(unit_name="extension.service", cgroup_name="extension") + self.assertIsInstance(cgroup, CgroupV2) + self.assertEqual(cgroup._cgroup_name, "extension") + self.assertEqual(cgroup._root_cgroup_path, "/sys/fs/cgroup") + self.assertEqual(cgroup._cgroup_path, "/sys/fs/cgroup/system.slice/extension.service") + self.assertEqual(len(cgroup._enabled_controllers), 1) + self.assertIn('cpu', cgroup._enabled_controllers) + + with patch('azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv2._get_controllers_enabled_at_root', return_value=[]): + cgroup = get_cgroup_api().get_unit_cgroup(unit_name="extension.service", cgroup_name="extension") + self.assertIsInstance(cgroup, CgroupV2) + self.assertEqual(cgroup._cgroup_name, "extension") + self.assertEqual(cgroup._root_cgroup_path, "/sys/fs/cgroup") + self.assertEqual(cgroup._cgroup_path, "/sys/fs/cgroup/system.slice/extension.service") + self.assertEqual(len(cgroup._enabled_controllers), 0) + + def 
test_get_cgroup_from_relative_path_should_return_the_correct_paths_for_cgroup_v2(self): + with mock_cgroup_v2_environment(self.tmp_dir): + cgroup = get_cgroup_api().get_cgroup_from_relative_path(relative_path="some/relative/path", cgroup_name="test_cgroup") + self.assertIsInstance(cgroup, CgroupV2) + self.assertEqual(cgroup._cgroup_name, "test_cgroup") + self.assertEqual(cgroup._root_cgroup_path, "/sys/fs/cgroup") + self.assertEqual(cgroup._cgroup_path, "/sys/fs/cgroup/some/relative/path") + self.assertEqual(len(cgroup._enabled_controllers), 2) + self.assertIn('cpu', cgroup._enabled_controllers) + self.assertIn('memory', cgroup._enabled_controllers) + + def test_get_cgroup_from_relative_path_should_return_empty_paths_if_root_path_empty_v2(self): + with mock_cgroup_v2_environment(self.tmp_dir): + with patch('azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv2._get_root_cgroup_path', return_value=""): + cgroup = get_cgroup_api().get_cgroup_from_relative_path(relative_path="some/relative/path", cgroup_name="test_cgroup") + self.assertIsInstance(cgroup, CgroupV2) + self.assertEqual(cgroup._cgroup_name, "test_cgroup") + self.assertEqual(cgroup._root_cgroup_path, "") + self.assertEqual(cgroup._cgroup_path, "") + self.assertEqual(len(cgroup._enabled_controllers), 0) + + def test_get_process_cgroup_should_return_the_correct_paths_for_cgroup_v2(self): + with mock_cgroup_v2_environment(self.tmp_dir): + cgroup = get_cgroup_api().get_process_cgroup(process_id="self", cgroup_name="walinuxagent") + self.assertIsInstance(cgroup, CgroupV2) + self.assertEqual(cgroup._cgroup_name, "walinuxagent") + self.assertEqual(cgroup._root_cgroup_path, "/sys/fs/cgroup") + self.assertEqual(cgroup._cgroup_path, "/sys/fs/cgroup/system.slice/walinuxagent.service") + self.assertEqual(len(cgroup._enabled_controllers), 2) + self.assertIn('cpu', cgroup._enabled_controllers) + self.assertIn('memory', cgroup._enabled_controllers) + + def 
test_get_process_cgroup_should_return_empty_paths_if_root_path_empty_v2(self): + with mock_cgroup_v2_environment(self.tmp_dir): + with patch('azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv2._get_root_cgroup_path', return_value=""): + cgroup = get_cgroup_api().get_process_cgroup(process_id="self", cgroup_name="walinuxagent") + self.assertIsInstance(cgroup, CgroupV2) + self.assertEqual(cgroup._cgroup_name, "walinuxagent") + self.assertEqual(cgroup._root_cgroup_path, "") + self.assertEqual(cgroup._cgroup_path, "") + self.assertEqual(len(cgroup._enabled_controllers), 0) + + class SystemdCgroupsApiMockedFileSystemTestCase(_MockedFileSystemTestCase): def test_cleanup_legacy_cgroups_should_remove_legacy_cgroups(self): # Set up a mock /var/run/waagent.pid file @@ -222,8 +495,232 @@ def test_cleanup_legacy_cgroups_should_remove_legacy_cgroups(self): legacy_memory_cgroup = CGroupsTools.create_legacy_agent_cgroup(self.cgroups_file_system_root, "memory", '') with patch("azurelinuxagent.ga.cgroupapi.get_agent_pid_file_path", return_value=daemon_pid_file): - legacy_cgroups = SystemdCgroupsApi().cleanup_legacy_cgroups() + legacy_cgroups = CGroupUtil.cleanup_legacy_cgroups() self.assertEqual(legacy_cgroups, 2, "cleanup_legacy_cgroups() did not find all the expected cgroups") self.assertFalse(os.path.exists(legacy_cpu_cgroup), "cleanup_legacy_cgroups() did not remove the CPU legacy cgroup") self.assertFalse(os.path.exists(legacy_memory_cgroup), "cleanup_legacy_cgroups() did not remove the memory legacy cgroup") + + +class CgroupsApiv1TestCase(AgentTestCase): + def test_get_supported_controllers_returns_v1_controllers(self): + with mock_cgroup_v1_environment(self.tmp_dir): + cgroup = get_cgroup_api().get_process_cgroup(process_id="self", cgroup_name="walinuxagent") + controllers = cgroup.get_supported_controller_names() + self.assertEqual(len(controllers), 2) + self.assertIn('cpu,cpuacct', controllers) + self.assertIn('memory', controllers) + + def 
test_check_in_expected_slice_returns_True_if_all_paths_in_expected_slice(self): + with mock_cgroup_v1_environment(self.tmp_dir): + cgroup = get_cgroup_api().get_process_cgroup(process_id="self", cgroup_name="walinuxagent") + self.assertTrue(cgroup.check_in_expected_slice(expected_slice='system.slice')) + + with patch('azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv1._get_process_relative_controller_paths', return_value={'cpu,cpuacct': 'system.slice/walinuxagent.service'}): + cgroup = get_cgroup_api().get_process_cgroup(process_id="self", cgroup_name="walinuxagent") + self.assertTrue(cgroup.check_in_expected_slice(expected_slice='system.slice')) + + with patch('azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv1._get_process_relative_controller_paths', return_value={}): + cgroup = get_cgroup_api().get_process_cgroup(process_id="self", cgroup_name="walinuxagent") + self.assertTrue(cgroup.check_in_expected_slice(expected_slice='system.slice')) + + def test_check_in_expected_slice_returns_False_if_any_paths_not_in_expected_slice(self): + with mock_cgroup_v1_environment(self.tmp_dir): + cgroup = get_cgroup_api().get_process_cgroup(process_id="self", cgroup_name="walinuxagent") + self.assertFalse(cgroup.check_in_expected_slice(expected_slice='user.slice')) + + with patch('azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv1._get_process_relative_controller_paths', return_value={'cpu,cpuacct': 'system.slice/walinuxagent.service', 'memory': 'user.slice/walinuxagent.service'}): + cgroup = get_cgroup_api().get_process_cgroup(process_id="self", cgroup_name="walinuxagent") + self.assertFalse(cgroup.check_in_expected_slice(expected_slice='user.slice')) + + with patch('azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv1._get_process_relative_controller_paths', return_value={'cpu,cpuacct': '', 'memory': ''}): + cgroup = get_cgroup_api().get_process_cgroup(process_id="self", cgroup_name="walinuxagent") + self.assertFalse(cgroup.check_in_expected_slice(expected_slice='system.slice')) + + def 
test_get_controllers_returns_all_supported_controllers_v1(self): + with mock_cgroup_v1_environment(self.tmp_dir): + cgroup = get_cgroup_api().get_process_cgroup(process_id="self", cgroup_name="walinuxagent") + controllers = cgroup.get_controllers() + self.assertEqual(len(controllers), 2) + self.assertIsInstance(controllers[0], CpuControllerV1) + self.assertEqual(controllers[0].name, "walinuxagent") + self.assertEqual(controllers[0].path, "/sys/fs/cgroup/cpu,cpuacct/system.slice/walinuxagent.service") + self.assertIsInstance(controllers[1], MemoryControllerV1) + self.assertEqual(controllers[1].name, "walinuxagent") + self.assertEqual(controllers[1].path, "/sys/fs/cgroup/memory/system.slice/walinuxagent.service") + + def test_get_controllers_returns_only_mounted_controllers_v1(self): + with mock_cgroup_v1_environment(self.tmp_dir): + with patch('azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv1._get_controller_mountpoints', return_value={'cpu,cpuacct': '/sys/fs/cgroup/cpu,cpuacct'}): + cgroup = get_cgroup_api().get_process_cgroup(process_id="self", cgroup_name="walinuxagent") + controllers = cgroup.get_controllers() + self.assertEqual(len(controllers), 1) + self.assertIsInstance(controllers[0], CpuControllerV1) + self.assertEqual(controllers[0].name, "walinuxagent") + self.assertEqual(controllers[0].path, "/sys/fs/cgroup/cpu,cpuacct/system.slice/walinuxagent.service") + + with patch('azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv1._get_controller_mountpoints', return_value={'memory': '/sys/fs/cgroup/memory'}): + cgroup = get_cgroup_api().get_process_cgroup(process_id="self", cgroup_name="walinuxagent") + controllers = cgroup.get_controllers() + self.assertEqual(len(controllers), 1) + self.assertIsInstance(controllers[0], MemoryControllerV1) + self.assertEqual(controllers[0].name, "walinuxagent") + self.assertEqual(controllers[0].path, "/sys/fs/cgroup/memory/system.slice/walinuxagent.service") + + with 
patch('azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv1._get_controller_mountpoints', return_value={}): + cgroup = get_cgroup_api().get_process_cgroup(process_id="self", cgroup_name="walinuxagent") + controllers = cgroup.get_controllers() + self.assertEqual(len(controllers), 0) + + def test_get_controllers_returns_only_controllers_at_expected_path_v1(self): + with mock_cgroup_v1_environment(self.tmp_dir): + with patch('azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv1._get_process_relative_controller_paths', return_value={'cpu,cpuacct': 'system.slice/walinuxagent.service', 'memory': 'unexpected/path'}): + cgroup = get_cgroup_api().get_process_cgroup(process_id="self", cgroup_name="walinuxagent") + controllers = cgroup.get_controllers(expected_relative_path="system.slice/walinuxagent.service") + self.assertEqual(len(controllers), 1) + self.assertIsInstance(controllers[0], CpuControllerV1) + self.assertEqual(controllers[0].name, "walinuxagent") + self.assertEqual(controllers[0].path, "/sys/fs/cgroup/cpu,cpuacct/system.slice/walinuxagent.service") + + with patch('azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv1._get_process_relative_controller_paths', return_value={'cpu,cpuacct': 'unexpected/path', 'memory': 'unexpected/path'}): + cgroup = get_cgroup_api().get_process_cgroup(process_id="self", cgroup_name="walinuxagent") + controllers = cgroup.get_controllers(expected_relative_path="system.slice/walinuxagent.service") + self.assertEqual(len(controllers), 0) + + def test_get_procs_path_returns_correct_path_v1(self): + with mock_cgroup_v1_environment(self.tmp_dir): + cgroup = get_cgroup_api().get_process_cgroup(process_id="self", cgroup_name="walinuxagent") + procs_path = cgroup.get_controller_procs_path(controller='cpu,cpuacct') + self.assertEqual(procs_path, "/sys/fs/cgroup/cpu,cpuacct/system.slice/walinuxagent.service/cgroup.procs") + + procs_path = cgroup.get_controller_procs_path(controller='memory') + self.assertEqual(procs_path, 
"/sys/fs/cgroup/memory/system.slice/walinuxagent.service/cgroup.procs") + + def test_get_processes_returns_processes_at_all_controller_paths_v1(self): + with mock_cgroup_v1_environment(self.tmp_dir): + cgroup = get_cgroup_api().get_process_cgroup(process_id="self", cgroup_name="walinuxagent") + procs = cgroup.get_processes() + self.assertEqual(len(procs), 3) + self.assertIn(int(123), procs) + self.assertIn(int(234), procs) + self.assertIn(int(345), procs) + + def test_get_processes_returns_empty_list_if_no_controllers_mounted_v1(self): + with mock_cgroup_v1_environment(self.tmp_dir): + with patch('azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv1._get_controller_mountpoints', return_value={}): + cgroup = get_cgroup_api().get_process_cgroup(process_id="self", cgroup_name="walinuxagent") + procs = cgroup.get_processes() + self.assertIsInstance(procs, list) + self.assertEqual(len(procs), 0) + + def test_get_processes_returns_empty_list_if_procs_path_empty_v1(self): + with mock_cgroup_v1_environment(self.tmp_dir): + with patch('azurelinuxagent.ga.cgroupapi.CgroupV1.get_controller_procs_path', return_value=""): + cgroup = get_cgroup_api().get_process_cgroup(process_id="self", cgroup_name="walinuxagent") + procs = cgroup.get_processes() + self.assertIsInstance(procs, list) + self.assertEqual(len(procs), 0) + + +class CgroupsApiv2TestCase(AgentTestCase): + def test_get_supported_controllers_returns_v2_controllers(self): + with mock_cgroup_v2_environment(self.tmp_dir): + cgroup = get_cgroup_api().get_process_cgroup(process_id="self", cgroup_name="walinuxagent") + controllers = cgroup.get_supported_controller_names() + self.assertEqual(len(controllers), 2) + self.assertIn('cpu', controllers) + self.assertIn('memory', controllers) + + def test_check_in_expected_slice_returns_True_if_cgroup_path_in_expected_slice(self): + with mock_cgroup_v2_environment(self.tmp_dir): + cgroup = get_cgroup_api().get_process_cgroup(process_id="self", cgroup_name="walinuxagent") + 
self.assertTrue(cgroup.check_in_expected_slice(expected_slice='system.slice')) + + def test_check_in_expected_slice_returns_False_if_cgroup_path_not_in_expected_slice(self): + with mock_cgroup_v2_environment(self.tmp_dir): + cgroup = get_cgroup_api().get_process_cgroup(process_id="self", cgroup_name="walinuxagent") + self.assertFalse(cgroup.check_in_expected_slice(expected_slice='user.slice')) + + with patch('azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv2._get_process_relative_cgroup_path', return_value=""): + cgroup = get_cgroup_api().get_process_cgroup(process_id="self", cgroup_name="walinuxagent") + self.assertFalse(cgroup.check_in_expected_slice(expected_slice='system.slice')) + + def test_get_controllers_returns_all_supported_controllers_v2(self): + with mock_cgroup_v2_environment(self.tmp_dir): + cgroup = get_cgroup_api().get_process_cgroup(process_id="self", cgroup_name="walinuxagent") + controllers = cgroup.get_controllers() + self.assertEqual(len(controllers), 2) + self.assertIsInstance(controllers[0], CpuControllerV2) + self.assertEqual(controllers[0].name, "walinuxagent") + self.assertEqual(controllers[0].path, "/sys/fs/cgroup/system.slice/walinuxagent.service") + self.assertIsInstance(controllers[1], MemoryControllerV2) + self.assertEqual(controllers[1].name, "walinuxagent") + self.assertEqual(controllers[1].path, "/sys/fs/cgroup/system.slice/walinuxagent.service") + + def test_get_controllers_returns_only_enabled_controllers_v2(self): + with mock_cgroup_v2_environment(self.tmp_dir): + with patch('azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv2._get_controllers_enabled_at_root', return_value=["cpu"]): + cgroup = get_cgroup_api().get_process_cgroup(process_id="self", cgroup_name="walinuxagent") + controllers = cgroup.get_controllers() + self.assertEqual(len(controllers), 1) + self.assertIsInstance(controllers[0], CpuControllerV2) + self.assertEqual(controllers[0].name, "walinuxagent") + self.assertEqual(controllers[0].path, 
"/sys/fs/cgroup/system.slice/walinuxagent.service") + + with patch('azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv2._get_controllers_enabled_at_root', return_value=["memory"]): + cgroup = get_cgroup_api().get_process_cgroup(process_id="self", cgroup_name="walinuxagent") + controllers = cgroup.get_controllers() + self.assertEqual(len(controllers), 1) + self.assertIsInstance(controllers[0], MemoryControllerV2) + self.assertEqual(controllers[0].name, "walinuxagent") + self.assertEqual(controllers[0].path, "/sys/fs/cgroup/system.slice/walinuxagent.service") + + with patch('azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv2._get_controllers_enabled_at_root', return_value=[]): + cgroup = get_cgroup_api().get_process_cgroup(process_id="self", cgroup_name="walinuxagent") + controllers = cgroup.get_controllers() + self.assertEqual(len(controllers), 0) + + def test_get_controllers_returns_empty_if_cgroup_path_is_empty_v2(self): + with mock_cgroup_v2_environment(self.tmp_dir): + mock_cgroup_empty_path = CgroupV2(cgroup_name="test", root_cgroup_path="/sys/fs/cgroup", cgroup_path="", enabled_controllers=["cpu", "memory"]) + with patch("azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv2.get_process_cgroup", return_value=mock_cgroup_empty_path): + cgroup = get_cgroup_api().get_process_cgroup(process_id="self", cgroup_name="walinuxagent") + controllers = cgroup.get_controllers() + self.assertEqual(len(controllers), 0) + + def test_get_controllers_returns_only_controllers_at_expected_path_v2(self): + with mock_cgroup_v2_environment(self.tmp_dir): + mock_cgroup_unexpected_path = CgroupV2(cgroup_name="test", root_cgroup_path="/sys/fs/cgroup", cgroup_path="/sys/fs/cgroup/unexpected/path", enabled_controllers=["cpu", "memory"]) + with patch("azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv2.get_process_cgroup", return_value=mock_cgroup_unexpected_path): + cgroup = get_cgroup_api().get_process_cgroup(process_id="self", cgroup_name="walinuxagent") + controllers = 
cgroup.get_controllers(expected_relative_path="system.slice/walinuxagent.service") + self.assertEqual(len(controllers), 0) + + def test_get_procs_path_returns_empty_if_root_cgroup_empty_v2(self): + with mock_cgroup_v2_environment(self.tmp_dir): + with patch('azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv2._get_root_cgroup_path', return_value=""): + cgroup = get_cgroup_api().get_process_cgroup(process_id="self", cgroup_name="walinuxagent") + procs_path = cgroup.get_procs_path() + self.assertEqual(procs_path, "") + + def test_get_procs_path_returns_correct_path_v2(self): + with mock_cgroup_v2_environment(self.tmp_dir): + cgroup = get_cgroup_api().get_process_cgroup(process_id="self", cgroup_name="walinuxagent") + procs_path = cgroup.get_procs_path() + self.assertEqual(procs_path, "/sys/fs/cgroup/system.slice/walinuxagent.service/cgroup.procs") + + def test_get_processes_returns_processes_at_all_controller_paths_v2(self): + with mock_cgroup_v2_environment(self.tmp_dir): + cgroup = get_cgroup_api().get_process_cgroup(process_id="self", cgroup_name="walinuxagent") + procs = cgroup.get_processes() + self.assertEqual(len(procs), 3) + self.assertIn(int(123), procs) + self.assertIn(int(234), procs) + self.assertIn(int(345), procs) + + def test_get_processes_returns_empty_list_if_root_cgroup_empty_v2(self): + with mock_cgroup_v2_environment(self.tmp_dir): + with patch('azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv2._get_root_cgroup_path', return_value=""): + cgroup = get_cgroup_api().get_process_cgroup(process_id="self", cgroup_name="walinuxagent") + procs = cgroup.get_processes() + self.assertEqual(len(procs), 0) diff --git a/tests/ga/test_cgroupconfigurator.py b/tests/ga/test_cgroupconfigurator.py index b5a9e09941..1ea7d9325c 100644 --- a/tests/ga/test_cgroupconfigurator.py +++ b/tests/ga/test_cgroupconfigurator.py @@ -26,20 +26,18 @@ import time import threading -from nose.plugins.attrib import attr - from azurelinuxagent.common import conf -from 
azurelinuxagent.ga.cgroup import AGENT_NAME_TELEMETRY, MetricsCounter, MetricValue, MetricsCategory, CpuCgroup +from azurelinuxagent.ga.cgroupcontroller import AGENT_NAME_TELEMETRY, MetricsCounter, MetricValue, MetricsCategory from azurelinuxagent.ga.cgroupconfigurator import CGroupConfigurator, DisableCgroups from azurelinuxagent.ga.cgroupstelemetry import CGroupsTelemetry from azurelinuxagent.common.event import WALAEventOperation -from azurelinuxagent.common.exception import CGroupsException, ExtensionError, ExtensionErrorCodes, \ - AgentMemoryExceededException +from azurelinuxagent.common.exception import CGroupsException, AgentMemoryExceededException from azurelinuxagent.common.future import ustr from azurelinuxagent.common.utils import shellutil, fileutil +from azurelinuxagent.ga.cpucontroller import CpuControllerV1 from tests.lib.mock_environment import MockCommand -from tests.lib.mock_cgroup_environment import mock_cgroup_environment, UnitFilePaths -from tests.lib.tools import AgentTestCase, patch, mock_sleep, i_am_root, data_dir, is_python_version_26_or_34, skip_if_predicate_true +from tests.lib.mock_cgroup_environment import mock_cgroup_v1_environment, UnitFilePaths, mock_cgroup_v2_environment +from tests.lib.tools import AgentTestCase, patch, mock_sleep, data_dir, is_python_version_26_or_34, skip_if_predicate_true from tests.lib.miscellaneous_tools import format_processes, wait_for @@ -49,12 +47,34 @@ def tearDownClass(cls): CGroupConfigurator._instance = None AgentTestCase.tearDownClass() + def tearDown(self): + CGroupConfigurator._instance = None + AgentTestCase.tearDown(self) + @contextlib.contextmanager def _get_cgroup_configurator(self, initialize=True, enable=True, mock_commands=None): CGroupConfigurator._instance = None configurator = CGroupConfigurator.get_instance() CGroupsTelemetry.reset() - with mock_cgroup_environment(self.tmp_dir) as mock_environment: + with mock_cgroup_v1_environment(self.tmp_dir) as mock_environment: + if mock_commands is 
not None: + for command in mock_commands: + mock_environment.add_command(command) + configurator.mocks = mock_environment + if initialize: + if not enable: + with patch.object(configurator, "enable"): + configurator.initialize() + else: + configurator.initialize() + yield configurator + + @contextlib.contextmanager + def _get_cgroup_configurator_v2(self, initialize=True, enable=True, mock_commands=None): + CGroupConfigurator._instance = None + configurator = CGroupConfigurator.get_instance() + CGroupsTelemetry.reset() + with mock_cgroup_v2_environment(self.tmp_dir) as mock_environment: if mock_commands is not None: for command in mock_commands: mock_environment.add_command(command) @@ -67,10 +87,23 @@ def _get_cgroup_configurator(self, initialize=True, enable=True, mock_commands=N configurator.initialize() yield configurator - def test_initialize_should_enable_cgroups(self): + def test_initialize_should_enable_cgroups_v1(self): with self._get_cgroup_configurator() as configurator: self.assertTrue(configurator.enabled(), "cgroups were not enabled") + def test_initialize_should_not_enable_cgroups_v2(self): + with self._get_cgroup_configurator_v2() as configurator: + self.assertFalse(configurator.enabled(), "cgroups were enabled") + + def test_initialize_should_not_enable_when_cgroup_api_cannot_be_determined(self): + # Mock cgroup api to raise CGroupsException + def mock_get_cgroup_api(): + raise CGroupsException("") + + with patch('azurelinuxagent.ga.cgroupconfigurator.get_cgroup_api', side_effect=mock_get_cgroup_api): + with self._get_cgroup_configurator() as configurator: + self.assertFalse(configurator.enabled(), "cgroups were enabled") + def test_initialize_should_start_tracking_the_agent_cgroups(self): with self._get_cgroup_configurator() as configurator: tracked = CGroupsTelemetry._tracked @@ -82,18 +115,18 @@ def test_initialize_should_start_tracking_the_agent_cgroups(self): "The Agent's Memory is not being tracked. 
Tracked: {0}".format(tracked)) def test_initialize_should_start_tracking_other_controllers_when_one_is_not_present(self): - command_mocks = [MockCommand(r"^mount -t cgroup$", -'''cgroup on /sys/fs/cgroup/systemd type cgroup (rw,nosuid,nodev,noexec,relatime,xattr,name=systemd) -cgroup on /sys/fs/cgroup/rdma type cgroup (rw,nosuid,nodev,noexec,relatime,rdma) -cgroup on /sys/fs/cgroup/cpuset type cgroup (rw,nosuid,nodev,noexec,relatime,cpuset) -cgroup on /sys/fs/cgroup/net_cls,net_prio type cgroup (rw,nosuid,nodev,noexec,relatime,net_cls,net_prio) -cgroup on /sys/fs/cgroup/perf_event type cgroup (rw,nosuid,nodev,noexec,relatime,perf_event) -cgroup on /sys/fs/cgroup/hugetlb type cgroup (rw,nosuid,nodev,noexec,relatime,hugetlb) -cgroup on /sys/fs/cgroup/freezer type cgroup (rw,nosuid,nodev,noexec,relatime,freezer) -cgroup on /sys/fs/cgroup/pids type cgroup (rw,nosuid,nodev,noexec,relatime,pids) -cgroup on /sys/fs/cgroup/devices type cgroup (rw,nosuid,nodev,noexec,relatime,devices) -cgroup on /sys/fs/cgroup/cpu,cpuacct type cgroup (rw,nosuid,nodev,noexec,relatime,cpu,cpuacct) -cgroup on /sys/fs/cgroup/blkio type cgroup (rw,nosuid,nodev,noexec,relatime,blkio) + command_mocks = [MockCommand(r"^findmnt -t cgroup --noheadings$", +'''/sys/fs/cgroup/systemd cgroup cgroup rw,nosuid,nodev,noexec,relatime,xattr,name=systemd +/sys/fs/cgroup/devices cgroup cgroup rw,nosuid,nodev,noexec,relatime,devices +/sys/fs/cgroup/rdma cgroup cgroup rw,nosuid,nodev,noexec,relatime,rdma +/sys/fs/cgroup/perf_event cgroup cgroup rw,nosuid,nodev,noexec,relatime,perf_event +/sys/fs/cgroup/net_cls,net_prio cgroup cgroup rw,nosuid,nodev,noexec,relatime,net_cls,net_prio +/sys/fs/cgroup/blkio cgroup cgroup rw,nosuid,nodev,noexec,relatime,blkio +/sys/fs/cgroup/cpuset cgroup cgroup rw,nosuid,nodev,noexec,relatime,cpuset +/sys/fs/cgroup/cpu,cpuacct cgroup cgroup rw,nosuid,nodev,noexec,relatime,cpu,cpuacct +/sys/fs/cgroup/freezer cgroup cgroup rw,nosuid,nodev,noexec,relatime,freezer +/sys/fs/cgroup/hugetlb 
cgroup cgroup rw,nosuid,nodev,noexec,relatime,hugetlb +/sys/fs/cgroup/pids cgroup cgroup rw,nosuid,nodev,noexec,relatime,pids ''')] with self._get_cgroup_configurator(mock_commands=command_mocks) as configurator: tracked = CGroupsTelemetry._tracked @@ -103,17 +136,17 @@ def test_initialize_should_start_tracking_other_controllers_when_one_is_not_pres "The Agent's memory should not be tracked. Tracked: {0}".format(tracked)) def test_initialize_should_not_enable_cgroups_when_the_cpu_and_memory_controllers_are_not_present(self): - command_mocks = [MockCommand(r"^mount -t cgroup$", -'''cgroup on /sys/fs/cgroup/systemd type cgroup (rw,nosuid,nodev,noexec,relatime,xattr,name=systemd) -cgroup on /sys/fs/cgroup/rdma type cgroup (rw,nosuid,nodev,noexec,relatime,rdma) -cgroup on /sys/fs/cgroup/cpuset type cgroup (rw,nosuid,nodev,noexec,relatime,cpuset) -cgroup on /sys/fs/cgroup/net_cls,net_prio type cgroup (rw,nosuid,nodev,noexec,relatime,net_cls,net_prio) -cgroup on /sys/fs/cgroup/perf_event type cgroup (rw,nosuid,nodev,noexec,relatime,perf_event) -cgroup on /sys/fs/cgroup/hugetlb type cgroup (rw,nosuid,nodev,noexec,relatime,hugetlb) -cgroup on /sys/fs/cgroup/freezer type cgroup (rw,nosuid,nodev,noexec,relatime,freezer) -cgroup on /sys/fs/cgroup/pids type cgroup (rw,nosuid,nodev,noexec,relatime,pids) -cgroup on /sys/fs/cgroup/devices type cgroup (rw,nosuid,nodev,noexec,relatime,devices) -cgroup on /sys/fs/cgroup/blkio type cgroup (rw,nosuid,nodev,noexec,relatime,blkio) + command_mocks = [MockCommand(r"^findmnt -t cgroup --noheadings$", +'''/sys/fs/cgroup/systemd cgroup cgroup rw,nosuid,nodev,noexec,relatime,xattr,name=systemd +/sys/fs/cgroup/devices cgroup cgroup rw,nosuid,nodev,noexec,relatime,devices +/sys/fs/cgroup/rdma cgroup cgroup rw,nosuid,nodev,noexec,relatime,rdma +/sys/fs/cgroup/perf_event cgroup cgroup rw,nosuid,nodev,noexec,relatime,perf_event +/sys/fs/cgroup/net_cls,net_prio cgroup cgroup rw,nosuid,nodev,noexec,relatime,net_cls,net_prio +/sys/fs/cgroup/blkio 
cgroup cgroup rw,nosuid,nodev,noexec,relatime,blkio +/sys/fs/cgroup/cpuset cgroup cgroup rw,nosuid,nodev,noexec,relatime,cpuset +/sys/fs/cgroup/freezer cgroup cgroup rw,nosuid,nodev,noexec,relatime,freezer +/sys/fs/cgroup/hugetlb cgroup cgroup rw,nosuid,nodev,noexec,relatime,hugetlb +/sys/fs/cgroup/pids cgroup cgroup rw,nosuid,nodev,noexec,relatime,pids ''')] with self._get_cgroup_configurator(mock_commands=command_mocks) as configurator: tracked = CGroupsTelemetry._tracked @@ -122,17 +155,17 @@ def test_initialize_should_not_enable_cgroups_when_the_cpu_and_memory_controller self.assertEqual(len(tracked), 0, "No cgroups should be tracked. Tracked: {0}".format(tracked)) def test_initialize_should_not_enable_cgroups_when_the_agent_is_not_in_the_system_slice(self): - command_mocks = [MockCommand(r"^mount -t cgroup$", -'''cgroup on /sys/fs/cgroup/systemd type cgroup (rw,nosuid,nodev,noexec,relatime,xattr,name=systemd) -cgroup on /sys/fs/cgroup/rdma type cgroup (rw,nosuid,nodev,noexec,relatime,rdma) -cgroup on /sys/fs/cgroup/cpuset type cgroup (rw,nosuid,nodev,noexec,relatime,cpuset) -cgroup on /sys/fs/cgroup/net_cls,net_prio type cgroup (rw,nosuid,nodev,noexec,relatime,net_cls,net_prio) -cgroup on /sys/fs/cgroup/perf_event type cgroup (rw,nosuid,nodev,noexec,relatime,perf_event) -cgroup on /sys/fs/cgroup/hugetlb type cgroup (rw,nosuid,nodev,noexec,relatime,hugetlb) -cgroup on /sys/fs/cgroup/freezer type cgroup (rw,nosuid,nodev,noexec,relatime,freezer) -cgroup on /sys/fs/cgroup/pids type cgroup (rw,nosuid,nodev,noexec,relatime,pids) -cgroup on /sys/fs/cgroup/devices type cgroup (rw,nosuid,nodev,noexec,relatime,devices) -cgroup on /sys/fs/cgroup/blkio type cgroup (rw,nosuid,nodev,noexec,relatime,blkio) + command_mocks = [MockCommand(r"^findmnt -t cgroup --noheadings$", +'''/sys/fs/cgroup/systemd cgroup cgroup rw,nosuid,nodev,noexec,relatime,xattr,name=systemd* +/sys/fs/cgroup/devices cgroup cgroup rw,nosuid,nodev,noexec,relatime,devices +/sys/fs/cgroup/rdma cgroup cgroup 
rw,nosuid,nodev,noexec,relatime,rdma +/sys/fs/cgroup/perf_event cgroup cgroup rw,nosuid,nodev,noexec,relatime,perf_event +/sys/fs/cgroup/net_cls,net_prio cgroup cgroup rw,nosuid,nodev,noexec,relatime,net_cls,net_prio +/sys/fs/cgroup/blkio cgroup cgroup rw,nosuid,nodev,noexec,relatime,blkio +/sys/fs/cgroup/cpuset cgroup cgroup rw,nosuid,nodev,noexec,relatime,cpuset +/sys/fs/cgroup/freezer cgroup cgroup rw,nosuid,nodev,noexec,relatime,freezer +/sys/fs/cgroup/hugetlb cgroup cgroup rw,nosuid,nodev,noexec,relatime,hugetlb +/sys/fs/cgroup/pids cgroup cgroup rw,nosuid,nodev,noexec,relatime,pids ''')] with self._get_cgroup_configurator(mock_commands=command_mocks) as configurator: @@ -188,26 +221,20 @@ def test_initialize_should_create_unit_files_when_the_agent_service_file_is_not_ self.assertTrue(os.path.exists(agent_drop_in_file_cpu_accounting), "{0} was not created".format(agent_drop_in_file_cpu_accounting)) self.assertTrue(os.path.exists(agent_drop_in_file_memory_accounting), "{0} was not created".format(agent_drop_in_file_memory_accounting)) - def test_initialize_should_update_logcollector_memorylimit(self): + def test_initialize_should_clear_logcollector_slice(self): with self._get_cgroup_configurator(initialize=False) as configurator: log_collector_unit_file = configurator.mocks.get_mapped_path(UnitFilePaths.logcollector) - original_memory_limit = "MemoryLimit=30M" - # The mock creates the slice unit file with memory limit + # The mock creates the slice unit file configurator.mocks.add_data_file(os.path.join(data_dir, 'init', "azure-walinuxagent-logcollector.slice"), UnitFilePaths.logcollector) - if not os.path.exists(log_collector_unit_file): - raise Exception("{0} should have been created during test setup".format(log_collector_unit_file)) - if not fileutil.findre_in_file(log_collector_unit_file, original_memory_limit): - raise Exception("MemoryLimit was not set correctly. Expected: {0}. 
Got:\n{1}".format( - original_memory_limit, fileutil.read_file(log_collector_unit_file))) + + self.assertTrue(os.path.exists(log_collector_unit_file), "{0} was not created".format(log_collector_unit_file)) configurator.initialize() - # initialize() should update the unit file to remove the memory limit - self.assertFalse(fileutil.findre_in_file(log_collector_unit_file, original_memory_limit), - "Log collector slice unit file was not updated correctly. Expected no memory limit. Got:\n{0}".format( - fileutil.read_file(log_collector_unit_file))) + # initialize() should remove the unit file + self.assertFalse(os.path.exists(log_collector_unit_file), "{0} should not have been created".format(log_collector_unit_file)) def test_setup_extension_slice_should_create_unit_files(self): with self._get_cgroup_configurator() as configurator: @@ -240,7 +267,7 @@ def test_remove_extension_slice_should_remove_unit_files(self): CGroupsTelemetry._tracked['/sys/fs/cgroup/cpu,cpuacct/azure.slice/azure-vmextensions.slice/' \ 'azure-vmextensions-Microsoft.CPlat.Extension.slice'] = \ - CpuCgroup('Microsoft.CPlat.Extension', + CpuControllerV1('Microsoft.CPlat.Extension', '/sys/fs/cgroup/cpu,cpuacct/azure.slice/azure-vmextensions.slice/azure-vmextensions-Microsoft.CPlat.Extension.slice') configurator.remove_extension_slice(extension_name="Microsoft.CPlat.Extension") @@ -289,6 +316,17 @@ def test_enable_should_not_track_throttled_time_when_setting_the_cpu_quota_fails self.assertFalse(CGroupsTelemetry.get_track_throttled_time(), "Throttle time should not be tracked") + def test_enable_should_not_track_throttled_time_when_cgroups_v2_enabled(self): + with self._get_cgroup_configurator_v2(initialize=False) as configurator: + if CGroupsTelemetry.get_track_throttled_time(): + raise Exception("Test setup should not start tracking Throttle Time") + + configurator.mocks.add_file(UnitFilePaths.cpu_quota, Exception("A TEST EXCEPTION")) + + configurator.initialize() + + 
self.assertFalse(CGroupsTelemetry.get_track_throttled_time(), "Throttle time should not be tracked when using cgroups v2") + def test_disable_should_reset_cpu_quota(self): with self._get_cgroup_configurator() as configurator: if len(CGroupsTelemetry._tracked) == 0: @@ -326,10 +364,10 @@ def test_disable_should_reset_cpu_quota_for_all_cgroups(self): configurator.setup_extension_slice(extension_name=extension_name, cpu_quota=5) configurator.set_extension_services_cpu_memory_quota(service_list) CGroupsTelemetry._tracked['/sys/fs/cgroup/cpu,cpuacct/system.slice/extension.service'] = \ - CpuCgroup('extension.service', '/sys/fs/cgroup/cpu,cpuacct/system.slice/extension.service') + CpuControllerV1('extension.service', '/sys/fs/cgroup/cpu,cpuacct/system.slice/extension.service') CGroupsTelemetry._tracked['/sys/fs/cgroup/cpu,cpuacct/azure.slice/azure-vmextensions.slice/' \ 'azure-vmextensions-Microsoft.CPlat.Extension.slice'] = \ - CpuCgroup('Microsoft.CPlat.Extension', + CpuControllerV1('Microsoft.CPlat.Extension', '/sys/fs/cgroup/cpu,cpuacct/azure.slice/azure-vmextensions.slice/azure-vmextensions-Microsoft.CPlat.Extension.slice') configurator.disable("UNIT TEST", DisableCgroups.ALL) @@ -379,7 +417,7 @@ def test_start_extension_command_should_not_use_systemd_when_cgroups_are_not_ena self.assertEqual(command_calls[0], "date", "The command line should not have been modified") @patch('time.sleep', side_effect=lambda _: mock_sleep()) - def test_start_extension_command_should_use_systemd_run_when_cgroups_are_enabled(self, _): + def test_start_extension_command_should_use_systemd_run_when_cgroups_v1_are_enabled(self, _): with self._get_cgroup_configurator() as configurator: with patch("azurelinuxagent.ga.cgroupapi.subprocess.Popen", wraps=subprocess.Popen) as popen_patch: configurator.start_extension_command( @@ -447,6 +485,34 @@ def mock_popen(command_arg, *args, **kwargs): self.assertIn("A TEST EXCEPTION", str(context_manager.exception)) + @patch('time.sleep', 
side_effect=lambda _: mock_sleep()) + def test_start_extension_command_should_not_use_systemd_when_cgroup_v2_enabled(self, _): + with self._get_cgroup_configurator_v2() as configurator: + self.assertFalse(configurator.enabled()) + + with patch("azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv2.start_extension_command") as v2_extension_start_command: + with patch("azurelinuxagent.ga.cgroupapi.subprocess.Popen", wraps=subprocess.Popen) as patcher: + configurator.start_extension_command( + extension_name="Microsoft.Compute.TestExtension-1.2.3", + command="date", + cmd_name="test", + timeout=300, + shell=False, + cwd=self.tmp_dir, + env={}, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + + command_calls = [args[0] for args, _ in patcher.call_args_list if + len(args) > 0 and "date" in args[0]] + self.assertFalse(v2_extension_start_command.called) + self.assertEqual(len(command_calls), 1, + "The test command should have been called exactly once [{0}]".format( + command_calls)) + self.assertNotIn("systemd-run", command_calls[0], + "The command should not have been invoked using systemd") + self.assertEqual(command_calls[0], "date", "The command line should not have been modified") + @patch('time.sleep', side_effect=lambda _: mock_sleep()) def test_start_extension_command_should_disable_cgroups_and_invoke_the_command_directly_if_systemd_fails(self, _): with self._get_cgroup_configurator() as configurator: @@ -454,7 +520,7 @@ def test_start_extension_command_should_disable_cgroups_and_invoke_the_command_d configurator.mocks.add_command(MockCommand("systemd-run", return_value=1, stdout='', stderr='Failed to start transient scope unit: syntax error')) with tempfile.TemporaryFile(dir=self.tmp_dir, mode="w+b") as output_file: - with patch("azurelinuxagent.ga.cgroupconfigurator.add_event") as mock_add_event: + with patch("azurelinuxagent.ga.cgroupapi.add_event") as mock_add_event: with patch("subprocess.Popen", wraps=subprocess.Popen) as popen_patch: CGroupsTelemetry.reset() 
@@ -526,112 +592,7 @@ def test_start_extension_command_should_disable_cgroups_and_invoke_the_command_d self.assertEqual(len(CGroupsTelemetry._tracked), 0, "No cgroups should have been created") - @skip_if_predicate_true(is_python_version_26_or_34, "Disabled on Python 2.6 and 3.4 for now. Need to revisit to fix it") - @attr('requires_sudo') - @patch('time.sleep', side_effect=lambda _: mock_sleep()) - def test_start_extension_command_should_not_use_fallback_option_if_extension_fails(self, *args): - self.assertTrue(i_am_root(), "Test does not run when non-root") - - with self._get_cgroup_configurator() as configurator: - pass # release the mocks used to create the test CGroupConfigurator so that they do not conflict the mock Popen below - - command = "ls folder_does_not_exist" - - with tempfile.TemporaryFile(dir=self.tmp_dir, mode="w+b") as stdout: - with tempfile.TemporaryFile(dir=self.tmp_dir, mode="w+b") as stderr: - with patch("azurelinuxagent.ga.cgroupapi.subprocess.Popen", wraps=subprocess.Popen) as popen_patch: - with self.assertRaises(ExtensionError) as context_manager: - configurator.start_extension_command( - extension_name="Microsoft.Compute.TestExtension-1.2.3", - command=command, - cmd_name="test", - timeout=300, - shell=True, - cwd=self.tmp_dir, - env={}, - stdout=stdout, - stderr=stderr) - - extension_calls = [args[0] for (args, _) in popen_patch.call_args_list if command in args[0]] - - self.assertEqual(1, len(extension_calls), "The extension should have been invoked exactly once") - self.assertIn("systemd-run", extension_calls[0], - "The first call to the extension should have used systemd") - - self.assertEqual(context_manager.exception.code, ExtensionErrorCodes.PluginUnknownFailure) - self.assertIn("Non-zero exit code", ustr(context_manager.exception)) - # The scope name should appear in the process output since systemd-run was invoked and stderr - # wasn't truncated. 
- self.assertIn("Running scope as unit", ustr(context_manager.exception)) - - @skip_if_predicate_true(is_python_version_26_or_34, "Disabled on Python 2.6 and 3.4 for now. Need to revisit to fix it") - @attr('requires_sudo') - @patch('time.sleep', side_effect=lambda _: mock_sleep()) - @patch("azurelinuxagent.ga.extensionprocessutil.TELEMETRY_MESSAGE_MAX_LEN", 5) - def test_start_extension_command_should_not_use_fallback_option_if_extension_fails_with_long_output(self, *args): - self.assertTrue(i_am_root(), "Test does not run when non-root") - - with self._get_cgroup_configurator() as configurator: - pass # release the mocks used to create the test CGroupConfigurator so that they do not conflict the mock Popen below - - long_output = "a"*20 # large enough to ensure both stdout and stderr are truncated - long_stdout_stderr_command = "echo {0} && echo {0} >&2 && ls folder_does_not_exist".format(long_output) - - with tempfile.TemporaryFile(dir=self.tmp_dir, mode="w+b") as stdout: - with tempfile.TemporaryFile(dir=self.tmp_dir, mode="w+b") as stderr: - with patch("azurelinuxagent.ga.cgroupapi.subprocess.Popen", wraps=subprocess.Popen) as popen_patch: - with self.assertRaises(ExtensionError) as context_manager: - configurator.start_extension_command( - extension_name="Microsoft.Compute.TestExtension-1.2.3", - command=long_stdout_stderr_command, - cmd_name="test", - timeout=300, - shell=True, - cwd=self.tmp_dir, - env={}, - stdout=stdout, - stderr=stderr) - - extension_calls = [args[0] for (args, _) in popen_patch.call_args_list if long_stdout_stderr_command in args[0]] - - self.assertEqual(1, len(extension_calls), "The extension should have been invoked exactly once") - self.assertIn("systemd-run", extension_calls[0], - "The first call to the extension should have used systemd") - - self.assertEqual(context_manager.exception.code, ExtensionErrorCodes.PluginUnknownFailure) - self.assertIn("Non-zero exit code", ustr(context_manager.exception)) - # stdout and stderr should 
have been truncated, so the scope name doesn't appear in stderr - # even though systemd-run ran - self.assertNotIn("Running scope as unit", ustr(context_manager.exception)) - - @attr('requires_sudo') - def test_start_extension_command_should_not_use_fallback_option_if_extension_times_out(self, *args): # pylint: disable=unused-argument - self.assertTrue(i_am_root(), "Test does not run when non-root") - - with self._get_cgroup_configurator() as configurator: - pass # release the mocks used to create the test CGroupConfigurator so that they do not conflict the mock Popen below - - with tempfile.TemporaryFile(dir=self.tmp_dir, mode="w+b") as stdout: - with tempfile.TemporaryFile(dir=self.tmp_dir, mode="w+b") as stderr: - with patch("azurelinuxagent.ga.extensionprocessutil.wait_for_process_completion_or_timeout", - return_value=[True, None, 0]): - with patch("azurelinuxagent.ga.cgroupapi.SystemdCgroupsApi._is_systemd_failure", - return_value=False): - with self.assertRaises(ExtensionError) as context_manager: - configurator.start_extension_command( - extension_name="Microsoft.Compute.TestExtension-1.2.3", - command="date", - cmd_name="test", - timeout=300, - shell=True, - cwd=self.tmp_dir, - env={}, - stdout=stdout, - stderr=stderr) - - self.assertEqual(context_manager.exception.code, ExtensionErrorCodes.PluginHandlerScriptTimedout) - self.assertIn("Timeout", ustr(context_manager.exception)) - + @skip_if_predicate_true(is_python_version_26_or_34, "Disabled on Python 2.6 and 3.4, they run on containers where the OS commands needed by the test are not present.") @patch('time.sleep', side_effect=lambda _: mock_sleep()) def test_start_extension_command_should_capture_only_the_last_subprocess_output(self, _): with self._get_cgroup_configurator() as configurator: @@ -644,11 +605,13 @@ def mock_popen(command, *args, **kwargs): # Popen can accept both strings and lists, handle both here. 
if isinstance(command, str): - systemd_command = command.replace('systemd-run', 'systemd-run syntax_error') + command = command.replace('systemd-run', 'systemd-run syntax_error') elif isinstance(command, list) and command[0] == 'systemd-run': - systemd_command = ['systemd-run', 'syntax_error'] + command[1:] + command = ['systemd-run', 'syntax_error'] + command[1:] + elif command == ['systemctl', 'daemon-reload']: + command = ['echo', 'systemctl', 'daemon-reload'] - return original_popen(systemd_command, *args, **kwargs) + return original_popen(command, *args, **kwargs) expected_output = "[stdout]\n{0}\n\n\n[stderr]\n" @@ -749,7 +712,8 @@ def test_it_should_stop_tracking_extension_services_cgroups(self): with self._get_cgroup_configurator() as configurator: with patch("os.path.exists") as mock_path: mock_path.return_value = True - CGroupsTelemetry.track_cgroup(CpuCgroup('extension.service', '/sys/fs/cgroup/cpu,cpuacct/system.slice/extension.service')) + CGroupsTelemetry.track_cgroup_controller( + CpuControllerV1('extension.service', '/sys/fs/cgroup/cpu,cpuacct/system.slice/extension.service')) configurator.stop_tracking_extension_services_cgroups(service_list) tracked = CGroupsTelemetry._tracked @@ -808,7 +772,7 @@ def side_effect(path): with patch("os.path.exists") as mock_path: mock_path.side_effect = side_effect CGroupsTelemetry._tracked['/sys/fs/cgroup/cpu,cpuacct/system.slice/extension.service'] = \ - CpuCgroup('extension.service', '/sys/fs/cgroup/cpu,cpuacct/system.slice/extension.service') + CpuControllerV1('extension.service', '/sys/fs/cgroup/cpu,cpuacct/system.slice/extension.service') configurator.stop_tracking_unit_cgroups("extension.service") tracked = CGroupsTelemetry._tracked @@ -943,7 +907,7 @@ def get_completed_process(): agent_processes = [os.getppid(), os.getpid()] + agent_command_processes + [start_extension.systemd_run_pid] other_processes = [1, get_completed_process()] + extension_processes - with 
patch("azurelinuxagent.ga.cgroupconfigurator.CGroupsApi.get_processes_in_cgroup", return_value=agent_processes + other_processes): + with patch("azurelinuxagent.ga.cgroupapi.CgroupV1.get_processes", return_value=agent_processes + other_processes): with self.assertRaises(CGroupsException) as context_manager: configurator._check_processes_in_agent_cgroup() @@ -987,7 +951,7 @@ def test_check_cgroups_should_disable_cgroups_when_a_check_fails(self): patchers.append(p) p.start() - with patch("azurelinuxagent.ga.cgroupconfigurator.add_event") as add_event: + with patch("azurelinuxagent.ga.cgroupapi.add_event") as add_event: configurator.enable() tracked_metrics = [ @@ -1011,14 +975,48 @@ def test_check_cgroups_should_disable_cgroups_when_a_check_fails(self): for p in patchers: p.stop() + @patch('azurelinuxagent.ga.cgroupconfigurator.CGroupConfigurator._Impl._check_processes_in_agent_cgroup', side_effect=CGroupsException("Test")) + @patch('azurelinuxagent.ga.cgroupapi.add_event') + def test_agent_should_not_enable_cgroups_if_unexpected_process_already_in_agent_cgroups(self, add_event, _): + command_mocks = [MockCommand(r"^systemctl show walinuxagent\.service --property Slice", +'''Slice=azure.slice +''')] + original_read_file = fileutil.read_file + + def mock_read_file(filepath, **args): + if filepath == "/proc/self/cgroup": + filepath = os.path.join(data_dir, "cgroups", "proc_self_cgroup_azure_slice") + return original_read_file(filepath, **args) + + with self._get_cgroup_configurator(initialize=False, mock_commands=command_mocks) as configurator: + with patch("azurelinuxagent.common.utils.fileutil.read_file", side_effect=mock_read_file): + configurator.initialize() + + self.assertFalse(configurator.enabled(), "Cgroups should not be enabled") + disable_events = [kwargs for _, kwargs in add_event.call_args_list if kwargs["op"] == WALAEventOperation.CGroupsDisabled] + self.assertTrue( + len(disable_events) == 1, + "Exactly 1 event should have been emitted. 
Got: {0}".format(disable_events)) + self.assertIn( + "Found unexpected processes in the agent cgroup before agent enable cgroups", + disable_events[0]["message"], + "The error message is not correct when process check failed") + def test_check_agent_memory_usage_should_raise_a_cgroups_exception_when_the_limit_is_exceeded(self): metrics = [MetricValue(MetricsCategory.MEMORY_CATEGORY, MetricsCounter.TOTAL_MEM_USAGE, AGENT_NAME_TELEMETRY, conf.get_agent_memory_quota() + 1), MetricValue(MetricsCategory.MEMORY_CATEGORY, MetricsCounter.SWAP_MEM_USAGE, AGENT_NAME_TELEMETRY, conf.get_agent_memory_quota() + 1)] with self.assertRaises(AgentMemoryExceededException) as context_manager: with self._get_cgroup_configurator() as configurator: - with patch("azurelinuxagent.ga.cgroup.MemoryCgroup.get_tracked_metrics") as tracked_metrics: + with patch("azurelinuxagent.ga.memorycontroller.MemoryControllerV1.get_tracked_metrics") as tracked_metrics: tracked_metrics.return_value = metrics configurator.check_agent_memory_usage() - self.assertIn("The agent memory limit {0} bytes exceeded".format(conf.get_agent_memory_quota()), ustr(context_manager.exception), "An incorrect exception was raised") \ No newline at end of file + self.assertIn("The agent memory limit {0} bytes exceeded".format(conf.get_agent_memory_quota()), ustr(context_manager.exception), "An incorrect exception was raised") + + def test_get_log_collector_properties_should_return_correct_props(self): + with self._get_cgroup_configurator() as configurator: + self.assertEqual(configurator.get_logcollector_unit_properties(), ["--property=CPUAccounting=yes", "--property=MemoryAccounting=yes", "--property=CPUQuota=5%"]) + + with self._get_cgroup_configurator_v2() as configurator: + self.assertEqual(configurator.get_logcollector_unit_properties(), ["--property=CPUAccounting=yes", "--property=MemoryAccounting=yes", "--property=CPUQuota=5%", "--property=MemoryHigh=170M"]) diff --git a/tests/ga/test_cgroupconfigurator_sudo.py 
b/tests/ga/test_cgroupconfigurator_sudo.py new file mode 100644 index 0000000000..14b544f5b4 --- /dev/null +++ b/tests/ga/test_cgroupconfigurator_sudo.py @@ -0,0 +1,157 @@ +# Copyright 2018 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Requires Python 2.4+ and Openssl 1.0+ +# + +from __future__ import print_function + +import contextlib +import subprocess +import tempfile + +from azurelinuxagent.ga.cgroupconfigurator import CGroupConfigurator +from azurelinuxagent.ga.cgroupstelemetry import CGroupsTelemetry +from azurelinuxagent.common.exception import ExtensionError, ExtensionErrorCodes +from azurelinuxagent.common.future import ustr +from tests.lib.mock_cgroup_environment import mock_cgroup_v1_environment +from tests.lib.tools import AgentTestCase, patch, mock_sleep, i_am_root, is_python_version_26_or_34, skip_if_predicate_true + + +class CGroupConfiguratorSystemdTestCaseSudo(AgentTestCase): + @classmethod + def tearDownClass(cls): + CGroupConfigurator._instance = None + AgentTestCase.tearDownClass() + + @contextlib.contextmanager + def _get_cgroup_configurator(self, initialize=True, enable=True, mock_commands=None): + CGroupConfigurator._instance = None + configurator = CGroupConfigurator.get_instance() + CGroupsTelemetry.reset() + with mock_cgroup_v1_environment(self.tmp_dir) as mock_environment: + if mock_commands is not None: + for command in mock_commands: + mock_environment.add_command(command) + configurator.mocks = 
mock_environment + if initialize: + if not enable: + with patch.object(configurator, "enable"): + configurator.initialize() + else: + configurator.initialize() + yield configurator + + @skip_if_predicate_true(is_python_version_26_or_34, "Disabled on Python 2.6 and 3.4 for now. Need to revisit to fix it") + @patch('time.sleep', side_effect=lambda _: mock_sleep()) + def test_start_extension_command_should_not_use_fallback_option_if_extension_fails(self, *args): + self.assertTrue(i_am_root(), "Test does not run when non-root") + + with self._get_cgroup_configurator() as configurator: + pass # release the mocks used to create the test CGroupConfigurator so that they do not conflict the mock Popen below + + command = "ls folder_does_not_exist" + + with tempfile.TemporaryFile(dir=self.tmp_dir, mode="w+b") as stdout: + with tempfile.TemporaryFile(dir=self.tmp_dir, mode="w+b") as stderr: + with patch("azurelinuxagent.ga.cgroupapi.subprocess.Popen", wraps=subprocess.Popen) as popen_patch: + with self.assertRaises(ExtensionError) as context_manager: + configurator.start_extension_command( + extension_name="Microsoft.Compute.TestExtension-1.2.3", + command=command, + cmd_name="test", + timeout=300, + shell=True, + cwd=self.tmp_dir, + env={}, + stdout=stdout, + stderr=stderr) + + extension_calls = [args[0] for (args, _) in popen_patch.call_args_list if command in args[0]] + + self.assertEqual(1, len(extension_calls), "The extension should have been invoked exactly once") + self.assertIn("systemd-run", extension_calls[0], + "The first call to the extension should have used systemd") + + self.assertEqual(context_manager.exception.code, ExtensionErrorCodes.PluginUnknownFailure) + self.assertIn("Non-zero exit code", ustr(context_manager.exception)) + # The scope name should appear in the process output since systemd-run was invoked and stderr + # wasn't truncated. 
+ self.assertIn("Running scope as unit", ustr(context_manager.exception)) + + @skip_if_predicate_true(is_python_version_26_or_34, "Disabled on Python 2.6 and 3.4 for now. Need to revisit to fix it") + @patch('time.sleep', side_effect=lambda _: mock_sleep()) + @patch("azurelinuxagent.ga.extensionprocessutil.TELEMETRY_MESSAGE_MAX_LEN", 5) + def test_start_extension_command_should_not_use_fallback_option_if_extension_fails_with_long_output(self, *args): + self.assertTrue(i_am_root(), "Test does not run when non-root") + + with self._get_cgroup_configurator() as configurator: + pass # release the mocks used to create the test CGroupConfigurator so that they do not conflict the mock Popen below + + long_output = "a"*20 # large enough to ensure both stdout and stderr are truncated + long_stdout_stderr_command = "echo {0} && echo {0} >&2 && ls folder_does_not_exist".format(long_output) + + with tempfile.TemporaryFile(dir=self.tmp_dir, mode="w+b") as stdout: + with tempfile.TemporaryFile(dir=self.tmp_dir, mode="w+b") as stderr: + with patch("azurelinuxagent.ga.cgroupapi.subprocess.Popen", wraps=subprocess.Popen) as popen_patch: + with self.assertRaises(ExtensionError) as context_manager: + configurator.start_extension_command( + extension_name="Microsoft.Compute.TestExtension-1.2.3", + command=long_stdout_stderr_command, + cmd_name="test", + timeout=300, + shell=True, + cwd=self.tmp_dir, + env={}, + stdout=stdout, + stderr=stderr) + + extension_calls = [args[0] for (args, _) in popen_patch.call_args_list if long_stdout_stderr_command in args[0]] + + self.assertEqual(1, len(extension_calls), "The extension should have been invoked exactly once") + self.assertIn("systemd-run", extension_calls[0], + "The first call to the extension should have used systemd") + + self.assertEqual(context_manager.exception.code, ExtensionErrorCodes.PluginUnknownFailure) + self.assertIn("Non-zero exit code", ustr(context_manager.exception)) + # stdout and stderr should have been truncated, so 
the scope name doesn't appear in stderr + # even though systemd-run ran + self.assertNotIn("Running scope as unit", ustr(context_manager.exception)) + + def test_start_extension_command_should_not_use_fallback_option_if_extension_times_out(self, *args): # pylint: disable=unused-argument + self.assertTrue(i_am_root(), "Test does not run when non-root") + + with self._get_cgroup_configurator() as configurator: + pass # release the mocks used to create the test CGroupConfigurator so that they do not conflict the mock Popen below + + with tempfile.TemporaryFile(dir=self.tmp_dir, mode="w+b") as stdout: + with tempfile.TemporaryFile(dir=self.tmp_dir, mode="w+b") as stderr: + with patch("azurelinuxagent.ga.extensionprocessutil.wait_for_process_completion_or_timeout", + return_value=[True, None, 0]): + with patch("azurelinuxagent.ga.cgroupapi._SystemdCgroupApi._is_systemd_failure", + return_value=False): + with self.assertRaises(ExtensionError) as context_manager: + configurator.start_extension_command( + extension_name="Microsoft.Compute.TestExtension-1.2.3", + command="date", + cmd_name="test", + timeout=300, + shell=True, + cwd=self.tmp_dir, + env={}, + stdout=stdout, + stderr=stderr) + + self.assertEqual(context_manager.exception.code, ExtensionErrorCodes.PluginHandlerScriptTimedout) + self.assertIn("Timeout", ustr(context_manager.exception)) diff --git a/tests/ga/test_cgroupcontroller.py b/tests/ga/test_cgroupcontroller.py new file mode 100644 index 0000000000..a01237e965 --- /dev/null +++ b/tests/ga/test_cgroupcontroller.py @@ -0,0 +1,55 @@ +# Copyright 2018 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Requires Python 2.4+ and Openssl 1.0+ +# + +from __future__ import print_function + +import os +import random + +from azurelinuxagent.ga.cgroupcontroller import _CgroupController +from tests.lib.tools import AgentTestCase, patch + + +def consume_cpu_time(): + waste = 0 + for x in range(1, 200000): # pylint: disable=unused-variable + waste += random.random() + return waste + + +class TestCgroupController(AgentTestCase): + def test_is_active(self): + test_metrics = _CgroupController("test_extension", self.tmp_dir) + + with open(os.path.join(self.tmp_dir, "cgroup.procs"), mode="wb") as tasks: + tasks.write(str(1000).encode()) + + self.assertEqual(True, test_metrics.is_active()) + + @patch("azurelinuxagent.common.logger.periodic_warn") + def test_is_active_file_not_present(self, patch_periodic_warn): + test_metrics = _CgroupController("test_extension", self.tmp_dir) + self.assertFalse(test_metrics.is_active()) + + self.assertEqual(0, patch_periodic_warn.call_count) + + @patch("azurelinuxagent.common.logger.periodic_warn") + def test_is_active_incorrect_file(self, patch_periodic_warn): + open(os.path.join(self.tmp_dir, "cgroup.procs"), mode="wb").close() + test_metrics = _CgroupController("test_extension", os.path.join(self.tmp_dir, "cgroup.procs")) + self.assertEqual(False, test_metrics.is_active()) + self.assertEqual(1, patch_periodic_warn.call_count) diff --git a/tests/ga/test_cgroups.py b/tests/ga/test_cgroups.py deleted file mode 100644 index 0ffcfed1bd..0000000000 --- a/tests/ga/test_cgroups.py +++ /dev/null @@ -1,242 +0,0 @@ -# Copyright 2018 
Microsoft Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Requires Python 2.4+ and Openssl 1.0+ -# - -from __future__ import print_function - -import errno -import os -import random -import shutil - -from azurelinuxagent.ga.cgroup import CpuCgroup, MemoryCgroup, MetricsCounter, CounterNotFound -from azurelinuxagent.common.exception import CGroupsException -from azurelinuxagent.common.osutil import get_osutil -from azurelinuxagent.common.utils import fileutil -from tests.lib.tools import AgentTestCase, patch, data_dir - - -def consume_cpu_time(): - waste = 0 - for x in range(1, 200000): # pylint: disable=unused-variable - waste += random.random() - return waste - - -class TestCGroup(AgentTestCase): - def test_is_active(self): - test_cgroup = CpuCgroup("test_extension", self.tmp_dir) - self.assertEqual(False, test_cgroup.is_active()) - - with open(os.path.join(self.tmp_dir, "tasks"), mode="wb") as tasks: - tasks.write(str(1000).encode()) - - self.assertEqual(True, test_cgroup.is_active()) - - @patch("azurelinuxagent.common.logger.periodic_warn") - def test_is_active_file_not_present(self, patch_periodic_warn): - test_cgroup = CpuCgroup("test_extension", self.tmp_dir) - self.assertEqual(False, test_cgroup.is_active()) - - test_cgroup = MemoryCgroup("test_extension", os.path.join(self.tmp_dir, "this_cgroup_does_not_exist")) - self.assertEqual(False, test_cgroup.is_active()) - - self.assertEqual(0, patch_periodic_warn.call_count) - - 
@patch("azurelinuxagent.common.logger.periodic_warn") - def test_is_active_incorrect_file(self, patch_periodic_warn): - open(os.path.join(self.tmp_dir, "tasks"), mode="wb").close() - test_cgroup = CpuCgroup("test_extension", os.path.join(self.tmp_dir, "tasks")) - self.assertEqual(False, test_cgroup.is_active()) - self.assertEqual(1, patch_periodic_warn.call_count) - - -class TestCpuCgroup(AgentTestCase): - @classmethod - def setUpClass(cls): - AgentTestCase.setUpClass() - - original_read_file = fileutil.read_file - - # - # Tests that need to mock the contents of /proc/stat or */cpuacct/stat can set this map from - # the file that needs to be mocked to the mock file (each test starts with an empty map). If - # an Exception is given instead of a path, the exception is raised - # - cls.mock_read_file_map = {} - - def mock_read_file(filepath, **args): - if filepath in cls.mock_read_file_map: - mapped_value = cls.mock_read_file_map[filepath] - if isinstance(mapped_value, Exception): - raise mapped_value - filepath = mapped_value - return original_read_file(filepath, **args) - - cls.mock_read_file = patch("azurelinuxagent.common.utils.fileutil.read_file", side_effect=mock_read_file) - cls.mock_read_file.start() - - @classmethod - def tearDownClass(cls): - cls.mock_read_file.stop() - AgentTestCase.tearDownClass() - - def setUp(self): - AgentTestCase.setUp(self) - TestCpuCgroup.mock_read_file_map.clear() - - def test_initialize_cpu_usage_should_set_current_cpu_usage(self): - cgroup = CpuCgroup("test", "/sys/fs/cgroup/cpu/system.slice/test") - - TestCpuCgroup.mock_read_file_map = { - "/proc/stat": os.path.join(data_dir, "cgroups", "proc_stat_t0"), - os.path.join(cgroup.path, "cpuacct.stat"): os.path.join(data_dir, "cgroups", "cpuacct.stat_t0") - } - - cgroup.initialize_cpu_usage() - - self.assertEqual(cgroup._current_cgroup_cpu, 63763) - self.assertEqual(cgroup._current_system_cpu, 5496872) - - def 
test_get_cpu_usage_should_return_the_cpu_usage_since_its_last_invocation(self): - osutil = get_osutil() - - cgroup = CpuCgroup("test", "/sys/fs/cgroup/cpu/system.slice/test") - - TestCpuCgroup.mock_read_file_map = { - "/proc/stat": os.path.join(data_dir, "cgroups", "proc_stat_t0"), - os.path.join(cgroup.path, "cpuacct.stat"): os.path.join(data_dir, "cgroups", "cpuacct.stat_t0") - } - - cgroup.initialize_cpu_usage() - - TestCpuCgroup.mock_read_file_map = { - "/proc/stat": os.path.join(data_dir, "cgroups", "proc_stat_t1"), - os.path.join(cgroup.path, "cpuacct.stat"): os.path.join(data_dir, "cgroups", "cpuacct.stat_t1") - } - - cpu_usage = cgroup.get_cpu_usage() - - self.assertEqual(cpu_usage, round(100.0 * 0.000307697876885 * osutil.get_processor_cores(), 3)) - - TestCpuCgroup.mock_read_file_map = { - "/proc/stat": os.path.join(data_dir, "cgroups", "proc_stat_t2"), - os.path.join(cgroup.path, "cpuacct.stat"): os.path.join(data_dir, "cgroups", "cpuacct.stat_t2") - } - - cpu_usage = cgroup.get_cpu_usage() - - self.assertEqual(cpu_usage, round(100.0 * 0.000445181085968 * osutil.get_processor_cores(), 3)) - - def test_initialize_cpu_usage_should_set_the_cgroup_usage_to_0_when_the_cgroup_does_not_exist(self): - cgroup = CpuCgroup("test", "/sys/fs/cgroup/cpu/system.slice/test") - - io_error_2 = IOError() - io_error_2.errno = errno.ENOENT # "No such directory" - - TestCpuCgroup.mock_read_file_map = { - "/proc/stat": os.path.join(data_dir, "cgroups", "proc_stat_t0"), - os.path.join(cgroup.path, "cpuacct.stat"): io_error_2 - } - - cgroup.initialize_cpu_usage() - - self.assertEqual(cgroup._current_cgroup_cpu, 0) - self.assertEqual(cgroup._current_system_cpu, 5496872) # check the system usage just for test sanity - - def test_initialize_cpu_usage_should_raise_an_exception_when_called_more_than_once(self): - cgroup = CpuCgroup("test", "/sys/fs/cgroup/cpu/system.slice/test") - - TestCpuCgroup.mock_read_file_map = { - "/proc/stat": os.path.join(data_dir, "cgroups", 
"proc_stat_t0"), - os.path.join(cgroup.path, "cpuacct.stat"): os.path.join(data_dir, "cgroups", "cpuacct.stat_t0") - } - - cgroup.initialize_cpu_usage() - - with self.assertRaises(CGroupsException): - cgroup.initialize_cpu_usage() - - def test_get_cpu_usage_should_raise_an_exception_when_initialize_cpu_usage_has_not_been_invoked(self): - cgroup = CpuCgroup("test", "/sys/fs/cgroup/cpu/system.slice/test") - - with self.assertRaises(CGroupsException): - cpu_usage = cgroup.get_cpu_usage() # pylint: disable=unused-variable - - def test_get_throttled_time_should_return_the_value_since_its_last_invocation(self): - test_file = os.path.join(self.tmp_dir, "cpu.stat") - shutil.copyfile(os.path.join(data_dir, "cgroups", "cpu.stat_t0"), test_file) # throttled_time = 50 - cgroup = CpuCgroup("test", self.tmp_dir) - cgroup.initialize_cpu_usage() - shutil.copyfile(os.path.join(data_dir, "cgroups", "cpu.stat_t1"), test_file) # throttled_time = 2075541442327 - - throttled_time = cgroup.get_cpu_throttled_time() - - self.assertEqual(throttled_time, float(2075541442327 - 50) / 1E9, "The value of throttled_time is incorrect") - - def test_get_tracked_metrics_should_return_the_throttled_time(self): - cgroup = CpuCgroup("test", os.path.join(data_dir, "cgroups")) - cgroup.initialize_cpu_usage() - - def find_throttled_time(metrics): - return [m for m in metrics if m.counter == MetricsCounter.THROTTLED_TIME] - - found = find_throttled_time(cgroup.get_tracked_metrics()) - self.assertTrue(len(found) == 0, "get_tracked_metrics should not fetch the throttled time by default. Found: {0}".format(found)) - - found = find_throttled_time(cgroup.get_tracked_metrics(track_throttled_time=True)) - self.assertTrue(len(found) == 1, "get_tracked_metrics should have fetched the throttled time by default. 
Found: {0}".format(found)) - - -class TestMemoryCgroup(AgentTestCase): - def test_get_metrics(self): - test_mem_cg = MemoryCgroup("test_extension", os.path.join(data_dir, "cgroups", "memory_mount")) - - memory_usage = test_mem_cg.get_memory_usage() - self.assertEqual(150000, memory_usage) - - max_memory_usage = test_mem_cg.get_max_memory_usage() - self.assertEqual(1000000, max_memory_usage) - - swap_memory_usage = test_mem_cg.try_swap_memory_usage() - self.assertEqual(20000, swap_memory_usage) - - def test_get_metrics_when_files_not_present(self): - test_mem_cg = MemoryCgroup("test_extension", os.path.join(data_dir, "cgroups")) - - with self.assertRaises(IOError) as e: - test_mem_cg.get_memory_usage() - - self.assertEqual(e.exception.errno, errno.ENOENT) - - with self.assertRaises(IOError) as e: - test_mem_cg.get_max_memory_usage() - - self.assertEqual(e.exception.errno, errno.ENOENT) - - with self.assertRaises(IOError) as e: - test_mem_cg.try_swap_memory_usage() - - self.assertEqual(e.exception.errno, errno.ENOENT) - - def test_get_memory_usage_counters_not_found(self): - test_mem_cg = MemoryCgroup("test_extension", os.path.join(data_dir, "cgroups", "missing_memory_counters")) - - with self.assertRaises(CounterNotFound): - test_mem_cg.get_memory_usage() - - swap_memory_usage = test_mem_cg.try_swap_memory_usage() - self.assertEqual(0, swap_memory_usage) diff --git a/tests/ga/test_cgroupstelemetry.py b/tests/ga/test_cgroupstelemetry.py index 26fcecbf65..ab4e33048b 100644 --- a/tests/ga/test_cgroupstelemetry.py +++ b/tests/ga/test_cgroupstelemetry.py @@ -19,9 +19,11 @@ import random import time -from azurelinuxagent.ga.cgroup import CpuCgroup, MemoryCgroup +from azurelinuxagent.ga.cgroupcontroller import MetricsCounter from azurelinuxagent.ga.cgroupstelemetry import CGroupsTelemetry from azurelinuxagent.common.utils import fileutil +from azurelinuxagent.ga.cpucontroller import CpuControllerV1 +from azurelinuxagent.ga.memorycontroller import MemoryControllerV1 from 
tests.lib.tools import AgentTestCase, data_dir, patch @@ -80,9 +82,9 @@ def setUpClass(cls): def mock_read_file(filepath, **args): if filepath == "/proc/stat": - filepath = os.path.join(data_dir, "cgroups", "proc_stat_t0") + filepath = os.path.join(data_dir, "cgroups", "v1", "proc_stat_t0") elif filepath.endswith("/cpuacct.stat"): - filepath = os.path.join(data_dir, "cgroups", "cpuacct.stat_t0") + filepath = os.path.join(data_dir, "cgroups", "v1", "cpuacct.stat_t0") return original_read_file(filepath, **args) cls._mock_read_cpu_cgroup_file = patch("azurelinuxagent.common.utils.fileutil.read_file", side_effect=mock_read_file) @@ -103,76 +105,81 @@ def tearDown(self): CGroupsTelemetry.reset() @staticmethod - def _track_new_extension_cgroups(num_extensions): + def _track_new_extension_cgroup_controllers(num_extensions): for i in range(num_extensions): - dummy_cpu_cgroup = CpuCgroup("dummy_extension_{0}".format(i), "dummy_cpu_path_{0}".format(i)) - CGroupsTelemetry.track_cgroup(dummy_cpu_cgroup) + dummy_cpu_controller = CpuControllerV1("dummy_extension_{0}".format(i), "dummy_cpu_path_{0}".format(i)) + CGroupsTelemetry.track_cgroup_controller(dummy_cpu_controller) - dummy_memory_cgroup = MemoryCgroup("dummy_extension_{0}".format(i), "dummy_memory_path_{0}".format(i)) - CGroupsTelemetry.track_cgroup(dummy_memory_cgroup) + dummy_memory_controller = MemoryControllerV1("dummy_extension_{0}".format(i), "dummy_memory_path_{0}".format(i)) + CGroupsTelemetry.track_cgroup_controller(dummy_memory_controller) - def _assert_cgroups_are_tracked(self, num_extensions): + def _assert_cgroup_controllers_are_tracked(self, num_extensions): for i in range(num_extensions): self.assertTrue(CGroupsTelemetry.is_tracked("dummy_cpu_path_{0}".format(i))) self.assertTrue(CGroupsTelemetry.is_tracked("dummy_memory_path_{0}".format(i))) - def _assert_polled_metrics_equal(self, metrics, cpu_metric_value, memory_metric_value, max_memory_metric_value, swap_memory_value): + def 
_assert_polled_metrics_equal(self, metrics, cpu_metric_value, current_total_memory_metric_value, current_anon_memory_metric_value, current_cache_memory_metric_value, max_memory_metric_value, swap_memory_value): for metric in metrics: self.assertIn(metric.category, ["CPU", "Memory"]) if metric.category == "CPU": self.assertEqual(metric.counter, "% Processor Time") self.assertEqual(metric.value, cpu_metric_value) if metric.category == "Memory": - self.assertIn(metric.counter, ["Total Memory Usage", "Max Memory Usage", "Swap Memory Usage"]) - if metric.counter == "Total Memory Usage": - self.assertEqual(metric.value, memory_metric_value) - elif metric.counter == "Max Memory Usage": + self.assertIn(metric.counter, [MetricsCounter.TOTAL_MEM_USAGE, MetricsCounter.ANON_MEM_USAGE, MetricsCounter.CACHE_MEM_USAGE, MetricsCounter.MAX_MEM_USAGE, MetricsCounter.SWAP_MEM_USAGE]) + if metric.counter == MetricsCounter.TOTAL_MEM_USAGE: + self.assertEqual(metric.value, current_total_memory_metric_value) + elif metric.counter == MetricsCounter.ANON_MEM_USAGE: + self.assertEqual(metric.value, current_anon_memory_metric_value) + elif metric.counter == MetricsCounter.CACHE_MEM_USAGE: + self.assertEqual(metric.value, current_cache_memory_metric_value) + elif metric.counter == MetricsCounter.MAX_MEM_USAGE: self.assertEqual(metric.value, max_memory_metric_value) - elif metric.counter == "Swap Memory Usage": + elif metric.counter == MetricsCounter.SWAP_MEM_USAGE: self.assertEqual(metric.value, swap_memory_value) def test_telemetry_polling_with_active_cgroups(self, *args): # pylint: disable=unused-argument num_extensions = 3 - self._track_new_extension_cgroups(num_extensions) - - with patch("azurelinuxagent.ga.cgroup.MemoryCgroup.get_max_memory_usage") as patch_get_memory_max_usage: - with patch("azurelinuxagent.ga.cgroup.MemoryCgroup.get_memory_usage") as patch_get_memory_usage: - with patch("azurelinuxagent.ga.cgroup.MemoryCgroup.get_memory_usage") as patch_get_memory_usage: - with 
patch("azurelinuxagent.ga.cgroup.MemoryCgroup.try_swap_memory_usage") as patch_try_swap_memory_usage: - with patch("azurelinuxagent.ga.cgroup.CpuCgroup.get_cpu_usage") as patch_get_cpu_usage: - with patch("azurelinuxagent.ga.cgroup.CGroup.is_active") as patch_is_active: - patch_is_active.return_value = True - - current_cpu = 30 - current_memory = 209715200 - current_max_memory = 471859200 - current_swap_memory = 20971520 - - # 1 CPU metric + 1 Current Memory + 1 Max memory + 1 swap memory - num_of_metrics_per_extn_expected = 4 - patch_get_cpu_usage.return_value = current_cpu - patch_get_memory_usage.return_value = current_memory # example 200 MB - patch_get_memory_max_usage.return_value = current_max_memory # example 450 MB - patch_try_swap_memory_usage.return_value = current_swap_memory # example 20MB - num_polls = 12 - - for data_count in range(1, num_polls + 1): # pylint: disable=unused-variable - metrics = CGroupsTelemetry.poll_all_tracked() - - self.assertEqual(len(metrics), num_extensions * num_of_metrics_per_extn_expected) - self._assert_polled_metrics_equal(metrics, current_cpu, current_memory, current_max_memory, current_swap_memory) - - @patch("azurelinuxagent.ga.cgroup.MemoryCgroup.get_max_memory_usage", side_effect=raise_ioerror) - @patch("azurelinuxagent.ga.cgroup.MemoryCgroup.get_memory_usage", side_effect=raise_ioerror) - @patch("azurelinuxagent.ga.cgroup.CpuCgroup.get_cpu_usage", side_effect=raise_ioerror) - @patch("azurelinuxagent.ga.cgroup.CGroup.is_active", return_value=False) + self._track_new_extension_cgroup_controllers(num_extensions) + + with patch("azurelinuxagent.ga.memorycontroller.MemoryControllerV1.get_max_memory_usage") as patch_get_memory_max_usage: + with patch("azurelinuxagent.ga.memorycontroller.MemoryControllerV1.get_memory_usage") as patch_get_memory_usage: + with patch("azurelinuxagent.ga.memorycontroller.MemoryControllerV1.try_swap_memory_usage") as patch_try_swap_memory_usage: + with 
patch("azurelinuxagent.ga.cpucontroller.CpuControllerV1.get_cpu_usage") as patch_get_cpu_usage: + with patch("azurelinuxagent.ga.cgroupcontroller._CgroupController.is_active") as patch_is_active: + patch_is_active.return_value = True + + current_cpu = 30 + current_anon_memory = 209715200 + current_cache_memory = 314572800 + current_total_memory = 209715200 + 314572800 + current_max_memory = 471859200 + current_swap_memory = 20971520 + + # 1 CPU metric + 1 total Memory + 1 anon memory + 1 cache memory + 1 Max memory + 1 swap memory + num_of_metrics_per_extn_expected = 6 + patch_get_cpu_usage.return_value = current_cpu + patch_get_memory_usage.return_value = current_anon_memory, current_cache_memory # example 200 MB, 300 MB + patch_get_memory_max_usage.return_value = current_max_memory # example 450 MB + patch_try_swap_memory_usage.return_value = current_swap_memory # example 20MB + num_polls = 18 + + for data_count in range(1, num_polls + 1): # pylint: disable=unused-variable + metrics = CGroupsTelemetry.poll_all_tracked() + + self.assertEqual(len(metrics), num_extensions * num_of_metrics_per_extn_expected) + self._assert_polled_metrics_equal(metrics, current_cpu, current_total_memory, current_anon_memory, current_cache_memory, current_max_memory, current_swap_memory) + + @patch("azurelinuxagent.ga.memorycontroller.MemoryControllerV1.get_max_memory_usage", side_effect=raise_ioerror) + @patch("azurelinuxagent.ga.memorycontroller.MemoryControllerV1.get_memory_usage", side_effect=raise_ioerror) + @patch("azurelinuxagent.ga.cpucontroller.CpuControllerV1.get_cpu_usage", side_effect=raise_ioerror) + @patch("azurelinuxagent.ga.cgroupcontroller._CgroupController.is_active", return_value=False) def test_telemetry_polling_with_inactive_cgroups(self, *_): num_extensions = 5 no_extensions_expected = 0 # pylint: disable=unused-variable - self._track_new_extension_cgroups(num_extensions) - self._assert_cgroups_are_tracked(num_extensions) + 
self._track_new_extension_cgroup_controllers(num_extensions) + self._assert_cgroup_controllers_are_tracked(num_extensions) metrics = CGroupsTelemetry.poll_all_tracked() @@ -182,14 +189,14 @@ def test_telemetry_polling_with_inactive_cgroups(self, *_): self.assertEqual(len(metrics), 0) - @patch("azurelinuxagent.ga.cgroup.MemoryCgroup.get_max_memory_usage") - @patch("azurelinuxagent.ga.cgroup.MemoryCgroup.get_memory_usage") - @patch("azurelinuxagent.ga.cgroup.CpuCgroup.get_cpu_usage") - @patch("azurelinuxagent.ga.cgroup.CGroup.is_active") + @patch("azurelinuxagent.ga.memorycontroller.MemoryControllerV1.get_max_memory_usage") + @patch("azurelinuxagent.ga.memorycontroller.MemoryControllerV1.get_memory_usage") + @patch("azurelinuxagent.ga.cpucontroller.CpuControllerV1.get_cpu_usage") + @patch("azurelinuxagent.ga.cgroupcontroller._CgroupController.is_active") def test_telemetry_polling_with_changing_cgroups_state(self, patch_is_active, patch_get_cpu_usage, # pylint: disable=unused-argument patch_get_mem, patch_get_max_mem, *args): num_extensions = 5 - self._track_new_extension_cgroups(num_extensions) + self._track_new_extension_cgroup_controllers(num_extensions) patch_is_active.return_value = True @@ -197,17 +204,18 @@ def test_telemetry_polling_with_changing_cgroups_state(self, patch_is_active, pa expected_data_count = 1 # pylint: disable=unused-variable current_cpu = 30 - current_memory = 209715200 + current_anon_memory = 104857600 + current_cache_memory = 104857600 current_max_memory = 471859200 patch_get_cpu_usage.return_value = current_cpu - patch_get_mem.return_value = current_memory # example 200 MB + patch_get_mem.return_value = current_anon_memory, current_cache_memory # example 100 MB, 100 MB patch_get_max_mem.return_value = current_max_memory # example 450 MB - self._assert_cgroups_are_tracked(num_extensions) + self._assert_cgroup_controllers_are_tracked(num_extensions) CGroupsTelemetry.poll_all_tracked() - self._assert_cgroups_are_tracked(num_extensions) + 
self._assert_cgroup_controllers_are_tracked(num_extensions) patch_is_active.return_value = False patch_get_cpu_usage.side_effect = raise_ioerror @@ -225,7 +233,7 @@ def test_telemetry_polling_with_changing_cgroups_state(self, patch_is_active, pa @patch("azurelinuxagent.common.logger.periodic_warn") def test_telemetry_polling_to_not_generate_transient_logs_ioerror_file_not_found(self, patch_periodic_warn): num_extensions = 1 - self._track_new_extension_cgroups(num_extensions) + self._track_new_extension_cgroup_controllers(num_extensions) self.assertEqual(0, patch_periodic_warn.call_count) # Not expecting logs present for io_error with errno=errno.ENOENT @@ -243,7 +251,7 @@ def test_telemetry_polling_to_generate_transient_logs_ioerror_permission_denied( num_extensions = 1 num_controllers = 1 is_active_check_per_controller = 2 - self._track_new_extension_cgroups(num_extensions) + self._track_new_extension_cgroup_controllers(num_extensions) self.assertEqual(0, patch_periodic_warn.call_count) @@ -254,7 +262,7 @@ def test_telemetry_polling_to_generate_transient_logs_ioerror_permission_denied( with patch("azurelinuxagent.common.utils.fileutil.read_file", side_effect=io_error_3): poll_count = 1 expected_count_per_call = num_controllers + is_active_check_per_controller - # get_max_memory_usage memory controller would generate a log statement, and each cgroup would invoke a + # get_cpu_usage cpu controller would generate a log statement, and each cgroup controller would invoke a # is active check raising an exception for data_count in range(poll_count, 10): # pylint: disable=unused-variable @@ -263,23 +271,23 @@ def test_telemetry_polling_to_generate_transient_logs_ioerror_permission_denied( def test_telemetry_polling_to_generate_transient_logs_index_error(self): num_extensions = 1 - self._track_new_extension_cgroups(num_extensions) + self._track_new_extension_cgroup_controllers(num_extensions) # Generating a different kind of error (non-IOError) to check the logging. 
# Trying to invoke IndexError during the getParameter call with patch("azurelinuxagent.common.utils.fileutil.read_file", return_value=''): with patch("azurelinuxagent.common.logger.periodic_warn") as patch_periodic_warn: - expected_call_count = 1 # 1 periodic warning for memory + expected_call_count = 1 # 1 periodic warning for cpu for data_count in range(1, 10): # pylint: disable=unused-variable CGroupsTelemetry.poll_all_tracked() self.assertEqual(expected_call_count, patch_periodic_warn.call_count) - @patch("azurelinuxagent.ga.cgroup.MemoryCgroup.try_swap_memory_usage") - @patch("azurelinuxagent.ga.cgroup.MemoryCgroup.get_max_memory_usage") - @patch("azurelinuxagent.ga.cgroup.MemoryCgroup.get_memory_usage") - @patch("azurelinuxagent.ga.cgroup.CpuCgroup.get_cpu_usage") - @patch("azurelinuxagent.ga.cgroup.CGroup.is_active") - def test_telemetry_calculations(self, patch_is_active, patch_get_cpu_usage, patch_get_memory_usage, patch_get_memory_max_usage, patch_try_memory_swap_usage, + @patch("azurelinuxagent.ga.memorycontroller.MemoryControllerV1.try_swap_memory_usage") + @patch("azurelinuxagent.ga.memorycontroller.MemoryControllerV1.get_max_memory_usage") + @patch("azurelinuxagent.ga.memorycontroller.MemoryControllerV1.get_memory_usage") + @patch("azurelinuxagent.ga.cpucontroller.CpuControllerV1.get_cpu_usage") + @patch("azurelinuxagent.ga.cgroupcontroller._CgroupController.is_active") + def test_telemetry_calculations(self, patch_is_active, patch_get_cpu_usage, patch_get_memory_usage, patch_get_memory_max_usage, patch_try_memory_swap_usage, *args): # pylint: disable=unused-argument num_polls = 10 num_extensions = 1 @@ -287,47 +295,48 @@ def test_telemetry_calculations(self, patch_is_active, patch_get_cpu_usage, pat cpu_percent_values = [random.randint(0, 100) for _ in range(num_polls)] # only verifying calculations and not validity of the values. 
- memory_usage_values = [random.randint(0, 8 * 1024 ** 3) for _ in range(num_polls)] + anon_usage_values = [random.randint(0, 8 * 1024 ** 3) for _ in range(num_polls)] + cache_usage_values = [random.randint(0, 8 * 1024 ** 3) for _ in range(num_polls)] max_memory_usage_values = [random.randint(0, 8 * 1024 ** 3) for _ in range(num_polls)] swap_usage_values = [random.randint(0, 8 * 1024 ** 3) for _ in range(num_polls)] - self._track_new_extension_cgroups(num_extensions) + self._track_new_extension_cgroup_controllers(num_extensions) self.assertEqual(2 * num_extensions, len(CGroupsTelemetry._tracked)) for i in range(num_polls): patch_is_active.return_value = True patch_get_cpu_usage.return_value = cpu_percent_values[i] - patch_get_memory_usage.return_value = memory_usage_values[i] + patch_get_memory_usage.return_value = anon_usage_values[i], cache_usage_values[i] patch_get_memory_max_usage.return_value = max_memory_usage_values[i] patch_try_memory_swap_usage.return_value = swap_usage_values[i] metrics = CGroupsTelemetry.poll_all_tracked() - # 1 CPU metric + 1 Current Memory + 1 Max memory + 1 swap memory - self.assertEqual(len(metrics), 4 * num_extensions) - self._assert_polled_metrics_equal(metrics, cpu_percent_values[i], memory_usage_values[i], max_memory_usage_values[i], swap_usage_values[i]) + # 1 CPU metric + 1 Total Memory + 1 anon memory + 1 cache memory + 1 Max memory + 1 swap memory + self.assertEqual(len(metrics), 6 * num_extensions) + self._assert_polled_metrics_equal(metrics, cpu_percent_values[i], anon_usage_values[i] + cache_usage_values[i], anon_usage_values[i], cache_usage_values[i], max_memory_usage_values[i], swap_usage_values[i]) def test_cgroup_tracking(self, *args): # pylint: disable=unused-argument num_extensions = 5 num_controllers = 2 - self._track_new_extension_cgroups(num_extensions) - self._assert_cgroups_are_tracked(num_extensions) + self._track_new_extension_cgroup_controllers(num_extensions) + 
self._assert_cgroup_controllers_are_tracked(num_extensions) self.assertEqual(num_extensions * num_controllers, len(CGroupsTelemetry._tracked)) def test_cgroup_is_tracked(self, *args): # pylint: disable=unused-argument num_extensions = 5 - self._track_new_extension_cgroups(num_extensions) - self._assert_cgroups_are_tracked(num_extensions) + self._track_new_extension_cgroup_controllers(num_extensions) + self._assert_cgroup_controllers_are_tracked(num_extensions) self.assertFalse(CGroupsTelemetry.is_tracked("not_present_cpu_dummy_path")) self.assertFalse(CGroupsTelemetry.is_tracked("not_present_memory_dummy_path")) - @patch("azurelinuxagent.ga.cgroup.MemoryCgroup.get_memory_usage", side_effect=raise_ioerror) + @patch("azurelinuxagent.ga.memorycontroller.MemoryControllerV1.get_memory_usage", side_effect=raise_ioerror) def test_process_cgroup_metric_with_no_memory_cgroup_mounted(self, *args): # pylint: disable=unused-argument num_extensions = 5 - self._track_new_extension_cgroups(num_extensions) + self._track_new_extension_cgroup_controllers(num_extensions) - with patch("azurelinuxagent.ga.cgroup.CpuCgroup.get_cpu_usage") as patch_get_cpu_usage: - with patch("azurelinuxagent.ga.cgroup.CGroup.is_active") as patch_is_active: + with patch("azurelinuxagent.ga.cpucontroller.CpuControllerV1.get_cpu_usage") as patch_get_cpu_usage: + with patch("azurelinuxagent.ga.cgroupcontroller._CgroupController.is_active") as patch_is_active: patch_is_active.return_value = True current_cpu = 30 @@ -339,42 +348,44 @@ def test_process_cgroup_metric_with_no_memory_cgroup_mounted(self, *args): # py metrics = CGroupsTelemetry.poll_all_tracked() self.assertEqual(len(metrics), num_extensions * 1) # Only CPU populated - self._assert_polled_metrics_equal(metrics, current_cpu, 0, 0, 0) + self._assert_polled_metrics_equal(metrics, current_cpu, 0, 0, 0, 0, 0) - @patch("azurelinuxagent.ga.cgroup.CpuCgroup.get_cpu_usage", side_effect=raise_ioerror) + 
@patch("azurelinuxagent.ga.cpucontroller.CpuControllerV1.get_cpu_usage", side_effect=raise_ioerror) def test_process_cgroup_metric_with_no_cpu_cgroup_mounted(self, *args): # pylint: disable=unused-argument num_extensions = 5 - self._track_new_extension_cgroups(num_extensions) + self._track_new_extension_cgroup_controllers(num_extensions) - with patch("azurelinuxagent.ga.cgroup.MemoryCgroup.get_max_memory_usage") as patch_get_memory_max_usage: - with patch("azurelinuxagent.ga.cgroup.MemoryCgroup.get_memory_usage") as patch_get_memory_usage: - with patch("azurelinuxagent.ga.cgroup.MemoryCgroup.try_swap_memory_usage") as patch_try_swap_memory_usage: - with patch("azurelinuxagent.ga.cgroup.CGroup.is_active") as patch_is_active: + with patch("azurelinuxagent.ga.memorycontroller.MemoryControllerV1.get_max_memory_usage") as patch_get_memory_max_usage: + with patch("azurelinuxagent.ga.memorycontroller.MemoryControllerV1.get_memory_usage") as patch_get_memory_usage: + with patch("azurelinuxagent.ga.memorycontroller.MemoryControllerV1.try_swap_memory_usage") as patch_try_swap_memory_usage: + with patch("azurelinuxagent.ga.cgroupcontroller._CgroupController.is_active") as patch_is_active: patch_is_active.return_value = True - current_memory = 209715200 + current_total_memory = 209715200 + current_anon_memory = 104857600 + current_cache_memory = 104857600 current_max_memory = 471859200 current_swap_memory = 20971520 - patch_get_memory_usage.return_value = current_memory # example 200 MB + patch_get_memory_usage.return_value = current_anon_memory, current_cache_memory # example 100 MB, 100 MB patch_get_memory_max_usage.return_value = current_max_memory # example 450 MB patch_try_swap_memory_usage.return_value = current_swap_memory # example 20MB num_polls = 10 for data_count in range(1, num_polls + 1): # pylint: disable=unused-variable metrics = CGroupsTelemetry.poll_all_tracked() - # Memory is only populated, CPU is not. Thus 3 metrics for memory. 
- self.assertEqual(len(metrics), num_extensions * 3) - self._assert_polled_metrics_equal(metrics, 0, current_memory, current_max_memory, current_swap_memory) + # Memory is only populated, CPU is not. Thus 5 metrics for memory. + self.assertEqual(len(metrics), num_extensions * 5) + self._assert_polled_metrics_equal(metrics, 0, current_total_memory, current_anon_memory, current_cache_memory, current_max_memory, current_swap_memory) - @patch("azurelinuxagent.ga.cgroup.MemoryCgroup.get_memory_usage", side_effect=raise_ioerror) - @patch("azurelinuxagent.ga.cgroup.MemoryCgroup.get_max_memory_usage", side_effect=raise_ioerror) - @patch("azurelinuxagent.ga.cgroup.CpuCgroup.get_cpu_usage", side_effect=raise_ioerror) + @patch("azurelinuxagent.ga.memorycontroller.MemoryControllerV1.get_memory_usage", side_effect=raise_ioerror) + @patch("azurelinuxagent.ga.memorycontroller.MemoryControllerV1.get_max_memory_usage", side_effect=raise_ioerror) + @patch("azurelinuxagent.ga.cpucontroller.CpuControllerV1.get_cpu_usage", side_effect=raise_ioerror) def test_extension_telemetry_not_sent_for_empty_perf_metrics(self, *args): # pylint: disable=unused-argument num_extensions = 5 - self._track_new_extension_cgroups(num_extensions) + self._track_new_extension_cgroup_controllers(num_extensions) - with patch("azurelinuxagent.ga.cgroup.CGroup.is_active") as patch_is_active: + with patch("azurelinuxagent.ga.cgroupcontroller._CgroupController.is_active") as patch_is_active: patch_is_active.return_value = False poll_count = 1 @@ -383,9 +394,9 @@ def test_extension_telemetry_not_sent_for_empty_perf_metrics(self, *args): # py metrics = CGroupsTelemetry.poll_all_tracked() self.assertEqual(0, len(metrics)) - @patch("azurelinuxagent.ga.cgroup.CpuCgroup.get_cpu_usage") - @patch("azurelinuxagent.ga.cgroup.CpuCgroup.get_cpu_throttled_time") - @patch("azurelinuxagent.ga.cgroup.CGroup.is_active") + @patch("azurelinuxagent.ga.cpucontroller.CpuControllerV1.get_cpu_usage") + 
@patch("azurelinuxagent.ga.cpucontroller.CpuControllerV1.get_cpu_throttled_time") + @patch("azurelinuxagent.ga.cgroupcontroller._CgroupController.is_active") def test_cgroup_telemetry_should_not_report_cpu_negative_value(self, patch_is_active, path_get_throttled_time, patch_get_cpu_usage): num_polls = 5 @@ -396,8 +407,8 @@ def test_cgroup_telemetry_should_not_report_cpu_negative_value(self, patch_is_ac cpu_percent_values.append(-1) cpu_throttled_values = [random.randint(0, 60 * 60) for _ in range(num_polls)] - dummy_cpu_cgroup = CpuCgroup("dummy_extension_name", "dummy_cpu_path") - CGroupsTelemetry.track_cgroup(dummy_cpu_cgroup) + dummy_cpu_cgroup = CpuControllerV1("dummy_extension_name", "dummy_cpu_path") + CGroupsTelemetry.track_cgroup_controller(dummy_cpu_cgroup) self.assertEqual(1, len(CGroupsTelemetry._tracked)) for i in range(num_polls): diff --git a/tests/ga/test_collect_logs.py b/tests/ga/test_collect_logs.py index 4ac3f03fb4..458cd2e693 100644 --- a/tests/ga/test_collect_logs.py +++ b/tests/ga/test_collect_logs.py @@ -18,13 +18,15 @@ import os from azurelinuxagent.common import logger, conf -from azurelinuxagent.ga.cgroup import CpuCgroup, MemoryCgroup, MetricValue +from azurelinuxagent.ga.cgroupcontroller import MetricValue, MetricsCounter from azurelinuxagent.ga.cgroupconfigurator import CGroupConfigurator from azurelinuxagent.common.logger import Logger from azurelinuxagent.common.protocol.util import ProtocolUtil from azurelinuxagent.common.utils import fileutil from azurelinuxagent.ga.collect_logs import get_collect_logs_handler, is_log_collection_allowed, \ get_log_collector_monitor_handler +from azurelinuxagent.ga.cpucontroller import CpuControllerV1, CpuControllerV2 +from azurelinuxagent.ga.memorycontroller import MemoryControllerV1, MemoryControllerV2 from tests.lib.mock_wire_protocol import mock_wire_protocol, MockHttpResponse from tests.lib.http_request_predicates import HttpRequestPredicates from tests.lib.wire_protocol_data import DATA_FILE @@ 
-32,8 +34,13 @@ is_python_version_26, data_dir +class CgroupVersions: + V1 = "v1" + V2 = "v2" + + @contextlib.contextmanager -def _create_collect_logs_handler(iterations=1, cgroups_enabled=True, collect_logs_conf=True): +def _create_collect_logs_handler(iterations=1, cgroup_version=CgroupVersions.V1, cgroups_enabled=True, collect_logs_conf=True, cgroupv2_resource_limiting_conf=False): """ Creates an instance of CollectLogsHandler that * Uses a mock_wire_protocol for network requests, @@ -52,19 +59,33 @@ def _create_collect_logs_handler(iterations=1, cgroups_enabled=True, collect_log with patch("azurelinuxagent.ga.collect_logs.CollectLogsHandler.stopped", side_effect=[False] * iterations + [True]): with patch("time.sleep"): - # Grab the singleton to patch it - cgroups_configurator_singleton = CGroupConfigurator.get_instance() - with patch.object(cgroups_configurator_singleton, "enabled", return_value=cgroups_enabled): - with patch("azurelinuxagent.ga.collect_logs.conf.get_collect_logs", - return_value=collect_logs_conf): - def run_and_wait(): - collect_logs_handler.run() - collect_logs_handler.join() - - collect_logs_handler = get_collect_logs_handler() - collect_logs_handler.get_mock_wire_protocol = lambda: protocol - collect_logs_handler.run_and_wait = run_and_wait - yield collect_logs_handler + with patch("azurelinuxagent.ga.collect_logs.conf.get_collect_logs", return_value=collect_logs_conf): + + # Grab the singleton to patch it + cgroups_configurator_singleton = CGroupConfigurator.get_instance() + + if cgroup_version == CgroupVersions.V1: + with patch.object(cgroups_configurator_singleton, "enabled", return_value=cgroups_enabled): + def run_and_wait(): + collect_logs_handler.run() + collect_logs_handler.join() + + collect_logs_handler = get_collect_logs_handler() + collect_logs_handler.get_mock_wire_protocol = lambda: protocol + collect_logs_handler.run_and_wait = run_and_wait + yield collect_logs_handler + else: + with 
patch("azurelinuxagent.ga.collect_logs.conf.get_enable_cgroup_v2_resource_limiting", return_value=cgroupv2_resource_limiting_conf): + with patch.object(cgroups_configurator_singleton, "enabled", return_value=False): + with patch("azurelinuxagent.ga.cgroupconfigurator.CGroupConfigurator._Impl.using_cgroup_v2", return_value=True): + def run_and_wait(): + collect_logs_handler.run() + collect_logs_handler.join() + + collect_logs_handler = get_collect_logs_handler() + collect_logs_handler.get_mock_wire_protocol = lambda: protocol + collect_logs_handler.run_and_wait = run_and_wait + yield collect_logs_handler @skip_if_predicate_true(is_python_version_26, "Disabled on Python 2.6") @@ -101,26 +122,124 @@ def _create_dummy_archive(self, size=1024): def test_it_should_only_collect_logs_if_conditions_are_met(self): # In order to collect logs, three conditions have to be met: - # 1) the flag must be set to true in the conf file - # 2) cgroups must be managing services - # 3) python version 2.7+ which is automatically true for these tests since they are disabled on py2.6 + # 1) It should be enabled in the configuration. + # 2) The system must be using cgroups to manage services - needed for resource limiting of the log collection. The + # agent currently fully supports resource limiting for v1, but only supports log collector resource limiting for v2 + # if enabled via configuration. + # This condition is True if either: + # a. cgroup usage in the agent is enabled; OR + # b. the machine is using cgroup v2 and v2 resource limiting is enabled in the configuration. + # 3) The python version must be greater than 2.6 in order to support the ZipFile library used when collecting. + + # Note, cgroups should not be in an 'enabled' state in the configurator if v2 is in use. Resource governance is + # not fully supported on v2 yet. 
+ + # If collect logs is not enabled in the configuration, then log collection should always be disabled + + # Case 1: + # - Cgroups are enabled in the configurator + # - Cgroup v2 is not in use + # - Cgroup v2 resource limiting conf is True + # - collect logs config flag false + with _create_collect_logs_handler(cgroups_enabled=True, cgroup_version=CgroupVersions.V1, cgroupv2_resource_limiting_conf=True, collect_logs_conf=False): + self.assertEqual(False, is_log_collection_allowed(), "Log collection should not have been enabled") + + # Case 2: + # - Cgroups are enabled in the configurator + # - Cgroup v2 is not in use + # - Cgroup v2 resource limiting conf is False + # - collect logs config flag false + with _create_collect_logs_handler(cgroups_enabled=True, cgroup_version=CgroupVersions.V1, cgroupv2_resource_limiting_conf=False, collect_logs_conf=False): + self.assertEqual(False, is_log_collection_allowed(), "Log collection should not have been enabled") + + # Case 3: + # - Cgroups are disabled in the configurator + # - Cgroup v2 is in use + # - Cgroup v2 resource limiting conf is True + # - collect logs config flag false + with _create_collect_logs_handler(cgroups_enabled=False, cgroup_version=CgroupVersions.V2, cgroupv2_resource_limiting_conf=True, collect_logs_conf=False): + self.assertEqual(False, is_log_collection_allowed(), "Log collection should not have been enabled") - # cgroups not enabled, config flag false - with _create_collect_logs_handler(cgroups_enabled=False, collect_logs_conf=False): + # Case 4: + # - Cgroups are disabled in the configurator + # - Cgroup v2 is in use + # - Cgroup v2 resource limiting conf is False + # - collect logs config flag false + with _create_collect_logs_handler(cgroups_enabled=False, cgroup_version=CgroupVersions.V2, cgroupv2_resource_limiting_conf=False, collect_logs_conf=False): self.assertEqual(False, is_log_collection_allowed(), "Log collection should not have been enabled") - # cgroups enabled, config flag false - 
with _create_collect_logs_handler(cgroups_enabled=True, collect_logs_conf=False): + # Case 5: + # - Cgroups are disabled in the configurator + # - Cgroup v2 is not in use + # - Cgroup v2 resource limiting conf is True + # - collect logs config flag false + with _create_collect_logs_handler(cgroups_enabled=False, cgroup_version=CgroupVersions.V1, cgroupv2_resource_limiting_conf=True, collect_logs_conf=False): self.assertEqual(False, is_log_collection_allowed(), "Log collection should not have been enabled") - # cgroups not enabled, config flag true - with _create_collect_logs_handler(cgroups_enabled=False, collect_logs_conf=True): + # Case 6: + # - Cgroups are disabled in the configurator + # - Cgroup v2 is not in use + # - Cgroup v2 resource limiting conf is False + # - collect logs config flag false + with _create_collect_logs_handler(cgroups_enabled=False, cgroup_version=CgroupVersions.V1, cgroupv2_resource_limiting_conf=False, collect_logs_conf=False): self.assertEqual(False, is_log_collection_allowed(), "Log collection should not have been enabled") - # cgroups enabled, config flag true - with _create_collect_logs_handler(cgroups_enabled=True, collect_logs_conf=True): + # If collect logs is enabled in the configuration and cgroups are enabled in the configurator, then log collection should always be enabled + + # Case 7: + # - Cgroups are enabled in the configurator + # - Cgroup v2 is not in use + # - Cgroup v2 resource limiting conf is True + # - collect logs config flag true + with _create_collect_logs_handler(cgroups_enabled=True, cgroup_version=CgroupVersions.V1, cgroupv2_resource_limiting_conf=True, collect_logs_conf=True): self.assertEqual(True, is_log_collection_allowed(), "Log collection should have been enabled") + # Case 8: + # - Cgroups are enabled in the configurator + # - Cgroup v2 is not in use + # - Cgroup v2 resource limiting conf is False + # - collect logs config flag true + with _create_collect_logs_handler(cgroups_enabled=True, 
cgroup_version=CgroupVersions.V1, cgroupv2_resource_limiting_conf=False, collect_logs_conf=True): + self.assertEqual(True, is_log_collection_allowed(), "Log collection should have been enabled") + + # If collect logs is enabled in the configuration and v2 is in use with the v2 resource limiting conf enabled, then log collection should always be enabled + + # Case 9: + # - Cgroups are disabled in the configurator + # - Cgroup v2 is in use + # - Cgroup v2 resource limiting conf is True + # - collect logs config flag true + with _create_collect_logs_handler(cgroups_enabled=False, cgroup_version=CgroupVersions.V2, cgroupv2_resource_limiting_conf=True, collect_logs_conf=True): + self.assertEqual(True, is_log_collection_allowed(), "Log collection should have been enabled") + + # If collect logs is enabled in the configuration and v2 is in use but the v2 resource limiting conf disabled, then log collection should always be disabled + + # Case 10: + # - Cgroups are disabled in the configurator + # - Cgroup v2 is in use + # - Cgroup v2 resource limiting conf is False + # - collect logs config flag true + with _create_collect_logs_handler(cgroups_enabled=False, cgroup_version=CgroupVersions.V2, cgroupv2_resource_limiting_conf=False, collect_logs_conf=True): + self.assertEqual(False, is_log_collection_allowed(), "Log collection should not have been enabled") + + # If collect logs is enabled in the configuration but cgroups are disabled in the configurator and v2 is not in use, then log collection should always be disabled + + # Case 11: + # - Cgroups are disabled in the configurator + # - Cgroup v2 is not in use + # - Cgroup v2 resource limiting conf is True + # - collect logs config flag true + with _create_collect_logs_handler(cgroups_enabled=False, cgroup_version=CgroupVersions.V1, cgroupv2_resource_limiting_conf=True, collect_logs_conf=True): + self.assertEqual(False, is_log_collection_allowed(), "Log collection should not have been enabled") + + # Case 12: + # - Cgroups 
are disabled in the configurator + # - Cgroup v2 is not in use + # - Cgroup v2 resource limiting conf is False + # - collect logs config flag true + with _create_collect_logs_handler(cgroups_enabled=False, cgroup_version=CgroupVersions.V1, cgroupv2_resource_limiting_conf=False, collect_logs_conf=True): + self.assertEqual(False, is_log_collection_allowed(), "Log collection should not have been enabled") + def test_it_uploads_logs_when_collection_is_successful(self): archive_size = 42 @@ -168,7 +287,7 @@ def http_put_handler(url, _, **__): @contextlib.contextmanager -def _create_log_collector_monitor_handler(iterations=1): +def _create_log_collector_monitor_handler(iterations=1, cgroup_version=CgroupVersions.V1): """ Creates an instance of LogCollectorMonitorHandler that * Runs its main loop only the number of times given in the 'iterations' parameter, and @@ -184,22 +303,40 @@ def _create_log_collector_monitor_handler(iterations=1): original_read_file = fileutil.read_file - def mock_read_file(filepath, **args): + def mock_read_file_v1(filepath, **args): if filepath == "/proc/stat": - filepath = os.path.join(data_dir, "cgroups", "proc_stat_t0") + filepath = os.path.join(data_dir, "cgroups", "v1", "proc_stat_t0") elif filepath.endswith("/cpuacct.stat"): - filepath = os.path.join(data_dir, "cgroups", "cpuacct.stat_t0") + filepath = os.path.join(data_dir, "cgroups", "v1", "cpuacct.stat_t0") + return original_read_file(filepath, **args) + + def mock_read_file_v2(filepath, **args): + if filepath == "/proc/uptime": + filepath = os.path.join(data_dir, "cgroups", "v2", "proc_uptime_t0") + elif filepath.endswith("/cpu.stat"): + filepath = os.path.join(data_dir, "cgroups", "v2", "cpu.stat_t0") return original_read_file(filepath, **args) + mock_read_file = None + cgroups = [] + if cgroup_version == "v1": + mock_read_file = mock_read_file_v1 + cgroups = [ + CpuControllerV1("test", "dummy_cpu_path"), + MemoryControllerV1("test", "dummy_memory_path") + ] + else: + mock_read_file = 
mock_read_file_v2 + cgroups = [ + CpuControllerV2("test", "dummy_cpu_path"), + MemoryControllerV2("test", "dummy_memory_path") + ] + with patch("azurelinuxagent.common.utils.fileutil.read_file", side_effect=mock_read_file): def run_and_wait(): monitor_log_collector.run() monitor_log_collector.join() - cgroups = [ - CpuCgroup("test", "dummy_cpu_path"), - MemoryCgroup("test", "dummy_memory_path") - ] monitor_log_collector = get_log_collector_monitor_handler(cgroups) monitor_log_collector.run_and_wait = run_and_wait yield monitor_log_collector @@ -207,33 +344,78 @@ def run_and_wait(): class TestLogCollectorMonitorHandler(AgentTestCase): - @patch('azurelinuxagent.common.event.EventLogger.add_metric') - @patch("azurelinuxagent.ga.collect_logs.LogCollectorMonitorHandler._poll_resource_usage") - def test_send_extension_metrics_telemetry(self, patch_poll_resource_usage, patch_add_metric): + def test_get_max_recorded_metrics(self): + with _create_log_collector_monitor_handler(iterations=2) as log_collector_monitor_handler: + nonlocal_vars = { + 'cpu_iteration': 0, + 'mem_iteration': 0, + 'multiplier': 5 + } + + def get_different_cpu_metrics(**kwargs): # pylint: disable=W0613 + metrics = [MetricValue("Process", MetricsCounter.PROCESSOR_PERCENT_TIME, "service", 4.5), MetricValue("Process", MetricsCounter.THROTTLED_TIME, "service", nonlocal_vars['cpu_iteration']*nonlocal_vars['multiplier'] + 10.000)] + nonlocal_vars['cpu_iteration'] += 1 + return metrics + + def get_different_memory_metrics(**kwargs): # pylint: disable=W0613 + metrics = [MetricValue("Memory", MetricsCounter.TOTAL_MEM_USAGE, "service", 20), + MetricValue("Memory", MetricsCounter.ANON_MEM_USAGE, "service", 15), + MetricValue("Memory", MetricsCounter.CACHE_MEM_USAGE, "service", nonlocal_vars['mem_iteration']*nonlocal_vars['multiplier'] + 5), + MetricValue("Memory", MetricsCounter.MAX_MEM_USAGE, "service", 30), + MetricValue("Memory", MetricsCounter.SWAP_MEM_USAGE, "service", 0)] + nonlocal_vars['mem_iteration'] 
+= 1 + return metrics + + with patch("azurelinuxagent.ga.cpucontroller._CpuController.get_tracked_metrics", side_effect=get_different_cpu_metrics): + with patch("azurelinuxagent.ga.memorycontroller._MemoryController.get_tracked_metrics", side_effect=get_different_memory_metrics): + log_collector_monitor_handler.run_and_wait() + max_recorded_metrics = log_collector_monitor_handler.get_max_recorded_metrics() + self.assertEqual(len(max_recorded_metrics), 7) + self.assertEqual(max_recorded_metrics[MetricsCounter.PROCESSOR_PERCENT_TIME], 4.5) + self.assertEqual(max_recorded_metrics[MetricsCounter.THROTTLED_TIME], 15.0) + self.assertEqual(max_recorded_metrics[MetricsCounter.TOTAL_MEM_USAGE], 20) + self.assertEqual(max_recorded_metrics[MetricsCounter.ANON_MEM_USAGE], 15) + self.assertEqual(max_recorded_metrics[MetricsCounter.CACHE_MEM_USAGE], 10) + self.assertEqual(max_recorded_metrics[MetricsCounter.MAX_MEM_USAGE], 30) + self.assertEqual(max_recorded_metrics[MetricsCounter.SWAP_MEM_USAGE], 0) + + def test_verify_log_collector_memory_limit_exceeded(self): with _create_log_collector_monitor_handler() as log_collector_monitor_handler: - patch_poll_resource_usage.return_value = [MetricValue("Process", "% Processor Time", "service", 1), - MetricValue("Process", "Throttled Time", "service", 1), - MetricValue("Memory", "Total Memory Usage", "service", 1), - MetricValue("Memory", "Max Memory Usage", "service", 1), - MetricValue("Memory", "Swap Memory Usage", "service", 1) - ] - log_collector_monitor_handler.run_and_wait() - self.assertEqual(1, patch_poll_resource_usage.call_count) - self.assertEqual(5, patch_add_metric.call_count) # Five metrics being sent. 
- - @patch("os._exit", side_effect=Exception) - @patch("azurelinuxagent.ga.collect_logs.LogCollectorMonitorHandler._poll_resource_usage") - def test_verify_log_collector_memory_limit_exceeded(self, patch_poll_resource_usage, mock_exit): + cache_exceeded = [MetricValue("Process", MetricsCounter.PROCESSOR_PERCENT_TIME, "service", 4.5), + MetricValue("Process", MetricsCounter.THROTTLED_TIME, "service", 10.281), + MetricValue("Memory", MetricsCounter.TOTAL_MEM_USAGE, "service", 170 * 1024 ** 2), + MetricValue("Memory", MetricsCounter.ANON_MEM_USAGE, "service", 15 * 1024 ** 2), + MetricValue("Memory", MetricsCounter.CACHE_MEM_USAGE, "service", 160 * 1024 ** 2), + MetricValue("Memory", MetricsCounter.MAX_MEM_USAGE, "service", 171 * 1024 ** 2), + MetricValue("Memory", MetricsCounter.SWAP_MEM_USAGE, "service", 0)] + with patch("azurelinuxagent.ga.collect_logs.LogCollectorMonitorHandler._poll_resource_usage", return_value=cache_exceeded): + with patch("os._exit") as mock_exit: + log_collector_monitor_handler.run_and_wait() + self.assertEqual(mock_exit.call_count, 1) + with _create_log_collector_monitor_handler() as log_collector_monitor_handler: - with patch("azurelinuxagent.ga.cgroupconfigurator.LOGCOLLECTOR_MEMORY_LIMIT", 8): - patch_poll_resource_usage.return_value = [MetricValue("Process", "% Processor Time", "service", 1), - MetricValue("Process", "Throttled Time", "service", 1), - MetricValue("Memory", "Total Memory Usage", "service", 9), - MetricValue("Memory", "Max Memory Usage", "service", 7), - MetricValue("Memory", "Swap Memory Usage", "service", 0) - - ] - try: + anon_exceeded = [MetricValue("Process", MetricsCounter.PROCESSOR_PERCENT_TIME, "service", 4.5), + MetricValue("Process", MetricsCounter.THROTTLED_TIME, "service", 10.281), + MetricValue("Memory", MetricsCounter.TOTAL_MEM_USAGE, "service", 170 * 1024 ** 2), + MetricValue("Memory", MetricsCounter.ANON_MEM_USAGE, "service", 30 * 1024 ** 2), + MetricValue("Memory", MetricsCounter.CACHE_MEM_USAGE, "service", 
140 * 1024 ** 2), + MetricValue("Memory", MetricsCounter.MAX_MEM_USAGE, "service", 171 * 1024 ** 2), + MetricValue("Memory", MetricsCounter.SWAP_MEM_USAGE, "service", 0)] + with patch("azurelinuxagent.ga.collect_logs.LogCollectorMonitorHandler._poll_resource_usage", return_value=anon_exceeded): + with patch("os._exit") as mock_exit: + log_collector_monitor_handler.run_and_wait() + self.assertEqual(mock_exit.call_count, 1) + + with _create_log_collector_monitor_handler(cgroup_version=CgroupVersions.V2) as log_collector_monitor_handler: + mem_throttled_exceeded = [MetricValue("Process", MetricsCounter.PROCESSOR_PERCENT_TIME, "service", 4.5), + MetricValue("Process", MetricsCounter.THROTTLED_TIME, "service", 10.281), + MetricValue("Memory", MetricsCounter.TOTAL_MEM_USAGE, "service", 170 * 1024 ** 2), + MetricValue("Memory", MetricsCounter.ANON_MEM_USAGE, "service", 15 * 1024 ** 2), + MetricValue("Memory", MetricsCounter.CACHE_MEM_USAGE, "service", 140 * 1024 ** 2), + MetricValue("Memory", MetricsCounter.MAX_MEM_USAGE, "service", 171 * 1024 ** 2), + MetricValue("Memory", MetricsCounter.SWAP_MEM_USAGE, "service", 0), + MetricValue("Memory", MetricsCounter.MEM_THROTTLED, "service", 11)] + with patch("azurelinuxagent.ga.collect_logs.LogCollectorMonitorHandler._poll_resource_usage", return_value=mem_throttled_exceeded): + with patch("os._exit") as mock_exit: log_collector_monitor_handler.run_and_wait() - except Exception: self.assertEqual(mock_exit.call_count, 1) diff --git a/tests/ga/test_cpucontroller.py b/tests/ga/test_cpucontroller.py new file mode 100644 index 0000000000..bc5fc4070e --- /dev/null +++ b/tests/ga/test_cpucontroller.py @@ -0,0 +1,313 @@ +# Copyright 2018 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Requires Python 2.4+ and Openssl 1.0+ +# + +from __future__ import print_function + +import errno +import os +import random +import shutil + +from azurelinuxagent.ga.cgroupcontroller import MetricsCounter +from azurelinuxagent.ga.cpucontroller import CpuControllerV1, CpuControllerV2 +from azurelinuxagent.common.exception import CGroupsException +from azurelinuxagent.common.osutil import get_osutil +from azurelinuxagent.common.utils import fileutil +from tests.lib.tools import AgentTestCase, patch, data_dir + + +def consume_cpu_time(): + waste = 0 + for x in range(1, 200000): # pylint: disable=unused-variable + waste += random.random() + return waste + + +class TestCpuControllerV1(AgentTestCase): + @classmethod + def setUpClass(cls): + AgentTestCase.setUpClass() + + original_read_file = fileutil.read_file + + # + # Tests that need to mock the contents of /proc/stat or */cpuacct/stat can set this map from + # the file that needs to be mocked to the mock file (each test starts with an empty map). 
If + # an Exception is given instead of a path, the exception is raised + # + cls.mock_read_file_map = {} + + def mock_read_file(filepath, **args): + if filepath in cls.mock_read_file_map: + mapped_value = cls.mock_read_file_map[filepath] + if isinstance(mapped_value, Exception): + raise mapped_value + filepath = mapped_value + return original_read_file(filepath, **args) + + cls.mock_read_file = patch("azurelinuxagent.common.utils.fileutil.read_file", side_effect=mock_read_file) + cls.mock_read_file.start() + + @classmethod + def tearDownClass(cls): + cls.mock_read_file.stop() + AgentTestCase.tearDownClass() + + def setUp(self): + AgentTestCase.setUp(self) + TestCpuControllerV1.mock_read_file_map.clear() + + def test_initialize_cpu_usage_v1_should_set_current_cpu_usage(self): + controller = CpuControllerV1("test", "/sys/fs/cgroup/cpu/system.slice/test") + + TestCpuControllerV1.mock_read_file_map = { + "/proc/stat": os.path.join(data_dir, "cgroups", "v1", "proc_stat_t0"), + os.path.join(controller.path, "cpuacct.stat"): os.path.join(data_dir, "cgroups", "v1", "cpuacct.stat_t0") + } + + controller.initialize_cpu_usage() + + self.assertEqual(controller._current_cgroup_cpu, 63763) + self.assertEqual(controller._current_system_cpu, 5496872) + + def test_get_cpu_usage_v1_should_return_the_cpu_usage_since_its_last_invocation(self): + osutil = get_osutil() + + controller = CpuControllerV1("test", "/sys/fs/cgroup/cpu/system.slice/test") + + TestCpuControllerV1.mock_read_file_map = { + "/proc/stat": os.path.join(data_dir, "cgroups", "v1", "proc_stat_t0"), + os.path.join(controller.path, "cpuacct.stat"): os.path.join(data_dir, "cgroups", "v1", "cpuacct.stat_t0") + } + + controller.initialize_cpu_usage() + + TestCpuControllerV1.mock_read_file_map = { + "/proc/stat": os.path.join(data_dir, "cgroups", "v1", "proc_stat_t1"), + os.path.join(controller.path, "cpuacct.stat"): os.path.join(data_dir, "cgroups", "v1", "cpuacct.stat_t1") + } + + cpu_usage = controller.get_cpu_usage() + 
+ self.assertEqual(cpu_usage, round(100.0 * 0.000307697876885 * osutil.get_processor_cores(), 3)) + + TestCpuControllerV1.mock_read_file_map = { + "/proc/stat": os.path.join(data_dir, "cgroups", "v1", "proc_stat_t2"), + os.path.join(controller.path, "cpuacct.stat"): os.path.join(data_dir, "cgroups", "v1", "cpuacct.stat_t2") + } + + cpu_usage = controller.get_cpu_usage() + + self.assertEqual(cpu_usage, round(100.0 * 0.000445181085968 * osutil.get_processor_cores(), 3)) + + def test_initialize_cpu_usage_v1_should_set_the_cgroup_usage_to_0_when_the_cgroup_does_not_exist(self): + controller = CpuControllerV1("test", "/sys/fs/cgroup/cpu/system.slice/test") + + io_error_2 = IOError() + io_error_2.errno = errno.ENOENT # "No such directory" + + TestCpuControllerV1.mock_read_file_map = { + "/proc/stat": os.path.join(data_dir, "cgroups", "v1", "proc_stat_t0"), + os.path.join(controller.path, "cpuacct.stat"): io_error_2 + } + + controller.initialize_cpu_usage() + + self.assertEqual(controller._current_cgroup_cpu, 0) + self.assertEqual(controller._current_system_cpu, 5496872) # check the system usage just for test sanity + + def test_initialize_cpu_usage_v1_should_raise_an_exception_when_called_more_than_once(self): + controller = CpuControllerV1("test", "/sys/fs/cgroup/cpu/system.slice/test") + + TestCpuControllerV1.mock_read_file_map = { + "/proc/stat": os.path.join(data_dir, "cgroups", "v1", "proc_stat_t0"), + os.path.join(controller.path, "cpuacct.stat"): os.path.join(data_dir, "cgroups", "v1", "cpuacct.stat_t0") + } + + controller.initialize_cpu_usage() + + with self.assertRaises(CGroupsException): + controller.initialize_cpu_usage() + + def test_get_cpu_usage_v1_should_raise_an_exception_when_initialize_cpu_usage_has_not_been_invoked(self): + controller = CpuControllerV1("test", "/sys/fs/cgroup/cpu/system.slice/test") + + with self.assertRaises(CGroupsException): + cpu_usage = controller.get_cpu_usage() # pylint: disable=unused-variable + + def 
test_get_throttled_time_v1_should_return_the_value_since_its_last_invocation(self): + test_file = os.path.join(self.tmp_dir, "cpu.stat") + shutil.copyfile(os.path.join(data_dir, "cgroups", "v1", "cpu.stat_t0"), test_file) # throttled_time = 50 + controller = CpuControllerV1("test", self.tmp_dir) + controller.initialize_cpu_usage() + shutil.copyfile(os.path.join(data_dir, "cgroups", "v1", "cpu.stat_t1"), test_file) # throttled_time = 2075541442327 + + throttled_time = controller.get_cpu_throttled_time() + + self.assertEqual(throttled_time, round(float(2075541442327 - 50) / 1E9, 3), "The value of throttled_time is incorrect") + + def test_get_tracked_metrics_v1_should_return_the_throttled_time(self): + controller = CpuControllerV1("test", os.path.join(data_dir, "cgroups", "v1")) + controller.initialize_cpu_usage() + + def find_throttled_time(metrics): + return [m for m in metrics if m.counter == MetricsCounter.THROTTLED_TIME] + + found = find_throttled_time(controller.get_tracked_metrics()) + self.assertTrue(len(found) == 0, "get_tracked_metrics should not fetch the throttled time by default. Found: {0}".format(found)) + + found = find_throttled_time(controller.get_tracked_metrics(track_throttled_time=True)) + self.assertTrue(len(found) == 1, "get_tracked_metrics should have fetched the throttled time by default. Found: {0}".format(found)) + + +class TestCpuControllerV2(AgentTestCase): + @classmethod + def setUpClass(cls): + AgentTestCase.setUpClass() + + original_read_file = fileutil.read_file + + # + # Tests that need to mock the contents of /proc/stat or */cpuacct/stat can set this map from + # the file that needs to be mocked to the mock file (each test starts with an empty map). 
If + # an Exception is given instead of a path, the exception is raised + # + cls.mock_read_file_map = {} + + def mock_read_file(filepath, **args): + if filepath in cls.mock_read_file_map: + mapped_value = cls.mock_read_file_map[filepath] + if isinstance(mapped_value, Exception): + raise mapped_value + filepath = mapped_value + return original_read_file(filepath, **args) + + cls.mock_read_file = patch("azurelinuxagent.common.utils.fileutil.read_file", side_effect=mock_read_file) + cls.mock_read_file.start() + + @classmethod + def tearDownClass(cls): + cls.mock_read_file.stop() + AgentTestCase.tearDownClass() + + def setUp(self): + AgentTestCase.setUp(self) + TestCpuControllerV2.mock_read_file_map.clear() + + def test_initialize_cpu_usage_v2_should_set_current_cpu_usage(self): + controller = CpuControllerV2("test", "/sys/fs/cgroup/cpu/system.slice/test") + + TestCpuControllerV2.mock_read_file_map = { + "/proc/uptime": os.path.join(data_dir, "cgroups", "v2", "proc_uptime_t0"), + os.path.join(controller.path, "cpu.stat"): os.path.join(data_dir, "cgroups", "v2", "cpu.stat_t0") + } + + controller.initialize_cpu_usage() + + self.assertEqual(controller._current_cgroup_cpu, 817045397 / 1E6) + self.assertEqual(controller._current_system_cpu, 776968.02) + + def test_get_cpu_usage_v2_should_return_the_cpu_usage_since_its_last_invocation(self): + controller = CpuControllerV2("test", "/sys/fs/cgroup/cpu/system.slice/test") + + TestCpuControllerV2.mock_read_file_map = { + "/proc/uptime": os.path.join(data_dir, "cgroups", "v2", "proc_uptime_t0"), + os.path.join(controller.path, "cpu.stat"): os.path.join(data_dir, "cgroups", "v2", "cpu.stat_t0") + } + + controller.initialize_cpu_usage() + + TestCpuControllerV2.mock_read_file_map = { + "/proc/uptime": os.path.join(data_dir, "cgroups", "v2", "proc_uptime_t1"), + os.path.join(controller.path, "cpu.stat"): os.path.join(data_dir, "cgroups", "v2", "cpu.stat_t1") + } + + cpu_usage = controller.get_cpu_usage() + + cgroup_usage_delta = 
(819624087 / 1E6) - (817045397 / 1E6) + system_usage_delta = 777350.57 - 776968.02 + self.assertEqual(cpu_usage, round(100.0 * cgroup_usage_delta/system_usage_delta, 3)) + + TestCpuControllerV2.mock_read_file_map = { + "/proc/uptime": os.path.join(data_dir, "cgroups", "v2", "proc_uptime_t2"), + os.path.join(controller.path, "cpu.stat"): os.path.join(data_dir, "cgroups", "v2", "cpu.stat_t2") + } + + cpu_usage = controller.get_cpu_usage() + + cgroup_usage_delta = (822052295 / 1E6) - (819624087 / 1E6) + system_usage_delta = 779218.68 - 777350.57 + self.assertEqual(cpu_usage, round(100.0 * cgroup_usage_delta/system_usage_delta, 3)) + + def test_initialize_cpu_usage_v2_should_set_the_cgroup_usage_to_0_when_the_cgroup_does_not_exist(self): + controller = CpuControllerV2("test", "/sys/fs/cgroup/cpu/system.slice/test") + + io_error_2 = IOError() + io_error_2.errno = errno.ENOENT # "No such directory" + + TestCpuControllerV2.mock_read_file_map = { + "/proc/uptime": os.path.join(data_dir, "cgroups", "v2", "proc_uptime_t0"), + os.path.join(controller.path, "cpu.stat"): io_error_2 + } + + controller.initialize_cpu_usage() + + self.assertEqual(controller._current_cgroup_cpu, 0) + self.assertEqual(controller._current_system_cpu, 776968.02) # check the system usage just for test sanity + + def test_initialize_cpu_usage_v2_should_raise_an_exception_when_called_more_than_once(self): + controller = CpuControllerV2("test", "/sys/fs/cgroup/cpu/system.slice/test") + + TestCpuControllerV2.mock_read_file_map = { + "/proc/uptime": os.path.join(data_dir, "cgroups", "v2", "proc_uptime_t0"), + os.path.join(controller.path, "cpu.stat"): os.path.join(data_dir, "cgroups", "v2", "cpu.stat_t0") + } + + controller.initialize_cpu_usage() + + with self.assertRaises(CGroupsException): + controller.initialize_cpu_usage() + + def test_get_cpu_usage_v2_should_raise_an_exception_when_initialize_cpu_usage_has_not_been_invoked(self): + controller = CpuControllerV2("test", 
"/sys/fs/cgroup/cpu/system.slice/test") + + with self.assertRaises(CGroupsException): + cpu_usage = controller.get_cpu_usage() # pylint: disable=unused-variable + + def test_get_throttled_time_v2_should_return_the_value_since_its_last_invocation(self): + test_file = os.path.join(self.tmp_dir, "cpu.stat") + shutil.copyfile(os.path.join(data_dir, "cgroups", "v2", "cpu.stat_t0"), test_file) # throttled_time = 15735198706 + controller = CpuControllerV2("test", self.tmp_dir) + controller.initialize_cpu_usage() + shutil.copyfile(os.path.join(data_dir, "cgroups", "v2", "cpu.stat_t1"), test_file) # throttled_usec = 15796563650 + + throttled_time = controller.get_cpu_throttled_time() + + self.assertEqual(throttled_time, round(float(15796563650 - 15735198706) / 1E6, 3), "The value of throttled_time is incorrect") + + def test_get_tracked_metrics_v2_should_return_the_throttled_time(self): + controller = CpuControllerV2("test", os.path.join(data_dir, "cgroups", "v2")) + controller.initialize_cpu_usage() + + def find_throttled_time(metrics): + return [m for m in metrics if m.counter == MetricsCounter.THROTTLED_TIME] + + found = find_throttled_time(controller.get_tracked_metrics()) + self.assertTrue(len(found) == 0, "get_tracked_metrics should not fetch the throttled time by default. Found: {0}".format(found)) + + found = find_throttled_time(controller.get_tracked_metrics(track_throttled_time=True)) + self.assertTrue(len(found) == 1, "get_tracked_metrics should have fetched the throttled time by default. 
Found: {0}".format(found)) diff --git a/tests/ga/test_extension.py b/tests/ga/test_extension.py index 62bd11099d..95b2427bce 100644 --- a/tests/ga/test_extension.py +++ b/tests/ga/test_extension.py @@ -63,10 +63,6 @@ SUCCESS_CODE_FROM_STATUS_FILE = 1 -def do_not_run_test(): - return True - - def raise_system_exception(): raise Exception diff --git a/tests/ga/test_exthandlers.py b/tests/ga/test_exthandlers.py index f56ebce14b..3252dcb239 100644 --- a/tests/ga/test_exthandlers.py +++ b/tests/ga/test_exthandlers.py @@ -681,7 +681,7 @@ def test_it_should_read_only_the_head_of_large_outputs(self): self.assertGreaterEqual(len(output), 1024) self.assertLessEqual(len(output), TELEMETRY_MESSAGE_MAX_LEN) - mock_format.assert_called_once() + self.assertEqual(1, mock_format.call_count, "format_stdout_stderr should be called once") args, kwargs = mock_format.call_args # pylint: disable=unused-variable stdout, stderr = args diff --git a/tests/ga/test_exthandlers_download_extension.py b/tests/ga/test_exthandlers_download_extension.py index b3ed96a89a..9f56a0202f 100644 --- a/tests/ga/test_exthandlers_download_extension.py +++ b/tests/ga/test_exthandlers_download_extension.py @@ -127,8 +127,8 @@ def stream(_, destination, **__): self.ext_handler_instance.download() # first download attempt should succeed - mock_stream.assert_called_once() - mock_report_event.assert_called_once() + self.assertEqual(1, mock_stream.call_count, "wireserver stream should be called once") + self.assertEqual(1, mock_report_event.call_count, "report_event should be called once") self._assert_download_and_expand_succeeded() @@ -154,7 +154,7 @@ def stream(_, destination, **__): with DownloadExtensionTestCase.create_mock_stream(stream) as mock_stream: self.ext_handler_instance.download() - mock_stream.assert_called_once() + self.assertEqual(1, mock_stream.call_count, "wireserver stream should be called once") self._assert_download_and_expand_succeeded() @@ -179,7 +179,8 @@ def stream(_, destination, **__): 
with DownloadExtensionTestCase.create_mock_stream(stream) as mock_stream: self.ext_handler_instance.download() - mock_stream.assert_called_once() + self.assertEqual(1, mock_stream.call_count, "wireserver stream should be called once") + self._assert_download_and_expand_succeeded() self.assertEqual(self.ext_handler_instance.get_handler_state(), ExtHandlerState.NotInstalled, "Ensure that the state is maintained for extension HandlerState") diff --git a/tests/ga/test_exthandlers_exthandlerinstance.py b/tests/ga/test_exthandlers_exthandlerinstance.py index 846bb89e92..5b98c9f41c 100644 --- a/tests/ga/test_exthandlers_exthandlerinstance.py +++ b/tests/ga/test_exthandlers_exthandlerinstance.py @@ -117,7 +117,7 @@ def test_rm_ext_handler_dir_should_report_an_event_if_an_error_occurs_while_dele def mock_remove(path, dir_fd=None): # pylint: disable=unused-argument if path.endswith("extension_file2"): - raise IOError("A mocked error") + raise IOError(999,"A mocked error","extension_file2") original_remove_api(path) with patch.object(shutil.os, remove_api_name, mock_remove): diff --git a/tests/ga/test_logcollector.py b/tests/ga/test_logcollector.py index cedf894b09..6a8be83afb 100644 --- a/tests/ga/test_logcollector.py +++ b/tests/ga/test_logcollector.py @@ -79,9 +79,9 @@ def _mock_cgroup(cls): def mock_read_file(filepath, **args): if filepath == "/proc/stat": - filepath = os.path.join(data_dir, "cgroups", "proc_stat_t0") + filepath = os.path.join(data_dir, "cgroups", "v1", "proc_stat_t0") elif filepath.endswith("/cpuacct.stat"): - filepath = os.path.join(data_dir, "cgroups", "cpuacct.stat_t0") + filepath = os.path.join(data_dir, "cgroups", "v1", "cpuacct.stat_t0") return original_read_file(filepath, **args) cls._mock_read_cpu_cgroup_file = patch("azurelinuxagent.common.utils.fileutil.read_file", side_effect=mock_read_file) @@ -213,7 +213,7 @@ def test_log_collector_parses_commands_in_manifest(self): with patch("azurelinuxagent.ga.logcollector.MANIFEST_NORMAL", manifest): 
with patch('azurelinuxagent.ga.logcollector.LogCollector._initialize_telemetry'): log_collector = LogCollector() - archive = log_collector.collect_logs_and_get_archive() + archive, uncompressed_file_size = log_collector.collect_logs_and_get_archive() with open(self.output_results_file_path, "r") as fh: results = fh.readlines() @@ -227,6 +227,7 @@ def test_log_collector_parses_commands_in_manifest(self): # Assert copy was parsed self._assert_archive_created(archive) self._assert_files_are_in_archive(expected_files=[file_to_collect]) + self.assertEqual(uncompressed_file_size, os.path.getsize(file_to_collect)) no_files = self._get_number_of_files_in_archive() self.assertEqual(1, no_files, "Expected 1 file in archive, found {0}!".format(no_files)) @@ -242,10 +243,11 @@ def test_log_collector_uses_full_manifest_when_full_mode_enabled(self): with patch("azurelinuxagent.ga.logcollector.MANIFEST_FULL", manifest): with patch('azurelinuxagent.ga.logcollector.LogCollector._initialize_telemetry'): log_collector = LogCollector(is_full_mode=True) - archive = log_collector.collect_logs_and_get_archive() + archive, uncompressed_file_size = log_collector.collect_logs_and_get_archive() self._assert_archive_created(archive) self._assert_files_are_in_archive(expected_files=[file_to_collect]) + self.assertEqual(uncompressed_file_size, os.path.getsize(file_to_collect)) no_files = self._get_number_of_files_in_archive() self.assertEqual(1, no_files, "Expected 1 file in archive, found {0}!".format(no_files)) @@ -256,7 +258,7 @@ def test_log_collector_should_collect_all_files(self): with patch('azurelinuxagent.ga.logcollector.LogCollector._initialize_telemetry'): log_collector = LogCollector() - archive = log_collector.collect_logs_and_get_archive() + archive, uncompressed_file_size = log_collector.collect_logs_and_get_archive() self._assert_archive_created(archive) @@ -269,6 +271,10 @@ def test_log_collector_should_collect_all_files(self): os.path.join(self.root_collect_dir, "another_dir", 
"least_important_file") ] self._assert_files_are_in_archive(expected_files) + expected_total_uncompressed_size = 0 + for file in expected_files: + expected_total_uncompressed_size += os.path.getsize(file) + self.assertEqual(uncompressed_file_size, expected_total_uncompressed_size) no_files = self._get_number_of_files_in_archive() self.assertEqual(6, no_files, "Expected 6 files in archive, found {0}!".format(no_files)) @@ -278,7 +284,7 @@ def test_log_collector_should_truncate_large_text_files_and_ignore_large_binary_ with patch("azurelinuxagent.ga.logcollector._FILE_SIZE_LIMIT", SMALL_FILE_SIZE): with patch('azurelinuxagent.ga.logcollector.LogCollector._initialize_telemetry'): log_collector = LogCollector() - archive = log_collector.collect_logs_and_get_archive() + archive, uncompressed_file_size = log_collector.collect_logs_and_get_archive() self._assert_archive_created(archive) @@ -294,6 +300,13 @@ def test_log_collector_should_truncate_large_text_files_and_ignore_large_binary_ ] self._assert_files_are_in_archive(expected_files) self._assert_files_are_not_in_archive(unexpected_files) + total_uncompressed_file_size = 0 + for file in expected_files: + if file.startswith("truncated_"): + total_uncompressed_file_size += SMALL_FILE_SIZE + else: + total_uncompressed_file_size += os.path.getsize(file) + self.assertEqual(total_uncompressed_file_size, uncompressed_file_size) no_files = self._get_number_of_files_in_archive() self.assertEqual(5, no_files, "Expected 5 files in archive, found {0}!".format(no_files)) @@ -312,7 +325,7 @@ def test_log_collector_should_prioritize_important_files_if_archive_too_big(self with patch("azurelinuxagent.ga.logcollector._MUST_COLLECT_FILES", must_collect_files): with patch('azurelinuxagent.ga.logcollector.LogCollector._initialize_telemetry'): log_collector = LogCollector() - archive = log_collector.collect_logs_and_get_archive() + archive, uncompressed_file_size = log_collector.collect_logs_and_get_archive() 
self._assert_archive_created(archive) @@ -328,6 +341,10 @@ def test_log_collector_should_prioritize_important_files_if_archive_too_big(self ] self._assert_files_are_in_archive(expected_files) self._assert_files_are_not_in_archive(unexpected_files) + expected_total_uncompressed_size = 0 + for file in expected_files: + expected_total_uncompressed_size += os.path.getsize(file) + self.assertEqual(uncompressed_file_size, expected_total_uncompressed_size) no_files = self._get_number_of_files_in_archive() self.assertEqual(3, no_files, "Expected 3 files in archive, found {0}!".format(no_files)) @@ -338,7 +355,7 @@ def test_log_collector_should_prioritize_important_files_if_archive_too_big(self with patch("azurelinuxagent.ga.logcollector._UNCOMPRESSED_ARCHIVE_SIZE_LIMIT", 10 * 1024 * 1024): with patch("azurelinuxagent.ga.logcollector._MUST_COLLECT_FILES", must_collect_files): - second_archive = log_collector.collect_logs_and_get_archive() + second_archive, second_uncompressed_file_size = log_collector.collect_logs_and_get_archive() expected_files = [ os.path.join(self.root_collect_dir, "waagent.log"), @@ -352,6 +369,10 @@ def test_log_collector_should_prioritize_important_files_if_archive_too_big(self ] self._assert_files_are_in_archive(expected_files) self._assert_files_are_not_in_archive(unexpected_files) + expected_total_uncompressed_size = 0 + for file in expected_files: + expected_total_uncompressed_size += os.path.getsize(file) + self.assertEqual(second_uncompressed_file_size, expected_total_uncompressed_size) self._assert_archive_created(second_archive) @@ -363,7 +384,7 @@ def test_log_collector_should_update_archive_when_files_are_new_or_modified_or_d # needs to be updated in the archive, deleted if removed from disk, and added if not previously seen. 
with patch('azurelinuxagent.ga.logcollector.LogCollector._initialize_telemetry'): log_collector = LogCollector() - first_archive = log_collector.collect_logs_and_get_archive() + first_archive, first_uncompressed_file_size = log_collector.collect_logs_and_get_archive() self._assert_archive_created(first_archive) # Everything should be in the archive @@ -376,6 +397,10 @@ def test_log_collector_should_update_archive_when_files_are_new_or_modified_or_d os.path.join(self.root_collect_dir, "another_dir", "least_important_file") ] self._assert_files_are_in_archive(expected_files) + expected_total_uncompressed_size = 0 + for file in expected_files: + expected_total_uncompressed_size += os.path.getsize(file) + self.assertEqual(first_uncompressed_file_size, expected_total_uncompressed_size) no_files = self._get_number_of_files_in_archive() self.assertEqual(6, no_files, "Expected 6 files in archive, found {0}!".format(no_files)) @@ -392,7 +417,7 @@ def test_log_collector_should_update_archive_when_files_are_new_or_modified_or_d LARGE_FILE_SIZE) rm_files(os.path.join(self.root_collect_dir, "waagent.log.1")) - second_archive = log_collector.collect_logs_and_get_archive() + second_archive, second_uncompressed_file_size = log_collector.collect_logs_and_get_archive() self._assert_archive_created(second_archive) expected_files = [ @@ -408,6 +433,10 @@ def test_log_collector_should_update_archive_when_files_are_new_or_modified_or_d ] self._assert_files_are_in_archive(expected_files) self._assert_files_are_not_in_archive(unexpected_files) + expected_total_uncompressed_size = 0 + for file in expected_files: + expected_total_uncompressed_size += os.path.getsize(file) + self.assertEqual(second_uncompressed_file_size, expected_total_uncompressed_size) file = os.path.join(self.root_collect_dir, "waagent.log") # pylint: disable=redefined-builtin new_file_size = self._get_uncompressed_file_size(file) @@ -434,7 +463,7 @@ def 
test_log_collector_should_clean_up_uncollected_truncated_files(self): with patch("azurelinuxagent.ga.logcollector._FILE_SIZE_LIMIT", SMALL_FILE_SIZE): with patch('azurelinuxagent.ga.logcollector.LogCollector._initialize_telemetry'): log_collector = LogCollector() - archive = log_collector.collect_logs_and_get_archive() + archive, uncompressed_file_size = log_collector.collect_logs_and_get_archive() self._assert_archive_created(archive) @@ -443,6 +472,13 @@ def test_log_collector_should_clean_up_uncollected_truncated_files(self): self._truncated_path(os.path.join(self.root_collect_dir, "waagent.log.1")), # this file should be truncated ] self._assert_files_are_in_archive(expected_files) + expected_total_uncompressed_size = 0 + for file in expected_files: + if file.startswith("truncated_"): + expected_total_uncompressed_size += SMALL_FILE_SIZE + else: + expected_total_uncompressed_size += os.path.getsize(file) + self.assertEqual(uncompressed_file_size, expected_total_uncompressed_size) no_files = self._get_number_of_files_in_archive() self.assertEqual(2, no_files, "Expected 2 files in archive, found {0}!".format(no_files)) @@ -456,7 +492,7 @@ def test_log_collector_should_clean_up_uncollected_truncated_files(self): with patch("azurelinuxagent.ga.logcollector._FILE_SIZE_LIMIT", SMALL_FILE_SIZE): with patch('azurelinuxagent.ga.logcollector.LogCollector._initialize_telemetry'): log_collector = LogCollector() - second_archive = log_collector.collect_logs_and_get_archive() + second_archive, second_uncompressed_file_size = log_collector.collect_logs_and_get_archive() expected_files = [ os.path.join(self.root_collect_dir, "waagent.log"), @@ -467,6 +503,13 @@ def test_log_collector_should_clean_up_uncollected_truncated_files(self): ] self._assert_files_are_in_archive(expected_files) self._assert_files_are_not_in_archive(unexpected_files) + expected_total_uncompressed_size = 0 + for file in expected_files: + if file.startswith("truncated_"): + expected_total_uncompressed_size 
+= SMALL_FILE_SIZE + else: + expected_total_uncompressed_size += os.path.getsize(file) + self.assertEqual(second_uncompressed_file_size, expected_total_uncompressed_size) self._assert_archive_created(second_archive) diff --git a/tests/ga/test_memorycontroller.py b/tests/ga/test_memorycontroller.py new file mode 100644 index 0000000000..1beb9a33f0 --- /dev/null +++ b/tests/ga/test_memorycontroller.py @@ -0,0 +1,124 @@ +# Copyright 2018 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# Requires Python 2.4+ and Openssl 1.0+ +# + +from __future__ import print_function + +import errno +import os +import shutil + +from azurelinuxagent.ga.cgroupcontroller import CounterNotFound +from azurelinuxagent.ga.memorycontroller import MemoryControllerV1, MemoryControllerV2 +from tests.lib.tools import AgentTestCase, data_dir + + +class TestMemoryControllerV1(AgentTestCase): + def test_get_metrics_v1(self): + test_mem_controller = MemoryControllerV1("test_extension", os.path.join(data_dir, "cgroups", "v1")) + + rss_memory_usage, cache_memory_usage = test_mem_controller.get_memory_usage() + self.assertEqual(100000, rss_memory_usage) + self.assertEqual(50000, cache_memory_usage) + + max_memory_usage = test_mem_controller.get_max_memory_usage() + self.assertEqual(1000000, max_memory_usage) + + swap_memory_usage = test_mem_controller.try_swap_memory_usage() + self.assertEqual(20000, swap_memory_usage) + + def test_get_metrics_v1_when_files_not_present(self): + test_mem_controller = MemoryControllerV1("test_extension", os.path.join(data_dir, "cgroups")) + + with self.assertRaises(IOError) as e: + test_mem_controller.get_memory_usage() + + self.assertEqual(e.exception.errno, errno.ENOENT) + + with self.assertRaises(IOError) as e: + test_mem_controller.get_max_memory_usage() + + self.assertEqual(e.exception.errno, errno.ENOENT) + + with self.assertRaises(IOError) as e: + test_mem_controller.try_swap_memory_usage() + + self.assertEqual(e.exception.errno, errno.ENOENT) + + def test_get_memory_usage_v1_counters_not_found(self): + test_file = os.path.join(self.tmp_dir, "memory.stat") + shutil.copyfile(os.path.join(data_dir, "cgroups", "v1", "memory.stat_missing"), test_file) + test_mem_controller = MemoryControllerV1("test_extension", self.tmp_dir) + + with self.assertRaises(CounterNotFound): + test_mem_controller.get_memory_usage() + + swap_memory_usage = test_mem_controller.try_swap_memory_usage() + self.assertEqual(0, swap_memory_usage) + + +class 
TestMemoryControllerV2(AgentTestCase): + def test_get_metrics_v2(self): + test_mem_controller = MemoryControllerV2("test_extension", os.path.join(data_dir, "cgroups", "v2")) + + anon_memory_usage, cache_memory_usage = test_mem_controller.get_memory_usage() + self.assertEqual(17589300, anon_memory_usage) + self.assertEqual(134553600, cache_memory_usage) + + max_memory_usage = test_mem_controller.get_max_memory_usage() + self.assertEqual(194494464, max_memory_usage) + + swap_memory_usage = test_mem_controller.try_swap_memory_usage() + self.assertEqual(20000, swap_memory_usage) + + memory_throttled_events = test_mem_controller.get_memory_throttled_events() + self.assertEqual(9, memory_throttled_events) + + def test_get_metrics_v2_when_files_not_present(self): + test_mem_controller = MemoryControllerV2("test_extension", os.path.join(data_dir, "cgroups")) + + with self.assertRaises(IOError) as e: + test_mem_controller.get_memory_usage() + + self.assertEqual(e.exception.errno, errno.ENOENT) + + with self.assertRaises(IOError) as e: + test_mem_controller.get_max_memory_usage() + + self.assertEqual(e.exception.errno, errno.ENOENT) + + with self.assertRaises(IOError) as e: + test_mem_controller.try_swap_memory_usage() + + self.assertEqual(e.exception.errno, errno.ENOENT) + + with self.assertRaises(IOError) as e: + test_mem_controller.get_memory_throttled_events() + + self.assertEqual(e.exception.errno, errno.ENOENT) + + def test_get_memory_usage_v1_counters_not_found(self): + test_stat_file = os.path.join(self.tmp_dir, "memory.stat") + shutil.copyfile(os.path.join(data_dir, "cgroups", "v2", "memory.stat_missing"), test_stat_file) + test_events_file = os.path.join(self.tmp_dir, "memory.events") + shutil.copyfile(os.path.join(data_dir, "cgroups", "v2", "memory.stat_missing"), test_events_file) + test_mem_controller = MemoryControllerV2("test_extension", self.tmp_dir) + + with self.assertRaises(CounterNotFound): + test_mem_controller.get_memory_usage() + + with 
self.assertRaises(CounterNotFound): + test_mem_controller.get_memory_throttled_events() diff --git a/tests/ga/test_monitor.py b/tests/ga/test_monitor.py index 1dbec27c39..a2100cde58 100644 --- a/tests/ga/test_monitor.py +++ b/tests/ga/test_monitor.py @@ -21,12 +21,14 @@ import string from azurelinuxagent.common import event, logger -from azurelinuxagent.ga.cgroup import CpuCgroup, MemoryCgroup, MetricValue, _REPORT_EVERY_HOUR +from azurelinuxagent.ga.cgroupcontroller import MetricValue, _REPORT_EVERY_HOUR from azurelinuxagent.ga.cgroupstelemetry import CGroupsTelemetry from azurelinuxagent.common.event import EVENTS_DIRECTORY from azurelinuxagent.common.protocol.healthservice import HealthService from azurelinuxagent.common.protocol.util import ProtocolUtil from azurelinuxagent.common.protocol.wire import WireProtocol +from azurelinuxagent.ga.cpucontroller import CpuControllerV1 +from azurelinuxagent.ga.memorycontroller import MemoryControllerV1 from azurelinuxagent.ga.monitor import get_monitor_handler, PeriodicOperation, SendImdsHeartbeat, \ ResetPeriodicLogMessages, SendHostPluginHeartbeat, PollResourceUsage, \ ReportNetworkErrors, ReportNetworkConfigurationChanges, PollSystemWideResourceUsage @@ -222,23 +224,23 @@ def test_send_extension_metrics_telemetry_for_empty_cgroup(self, patch_poll_all_ self.assertEqual(0, patch_add_metric.call_count) @patch('azurelinuxagent.common.event.EventLogger.add_metric') - @patch("azurelinuxagent.ga.cgroup.MemoryCgroup.get_memory_usage") + @patch("azurelinuxagent.ga.memorycontroller.MemoryControllerV1.get_memory_usage") @patch('azurelinuxagent.common.logger.Logger.periodic_warn') def test_send_extension_metrics_telemetry_handling_memory_cgroup_exceptions_errno2(self, patch_periodic_warn, # pylint: disable=unused-argument - patch_get_memory_usage, + get_memory_usage, patch_add_metric, *args): ioerror = IOError() ioerror.errno = 2 - patch_get_memory_usage.side_effect = ioerror + get_memory_usage.side_effect = ioerror - 
CGroupsTelemetry._tracked["/test/path"] = MemoryCgroup("cgroup_name", "/test/path") + CGroupsTelemetry._tracked["/test/path"] = MemoryControllerV1("_cgroup_name", "/test/path") PollResourceUsage().run() self.assertEqual(0, patch_periodic_warn.call_count) self.assertEqual(0, patch_add_metric.call_count) # No metrics should be sent. @patch('azurelinuxagent.common.event.EventLogger.add_metric') - @patch("azurelinuxagent.ga.cgroup.CpuCgroup.get_cpu_usage") + @patch("azurelinuxagent.ga.cpucontroller.CpuControllerV1.get_cpu_usage") @patch('azurelinuxagent.common.logger.Logger.periodic_warn') def test_send_extension_metrics_telemetry_handling_cpu_cgroup_exceptions_errno2(self, patch_periodic_warn, # pylint: disable=unused-argument patch_cpu_usage, patch_add_metric, @@ -247,7 +249,7 @@ def test_send_extension_metrics_telemetry_handling_cpu_cgroup_exceptions_errno2( ioerror.errno = 2 patch_cpu_usage.side_effect = ioerror - CGroupsTelemetry._tracked["/test/path"] = CpuCgroup("cgroup_name", "/test/path") + CGroupsTelemetry._tracked["/test/path"] = CpuControllerV1("_cgroup_name", "/test/path") PollResourceUsage().run() self.assertEqual(0, patch_periodic_warn.call_count) diff --git a/tests/ga/test_multi_config_extension.py b/tests/ga/test_multi_config_extension.py index 0fe8dea5a3..450ca071dc 100644 --- a/tests/ga/test_multi_config_extension.py +++ b/tests/ga/test_multi_config_extension.py @@ -41,7 +41,7 @@ def __init__(self, name, version, state="enabled"): self.version = version self.state = state self.is_invalid_setting = False - self.settings = dict() + self.settings = {} class _TestExtensionObject: def __init__(self, name, seq_no, dependency_level="0", state="enabled"): @@ -94,12 +94,11 @@ def _get_mock_expected_handler_data(self, rc_extensions, vmaccess_extensions, ge def test_it_should_parse_multi_config_settings_properly(self): self.test_data['ext_conf'] = os.path.join(self._MULTI_CONFIG_TEST_DATA, "ext_conf_with_multi_config.xml") - rc_extensions = dict() - 
rc_extensions["firstRunCommand"] = self._TestExtensionObject(name="firstRunCommand", seq_no=2) - rc_extensions["secondRunCommand"] = self._TestExtensionObject(name="secondRunCommand", seq_no=2, - dependency_level="3") - rc_extensions["thirdRunCommand"] = self._TestExtensionObject(name="thirdRunCommand", seq_no=1, - dependency_level="4") + rc_extensions = { + "firstRunCommand": self._TestExtensionObject(name="firstRunCommand", seq_no=2), + "secondRunCommand": self._TestExtensionObject(name="secondRunCommand", seq_no=2, dependency_level="3"), + "thirdRunCommand": self._TestExtensionObject(name="thirdRunCommand", seq_no=1, dependency_level="4") + } vmaccess_extensions = { "Microsoft.Compute.VMAccessAgent": self._TestExtensionObject(name="Microsoft.Compute.VMAccessAgent", @@ -115,12 +114,11 @@ def test_it_should_parse_multi_config_with_disable_state_properly(self): self.test_data['ext_conf'] = os.path.join(self._MULTI_CONFIG_TEST_DATA, "ext_conf_with_disabled_multi_config.xml") - rc_extensions = dict() - rc_extensions["firstRunCommand"] = self._TestExtensionObject(name="firstRunCommand", seq_no=3) - rc_extensions["secondRunCommand"] = self._TestExtensionObject(name="secondRunCommand", seq_no=3, - dependency_level="1") - rc_extensions["thirdRunCommand"] = self._TestExtensionObject(name="thirdRunCommand", seq_no=1, - dependency_level="4", state="disabled") + rc_extensions = { + "firstRunCommand": self._TestExtensionObject(name="firstRunCommand", seq_no=3), + "secondRunCommand": self._TestExtensionObject(name="secondRunCommand", seq_no=3, dependency_level="1"), + "thirdRunCommand": self._TestExtensionObject(name="thirdRunCommand", seq_no=1, dependency_level="4", state="disabled") + } vmaccess_extensions = { "Microsoft.Compute.VMAccessAgent": self._TestExtensionObject(name="Microsoft.Compute.VMAccessAgent", @@ -286,7 +284,8 @@ def __setup_generic_test_env(self): third_ext = extension_emulator(name="OSTCExtensions.ExampleHandlerLinux.thirdExtension") fourth_ext = 
extension_emulator(name="Microsoft.Powershell.ExampleExtension") - with self._setup_test_env(mock_manifest=True) as (exthandlers_handler, protocol, no_of_extensions): + # In _setup_test_env() contextmanager, yield is used inside an if-else block and that's creating a false positive pylint warning + with self._setup_test_env(mock_manifest=True) as (exthandlers_handler, protocol, no_of_extensions): # pylint: disable=contextmanager-generator-missing-cleanup with enable_invocations(first_ext, second_ext, third_ext, fourth_ext) as invocation_record: exthandlers_handler.run() exthandlers_handler.report_ext_handlers_status() @@ -1072,7 +1071,8 @@ def __setup_test_and_get_exts(self): dependent_sc_ext = extension_emulator(name="Microsoft.Powershell.ExampleExtension") independent_sc_ext = extension_emulator(name="Microsoft.Azure.Geneva.GenevaMonitoring", version="1.1.0") - with self._setup_test_env() as (exthandlers_handler, protocol, no_of_extensions): + # In _setup_test_env() contextmanager, yield is used inside an if-else block and that's creating a false positive pylint warning + with self._setup_test_env() as (exthandlers_handler, protocol, no_of_extensions): # pylint: disable=contextmanager-generator-missing-cleanup yield exthandlers_handler, protocol, no_of_extensions, first_ext, second_ext, third_ext, dependent_sc_ext, independent_sc_ext def test_it_should_process_dependency_chain_extensions_properly(self): diff --git a/tests/ga/test_persist_firewall_rules.py b/tests/ga/test_persist_firewall_rules.py index 5ee397baf3..7754f1efb2 100644 --- a/tests/ga/test_persist_firewall_rules.py +++ b/tests/ga/test_persist_firewall_rules.py @@ -127,13 +127,6 @@ def __assert_systemctl_called(self, cmd="enable", validate_command_called=True): else: self.assertNotIn(systemctl_command, self.__executed_commands, "Systemctl command {0} found".format(cmd)) - def __assert_systemctl_reloaded(self, validate_command_called=True): - systemctl_reload = ["systemctl", "daemon-reload"] - if 
validate_command_called: - self.assertIn(systemctl_reload, self.__executed_commands, "Systemctl config not reloaded") - else: - self.assertNotIn(systemctl_reload, self.__executed_commands, "Systemctl config reloaded") - def __assert_firewall_cmd_running_called(self, validate_command_called=True): cmd = PersistFirewallRulesHandler._FIREWALLD_RUNNING_CMD if validate_command_called: @@ -144,7 +137,6 @@ def __assert_firewall_cmd_running_called(self, validate_command_called=True): def __assert_network_service_setup_properly(self): self.__assert_systemctl_called(cmd="is-enabled", validate_command_called=True) self.__assert_systemctl_called(cmd="enable", validate_command_called=True) - self.__assert_systemctl_reloaded() self.__assert_firewall_called(cmd=FirewallCmdDirectCommands.PassThrough, validate_command_called=False) self.assertTrue(os.path.exists(self._network_service_unit_file), "Service unit file should be there") self.assertTrue(os.path.exists(self._binary_file), "Binary file should be there") @@ -200,7 +192,6 @@ def __setup_and_assert_network_service_setup_scenario(self, handler, mock_popen= self.__assert_systemctl_called(cmd="is-enabled", validate_command_called=True) self.__assert_systemctl_called(cmd="enable", validate_command_called=True) - self.__assert_systemctl_reloaded(validate_command_called=True) self.__assert_firewall_cmd_running_called(validate_command_called=True) self.__assert_firewall_called(cmd=FirewallCmdDirectCommands.QueryPassThrough, validate_command_called=False) self.__assert_firewall_called(cmd=FirewallCmdDirectCommands.RemovePassThrough, validate_command_called=False) @@ -234,7 +225,6 @@ def test_it_should_skip_setup_if_agent_network_setup_service_already_enabled_and self.__assert_systemctl_called(cmd="is-enabled", validate_command_called=True) self.__assert_systemctl_called(cmd="enable", validate_command_called=False) - self.__assert_systemctl_reloaded(validate_command_called=False) 
self.__assert_firewall_cmd_running_called(validate_command_called=True) self.__assert_firewall_called(cmd=FirewallCmdDirectCommands.QueryPassThrough, validate_command_called=False) self.__assert_firewall_called(cmd=FirewallCmdDirectCommands.RemovePassThrough, validate_command_called=False) @@ -396,7 +386,6 @@ def test_it_should_delete_custom_service_files_if_firewalld_enabled(self): self.__assert_firewall_called(cmd=FirewallCmdDirectCommands.PassThrough, validate_command_called=True) self.__assert_systemctl_called(cmd="is-enabled", validate_command_called=False) self.__assert_systemctl_called(cmd="enable", validate_command_called=False) - self.__assert_systemctl_reloaded(validate_command_called=False) self.assertFalse(os.path.exists(handler.get_service_file_path()), "Service unit file found") self.assertFalse(os.path.exists(os.path.join(conf.get_lib_dir(), handler.BINARY_FILE_NAME)), "Binary file found") @@ -414,3 +403,18 @@ def test_it_should_reset_service_unit_files_if_version_changed(self): mock_popen=self.__mock_network_setup_service_enabled) self.assertNotIn(test_ver, fileutil.read_file(handler.get_service_file_path()), "Test version found incorrectly") + + def test_it_should_reset_service_unit_file_if_python_version_changes(self): + with self._get_persist_firewall_rules_handler() as handler: + # 1st step - Setup the service with some python Version + python_ver = "test_python" + with patch("sys.executable", python_ver): + self.__setup_and_assert_network_service_setup_scenario(handler) + self.assertIn(python_ver, fileutil.read_file(handler.get_service_file_path()), "Python version not found") + + # 2nd step - Re-run the setup and ensure the service file set up again even if service enabled + self.__executed_commands = [] + self.__setup_and_assert_network_service_setup_scenario(handler, + mock_popen=self.__mock_network_setup_service_enabled) + self.assertNotIn(python_ver, fileutil.read_file(handler.get_service_file_path()), + "Python version found incorrectly") 
diff --git a/tests/ga/test_remoteaccess_handler.py b/tests/ga/test_remoteaccess_handler.py index d4f1579260..d555c55a88 100644 --- a/tests/ga/test_remoteaccess_handler.py +++ b/tests/ga/test_remoteaccess_handler.py @@ -75,15 +75,14 @@ def mock_add_event(name, op, is_success, version, message): class TestRemoteAccessHandler(AgentTestCase): - eventing_data = [()] + eventing_data = () def setUp(self): super(TestRemoteAccessHandler, self).setUp() # Since ProtocolUtil is a singleton per thread, we need to clear it to ensure that the test cases do not # reuse a previous state clear_singleton_instances(ProtocolUtil) - for data in TestRemoteAccessHandler.eventing_data: - del data + TestRemoteAccessHandler.eventing_data = () # add_user tests @patch('azurelinuxagent.common.utils.cryptutil.CryptUtil.decrypt_secret', return_value="]aPPEv}uNg1FPnl?") diff --git a/tests/ga/test_send_telemetry_events.py b/tests/ga/test_send_telemetry_events.py index a9c87dde9a..1d15b4ff7f 100644 --- a/tests/ga/test_send_telemetry_events.py +++ b/tests/ga/test_send_telemetry_events.py @@ -340,7 +340,7 @@ def test_it_should_enqueue_and_send_events_properly(self, mock_lib_dir, *_): with patch("os.path.getmtime", return_value=test_mtime): with patch('os.getpid', return_value=test_eventpid): with patch("threading.Thread.ident", new_callable=PropertyMock(return_value=test_eventtid)): - with patch("threading.Thread.getName", return_value=test_taskname): + with patch("threading.Thread.name", new_callable=PropertyMock(return_value=test_taskname)): monitor_handler.run() TestSendTelemetryEventsHandler._stop_handler(telemetry_handler) diff --git a/tests/ga/test_update.py b/tests/ga/test_update.py index 6caa21f3c8..cf6908559d 100644 --- a/tests/ga/test_update.py +++ b/tests/ga/test_update.py @@ -20,6 +20,8 @@ from datetime import datetime, timedelta from threading import current_thread + +from azurelinuxagent.ga.agent_update_handler import INITIAL_UPDATE_STATE_FILE from azurelinuxagent.ga.guestagent import 
GuestAgent, GuestAgentError, \ AGENT_ERROR_FILE from tests.common.osutil.test_default import TestOSUtil @@ -52,7 +54,7 @@ from tests.lib.mock_update_handler import mock_update_handler from tests.lib.mock_wire_protocol import mock_wire_protocol, MockHttpResponse from tests.lib.wire_protocol_data import DATA_FILE, DATA_FILE_MULTIPLE_EXT, DATA_FILE_VM_SETTINGS -from tests.lib.tools import AgentTestCase, AgentTestCaseWithGetVmSizeMock, data_dir, DEFAULT, patch, load_bin_data, Mock, MagicMock, \ +from tests.lib.tools import AgentTestCase, data_dir, DEFAULT, patch, load_bin_data, Mock, MagicMock, \ clear_singleton_instances, is_python_version_26_or_34, skip_if_predicate_true from tests.lib import wire_protocol_data from tests.lib.http_request_predicates import HttpRequestPredicates @@ -119,7 +121,7 @@ def _get_update_handler(iterations=1, test_data=None, protocol=None, autoupdate_ yield update_handler, protocol -class UpdateTestCase(AgentTestCaseWithGetVmSizeMock): +class UpdateTestCase(AgentTestCase): _test_suite_tmp_dir = None _agent_zip_dir = None @@ -1037,7 +1039,7 @@ def _mock_popen(cmd, *args, **kwargs): "Not setting up persistent firewall rules as OS.EnableFirewall=False" == args[0] for (args, _) in patch_info.call_args_list), "Info not logged properly, got: {0}".format(patch_info.call_args_list)) - @skip_if_predicate_true(is_python_version_26_or_34, "Disabled on Python 2.6 and 3.4 for now. 
Need to revisit to fix it") + @skip_if_predicate_true(is_python_version_26_or_34, "Disabled on Python 2.6 and 3.4, they run on containers where the OS commands needed by the test are not present.") def test_it_should_setup_persistent_firewall_rules_on_startup(self): iterations = 1 executed_commands = [] @@ -1200,7 +1202,8 @@ def test_it_should_not_set_dns_tcp_iptable_if_drop_and_accept_available(self): @contextlib.contextmanager def _setup_test_for_ext_event_dirs_retention(self): try: - with _get_update_handler(test_data=DATA_FILE_MULTIPLE_EXT, autoupdate_enabled=False) as (update_handler, protocol): + # In _get_update_handler() contextmanager, yield is used inside an if-else block and that's creating a false positive pylint warning + with _get_update_handler(test_data=DATA_FILE_MULTIPLE_EXT, autoupdate_enabled=False) as (update_handler, protocol): # pylint: disable=contextmanager-generator-missing-cleanup with patch("azurelinuxagent.common.agent_supported_feature._ETPFeature.is_supported", True): update_handler.run(debug=True) expected_events_dirs = glob.glob(os.path.join(conf.get_ext_log_dir(), "*", EVENTS_DIRECTORY)) @@ -1281,6 +1284,9 @@ def update_goal_state_and_run_handler(autoupdate_enabled=True): protocol.set_http_handlers(http_get_handler=get_handler, http_put_handler=put_handler) + # mocking first agent update attempted + open(os.path.join(conf.get_lib_dir(), INITIAL_UPDATE_STATE_FILE), "a").close() + # Case 1: rsm version missing in GS when vm opt-in for rsm upgrades; report missing rsm version error protocol.mock_wire_data.set_extension_config("wire/ext_conf_version_missing_in_agent_family.xml") update_goal_state_and_run_handler() @@ -1480,11 +1486,14 @@ def create_conf_mocks(self, autoupdate_frequency, hotfix_frequency, normal_frequ @contextlib.contextmanager def __get_update_handler(self, iterations=1, test_data=None, - reload_conf=None, autoupdate_frequency=0.001, hotfix_frequency=1.0, normal_frequency=2.0): + reload_conf=None, 
autoupdate_frequency=0.001, hotfix_frequency=1.0, normal_frequency=2.0, initial_update_attempted=True): - test_data = DATA_FILE if test_data is None else test_data + if initial_update_attempted: + open(os.path.join(conf.get_lib_dir(), INITIAL_UPDATE_STATE_FILE), "a").close() - with _get_update_handler(iterations, test_data) as (update_handler, protocol): + test_data = DATA_FILE if test_data is None else test_data + # In _get_update_handler() contextmanager, yield is used inside an if-else block and that's creating a false positive pylint warning + with _get_update_handler(iterations, test_data) as (update_handler, protocol): # pylint: disable=contextmanager-generator-missing-cleanup protocol.aggregate_status = None @@ -1927,7 +1936,7 @@ def reload_conf(url, protocol): @patch('azurelinuxagent.ga.update.get_collect_logs_handler') @patch('azurelinuxagent.ga.update.get_monitor_handler') @patch('azurelinuxagent.ga.update.get_env_handler') -class MonitorThreadTest(AgentTestCaseWithGetVmSizeMock): +class MonitorThreadTest(AgentTestCase): def setUp(self): super(MonitorThreadTest, self).setUp() self.event_patch = patch('azurelinuxagent.common.event.add_event') @@ -1955,7 +1964,7 @@ def iterator(*_, **__): with patch('azurelinuxagent.ga.remoteaccess.get_remote_access_handler'): with patch('azurelinuxagent.ga.agent_update_handler.get_agent_update_handler'): with patch('azurelinuxagent.ga.update.initialize_event_logger_vminfo_common_parameters'): - with patch('azurelinuxagent.ga.cgroupapi.CGroupsApi.cgroups_supported', return_value=False): # skip all cgroup stuff + with patch('azurelinuxagent.ga.cgroupapi.CGroupUtil.cgroups_supported', return_value=False): # skip all cgroup stuff with patch('azurelinuxagent.ga.update.is_log_collection_allowed', return_value=True): with patch('time.sleep'): with patch('sys.exit'): @@ -2440,11 +2449,11 @@ def test_telemetry_heartbeat_creates_event(self, patch_add_event, patch_info, *_ with mock_wire_protocol(wire_protocol_data.DATA_FILE) as 
mock_protocol: update_handler = get_update_handler() - + agent_update_handler = Mock() update_handler.last_telemetry_heartbeat = datetime.utcnow() - timedelta(hours=1) - update_handler._send_heartbeat_telemetry(mock_protocol) + update_handler._send_heartbeat_telemetry(mock_protocol, agent_update_handler) self.assertEqual(1, patch_add_event.call_count) - self.assertTrue(any(call_args[0] == "[HEARTBEAT] Agent {0} is running as the goal state agent {1}" + self.assertTrue(any(call_args[0] == "[HEARTBEAT] Agent {0} is running as the goal state agent [DEBUG {1}]" for call_args in patch_info.call_args), "The heartbeat was not written to the agent's log") diff --git a/tests/lib/cgroups_tools.py b/tests/lib/cgroups_tools.py index 45b8174474..cb29ee9bfc 100644 --- a/tests/lib/cgroups_tools.py +++ b/tests/lib/cgroups_tools.py @@ -33,17 +33,3 @@ def create_legacy_agent_cgroup(cgroups_file_system_root, controller, daemon_pid) fileutil.append_file(os.path.join(legacy_cgroup, "cgroup.procs"), daemon_pid + "\n") return legacy_cgroup - @staticmethod - def create_agent_cgroup(cgroups_file_system_root, controller, extension_handler_pid): - """ - Previous versions of the daemon (2.2.31-2.2.40) wrote their PID to /sys/fs/cgroup/{cpu,memory}/WALinuxAgent/WALinuxAgent; - starting from version 2.2.41 we track the agent service in walinuxagent.service instead of WALinuxAgent/WALinuxAgent. - - This method creates a mock cgroup using the newer path and adds the given PID to it. 
- """ - new_cgroup = os.path.join(cgroups_file_system_root, controller, "walinuxagent.service") - if not os.path.exists(new_cgroup): - os.makedirs(new_cgroup) - fileutil.append_file(os.path.join(new_cgroup, "cgroup.procs"), extension_handler_pid + "\n") - return new_cgroup - diff --git a/tests/lib/mock_cgroup_environment.py b/tests/lib/mock_cgroup_environment.py index 3b51dce8fe..a8f5fa9a3a 100644 --- a/tests/lib/mock_cgroup_environment.py +++ b/tests/lib/mock_cgroup_environment.py @@ -20,29 +20,11 @@ from tests.lib.tools import patch, data_dir from tests.lib.mock_environment import MockEnvironment, MockCommand -_MOCKED_COMMANDS = [ +# Mocked commands which are common between v1, v2, and hybrid cgroup environments +_MOCKED_COMMANDS_COMMON = [ MockCommand(r"^systemctl --version$", '''systemd 237 +PAM +AUDIT +SELINUX +IMA +APPARMOR +SMACK +SYSVINIT +UTMP +LIBCRYPTSETUP +GCRYPT +GNUTLS +ACL +XZ +LZ4 +SECCOMP +BLKID +ELFUTILS +KMOD -IDN2 +IDN -PCRE2 default-hierarchy=hybrid -'''), - - MockCommand(r"^mount -t cgroup$", -'''cgroup on /sys/fs/cgroup/systemd type cgroup (rw,nosuid,nodev,noexec,relatime,xattr,name=systemd) -cgroup on /sys/fs/cgroup/rdma type cgroup (rw,nosuid,nodev,noexec,relatime,rdma) -cgroup on /sys/fs/cgroup/cpuset type cgroup (rw,nosuid,nodev,noexec,relatime,cpuset) -cgroup on /sys/fs/cgroup/net_cls,net_prio type cgroup (rw,nosuid,nodev,noexec,relatime,net_cls,net_prio) -cgroup on /sys/fs/cgroup/perf_event type cgroup (rw,nosuid,nodev,noexec,relatime,perf_event) -cgroup on /sys/fs/cgroup/hugetlb type cgroup (rw,nosuid,nodev,noexec,relatime,hugetlb) -cgroup on /sys/fs/cgroup/freezer type cgroup (rw,nosuid,nodev,noexec,relatime,freezer) -cgroup on /sys/fs/cgroup/memory type cgroup (rw,nosuid,nodev,noexec,relatime,memory) -cgroup on /sys/fs/cgroup/pids type cgroup (rw,nosuid,nodev,noexec,relatime,pids) -cgroup on /sys/fs/cgroup/devices type cgroup (rw,nosuid,nodev,noexec,relatime,devices) -cgroup on /sys/fs/cgroup/cpu,cpuacct type cgroup 
(rw,nosuid,nodev,noexec,relatime,cpu,cpuacct) -cgroup on /sys/fs/cgroup/blkio type cgroup (rw,nosuid,nodev,noexec,relatime,blkio) -'''), - - MockCommand(r"^mount -t cgroup2$", -'''cgroup on /sys/fs/cgroup/unified type cgroup2 (rw,nosuid,nodev,noexec,relatime) '''), MockCommand(r"^systemctl show walinuxagent\.service --property Slice", @@ -77,10 +59,87 @@ ] -_MOCKED_FILES = [ - ("/proc/self/cgroup", os.path.join(data_dir, 'cgroups', 'proc_self_cgroup')), - (r"/proc/[0-9]+/cgroup", os.path.join(data_dir, 'cgroups', 'proc_pid_cgroup')), - ("/sys/fs/cgroup/unified/cgroup.controllers", os.path.join(data_dir, 'cgroups', 'sys_fs_cgroup_unified_cgroup.controllers')) +_MOCKED_COMMANDS_V1 = [ + MockCommand(r"^findmnt -t cgroup --noheadings$", +'''/sys/fs/cgroup/systemd cgroup cgroup rw,nosuid,nodev,noexec,relatime,xattr,name=systemd +/sys/fs/cgroup/devices cgroup cgroup rw,nosuid,nodev,noexec,relatime,devices +/sys/fs/cgroup/rdma cgroup cgroup rw,nosuid,nodev,noexec,relatime,rdma +/sys/fs/cgroup/perf_event cgroup cgroup rw,nosuid,nodev,noexec,relatime,perf_event +/sys/fs/cgroup/net_cls,net_prio cgroup cgroup rw,nosuid,nodev,noexec,relatime,net_cls,net_prio +/sys/fs/cgroup/blkio cgroup cgroup rw,nosuid,nodev,noexec,relatime,blkio +/sys/fs/cgroup/cpuset cgroup cgroup rw,nosuid,nodev,noexec,relatime,cpuset +/sys/fs/cgroup/misc cgroup cgroup rw,nosuid,nodev,noexec,relatime,misc +/sys/fs/cgroup/cpu,cpuacct cgroup cgroup rw,nosuid,nodev,noexec,relatime,cpu,cpuacct +/sys/fs/cgroup/memory cgroup cgroup rw,nosuid,nodev,noexec,relatime,memory +/sys/fs/cgroup/freezer cgroup cgroup rw,nosuid,nodev,noexec,relatime,freezer +/sys/fs/cgroup/hugetlb cgroup cgroup rw,nosuid,nodev,noexec,relatime,hugetlb +/sys/fs/cgroup/pids cgroup cgroup rw,nosuid,nodev,noexec,relatime,pids +'''), + + MockCommand(r"^findmnt -t cgroup2 --noheadings$", ''), + + MockCommand(r"^stat -f --format=%T /sys/fs/cgroup$", 'tmpfs'), + +] + +_MOCKED_COMMANDS_V2 = [ + MockCommand(r"^findmnt -t cgroup2 --noheadings$", 
+'''/sys/fs/cgroup cgroup2 cgroup2 rw,nosuid,nodev,noexec,relatime,nsdelegate,memory_recursiveprot +'''), + + MockCommand(r"^findmnt -t cgroup --noheadings$", ''), + + MockCommand(r"^stat -f --format=%T /sys/fs/cgroup$", 'cgroup2fs'), + +] + +_MOCKED_COMMANDS_HYBRID = [ + MockCommand(r"^findmnt -t cgroup --noheadings$", +'''/sys/fs/cgroup/systemd cgroup cgroup rw,nosuid,nodev,noexec,relatime,xattr,name=systemd +/sys/fs/cgroup/devices cgroup cgroup rw,nosuid,nodev,noexec,relatime,devices +/sys/fs/cgroup/rdma cgroup cgroup rw,nosuid,nodev,noexec,relatime,rdma +/sys/fs/cgroup/perf_event cgroup cgroup rw,nosuid,nodev,noexec,relatime,perf_event +/sys/fs/cgroup/net_cls,net_prio cgroup cgroup rw,nosuid,nodev,noexec,relatime,net_cls,net_prio +/sys/fs/cgroup/blkio cgroup cgroup rw,nosuid,nodev,noexec,relatime,blkio +/sys/fs/cgroup/cpuset cgroup cgroup rw,nosuid,nodev,noexec,relatime,cpuset +/sys/fs/cgroup/misc cgroup cgroup rw,nosuid,nodev,noexec,relatime,misc +/sys/fs/cgroup/cpu,cpuacct cgroup cgroup rw,nosuid,nodev,noexec,relatime,cpu,cpuacct +/sys/fs/cgroup/memory cgroup cgroup rw,nosuid,nodev,noexec,relatime,memory +/sys/fs/cgroup/freezer cgroup cgroup rw,nosuid,nodev,noexec,relatime,freezer +/sys/fs/cgroup/hugetlb cgroup cgroup rw,nosuid,nodev,noexec,relatime,hugetlb +/sys/fs/cgroup/pids cgroup cgroup rw,nosuid,nodev,noexec,relatime,pids +'''), + + MockCommand(r"^findmnt -t cgroup2 --noheadings$", +'''/sys/fs/cgroup/unified cgroup2 cgroup2 rw,nosuid,nodev,noexec,relatime,nsdelegate +'''), + + MockCommand(r"^stat -f --format=%T /sys/fs/cgroup$", 'tmpfs'), + + MockCommand(r"^stat -f --format=%T /sys/fs/cgroup/unified$", 'cgroup2fs'), + +] + +_MOCKED_FILES_V1 = [ + ("/proc/self/cgroup", os.path.join(data_dir, 'cgroups', 'v1', 'proc_self_cgroup')), + (r"/proc/[0-9]+/cgroup", os.path.join(data_dir, 'cgroups', 'v1', 'proc_pid_cgroup')), + (r"/sys/fs/cgroup/cpu,cpuacct/system.slice/walinuxagent.service/cgroup.procs", os.path.join(data_dir, 'cgroups', 'cgroup.procs')), + 
(r"/sys/fs/cgroup/memory/system.slice/walinuxagent.service/cgroup.procs", os.path.join(data_dir, 'cgroups', 'cgroup.procs')) +] + +_MOCKED_FILES_V2 = [ + ("/proc/self/cgroup", os.path.join(data_dir, 'cgroups', 'v2', 'proc_self_cgroup')), + (r"/proc/[0-9]+/cgroup", os.path.join(data_dir, 'cgroups', 'v2', 'proc_pid_cgroup')), + ("/sys/fs/cgroup/cgroup.subtree_control", os.path.join(data_dir, 'cgroups', 'v2', 'sys_fs_cgroup_cgroup.subtree_control')), + ("/sys/fs/cgroup/azure.slice/cgroup.subtree_control", os.path.join(data_dir, 'cgroups', 'v2', 'sys_fs_cgroup_cgroup.subtree_control')), + ("/sys/fs/cgroup/azure.slice/walinuxagent.service/cgroup.subtree_control", os.path.join(data_dir, 'cgroups', 'v2', 'sys_fs_cgroup_cgroup.subtree_control_empty')), + (r"/sys/fs/cgroup/system.slice/walinuxagent.service/cgroup.procs", os.path.join(data_dir, 'cgroups', 'cgroup.procs')) +] + +_MOCKED_FILES_HYBRID = [ + ("/proc/self/cgroup", os.path.join(data_dir, 'cgroups', 'v1', 'proc_self_cgroup')), + (r"/proc/[0-9]+/cgroup", os.path.join(data_dir, 'cgroups', 'v1', 'proc_pid_cgroup')), + ("/sys/fs/cgroup/unified/cgroup.controllers", os.path.join(data_dir, 'cgroups', 'hybrid', 'sys_fs_cgroup_cgroup.controllers')) ] _MOCKED_PATHS = [ @@ -106,18 +165,56 @@ class UnitFilePaths: @contextlib.contextmanager -def mock_cgroup_environment(tmp_dir): +def mock_cgroup_v1_environment(tmp_dir): + """ + Creates a mock environment for cgroup v1 hierarchy used by the tests related to cgroups (currently it only + provides support for systemd platforms). + The command output used in __MOCKED_COMMANDS comes from an Ubuntu 20 system. 
+ """ + data_files = [ + (os.path.join(data_dir, 'init', 'walinuxagent.service'), UnitFilePaths.walinuxagent), + (os.path.join(data_dir, 'init', 'azure.slice'), UnitFilePaths.azure), + (os.path.join(data_dir, 'init', 'azure-vmextensions.slice'), UnitFilePaths.vmextensions) + ] + + with patch('azurelinuxagent.ga.cgroupapi.CGroupUtil.cgroups_supported', return_value=True): + with patch('azurelinuxagent.common.osutil.systemd.is_systemd', return_value=True): + with MockEnvironment(tmp_dir, commands=_MOCKED_COMMANDS_COMMON + _MOCKED_COMMANDS_V1, paths=_MOCKED_PATHS, files=_MOCKED_FILES_V1, data_files=data_files) as mock: + yield mock + + +@contextlib.contextmanager +def mock_cgroup_v2_environment(tmp_dir): + """ + Creates a mock environment for cgroup v2 hierarchy used by the tests related to cgroups (currently it only + provides support for systemd platforms). + The command output used in __MOCKED_COMMANDS comes from an Ubuntu 22 system. + """ + data_files = [ + (os.path.join(data_dir, 'init', 'walinuxagent.service'), UnitFilePaths.walinuxagent), + (os.path.join(data_dir, 'init', 'azure.slice'), UnitFilePaths.azure), + (os.path.join(data_dir, 'init', 'azure-vmextensions.slice'), UnitFilePaths.vmextensions) + ] + + with patch('azurelinuxagent.ga.cgroupapi.CGroupUtil.cgroups_supported', return_value=True): + with patch('azurelinuxagent.common.osutil.systemd.is_systemd', return_value=True): + with MockEnvironment(tmp_dir, commands=_MOCKED_COMMANDS_COMMON + _MOCKED_COMMANDS_V2, paths=_MOCKED_PATHS, files=_MOCKED_FILES_V2, data_files=data_files) as mock: + yield mock + + +@contextlib.contextmanager +def mock_cgroup_hybrid_environment(tmp_dir): + """ + Creates a mock environment for cgroup hybrid hierarchy used by the tests related to cgroups (currently it only + provides support for systemd platforms). """ - Creates a mocks environment used by the tests related to cgroups (currently it only provides support for systemd platforms). 
- The command output used in __MOCKED_COMMANDS comes from an Ubuntu 18 system. - """ data_files = [ (os.path.join(data_dir, 'init', 'walinuxagent.service'), UnitFilePaths.walinuxagent), (os.path.join(data_dir, 'init', 'azure.slice'), UnitFilePaths.azure), (os.path.join(data_dir, 'init', 'azure-vmextensions.slice'), UnitFilePaths.vmextensions) ] - with patch('azurelinuxagent.ga.cgroupapi.CGroupsApi.cgroups_supported', return_value=True): + with patch('azurelinuxagent.ga.cgroupapi.CGroupUtil.cgroups_supported', return_value=True): with patch('azurelinuxagent.common.osutil.systemd.is_systemd', return_value=True): - with MockEnvironment(tmp_dir, commands=_MOCKED_COMMANDS, paths=_MOCKED_PATHS, files=_MOCKED_FILES, data_files=data_files) as mock: + with MockEnvironment(tmp_dir, commands=_MOCKED_COMMANDS_COMMON + _MOCKED_COMMANDS_HYBRID, paths=_MOCKED_PATHS, files=_MOCKED_FILES_HYBRID, data_files=data_files) as mock: yield mock diff --git a/tests/lib/mock_command.py b/tests/lib/mock_command.py index e181d26d97..83509c3d37 100755 --- a/tests/lib/mock_command.py +++ b/tests/lib/mock_command.py @@ -2,12 +2,18 @@ import os import sys -if len(sys.argv) != 4: +if len(sys.argv) < 4: sys.stderr.write("usage: {0} ".format(os.path.basename(__file__))) # W0632: Possible unbalanced tuple unpacking with sequence: left side has 3 label(s), right side has 0 value(s) (unbalanced-tuple-unpacking) # Disabled: Unpacking is balanced: there is a check for the length on line 5 -stdout, return_value, stderr = sys.argv[1:] # pylint: disable=W0632 + +# This script will be used for mocking cgroups commands in test, when popen called this script will be executed instead of actual commands +# We pass stdout, return_value, stderr of the mocked command output as arguments to this script and this script will print them to stdout, stderr and exit with the return value +# So that popen gets the output of the mocked command. 
Ideally we should get 4 arguments in sys.argv, first one is the script name, next 3 are the actual command output +# But somehow when we run the tests from pycharm, it adds extra arguments next to the script name, so we need to handle that when reading the arguments +# ex: /home/nag/Documents/repos/WALinuxAgent/tests/lib/mock_command.py /snap/pycharm-professional/412/plugins/python-ce/helpers/py... +BLKID +ELFUTILS +KMOD -IDN2 +IDN -PCRE2 default-hierarchy=hybrid\n 0 +stdout, return_value, stderr = sys.argv[-3:] # pylint: disable=W0632 if stdout != '': sys.stdout.write(stdout) diff --git a/tests/lib/mock_environment.py b/tests/lib/mock_environment.py index 8f5682cf8e..5b72093584 100644 --- a/tests/lib/mock_environment.py +++ b/tests/lib/mock_environment.py @@ -76,12 +76,14 @@ def __init__(self, tmp_dir, commands=None, paths=None, files=None, data_files=No self._original_popen = subprocess.Popen self._original_mkdir = fileutil.mkdir self._original_path_exists = os.path.exists + self._original_os_remove = os.remove self._original_open = open self.patchers = [ patch_builtin("open", side_effect=self._mock_open), patch("subprocess.Popen", side_effect=self._mock_popen), patch("os.path.exists", side_effect=self._mock_path_exists), + patch("os.remove", side_effect=self._mock_os_remove), patch("azurelinuxagent.common.utils.fileutil.mkdir", side_effect=self._mock_mkdir) ] @@ -166,3 +168,6 @@ def _mock_open(self, path, *args, **kwargs): def _mock_path_exists(self, path): return self._original_path_exists(self.get_mapped_path(path)) + def _mock_os_remove(self, path): + return self._original_os_remove(self.get_mapped_path(path)) + diff --git a/tests/lib/mock_wire_protocol.py b/tests/lib/mock_wire_protocol.py index 78cbc59e2e..2cf2b10e0a 100644 --- a/tests/lib/mock_wire_protocol.py +++ b/tests/lib/mock_wire_protocol.py @@ -22,7 +22,7 @@ @contextlib.contextmanager -def mock_wire_protocol(mock_wire_data_file, http_get_handler=None, http_post_handler=None, http_put_handler=None, 
do_not_mock=lambda method, url: False, fail_on_unknown_request=True, save_to_history=False): +def mock_wire_protocol(mock_wire_data_file, http_get_handler=None, http_post_handler=None, http_put_handler=None, do_not_mock=lambda method, url: False, fail_on_unknown_request=True, save_to_history=False, detect_protocol=True): """ Creates a WireProtocol object that handles requests to the WireServer, the Host GA Plugin, and some requests to storage (requests that provide mock data in wire_protocol_data.py). @@ -149,7 +149,8 @@ def stop(): # go do it try: protocol.start() - protocol.detect(save_to_history=save_to_history) + if detect_protocol: + protocol.detect(save_to_history=save_to_history) yield protocol finally: protocol.stop() diff --git a/tests/lib/tools.py b/tests/lib/tools.py index 11bd801917..fc1f72150d 100644 --- a/tests/lib/tools.py +++ b/tests/lib/tools.py @@ -29,7 +29,7 @@ import time import unittest from functools import wraps -from threading import currentThread +from threading import current_thread import azurelinuxagent.common.conf as conf import azurelinuxagent.common.event as event @@ -42,9 +42,6 @@ try: from unittest.mock import Mock, patch, MagicMock, ANY, DEFAULT, call, PropertyMock # pylint: disable=unused-import - - # Import mock module for Python2 and Python3 - from bin.waagent2 import Agent # pylint: disable=unused-import except ImportError: from mock import Mock, patch, MagicMock, ANY, DEFAULT, call, PropertyMock @@ -156,8 +153,6 @@ def setUpClass(cls): cls.assertIsNone = cls.emulate_assertIsNone if not hasattr(cls, "assertIsNotNone"): cls.assertIsNotNone = cls.emulate_assertIsNotNone - if hasattr(cls, "assertRaisesRegexp"): - cls.assertRaisesRegex = cls.assertRaisesRegexp if not hasattr(cls, "assertRaisesRegex"): cls.assertRaisesRegex = cls.emulate_raises_regex if not hasattr(cls, "assertListEqual"): @@ -449,22 +444,6 @@ def create_script(script_file, contents): os.chmod(script_file, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR) -class 
AgentTestCaseWithGetVmSizeMock(AgentTestCase): - - def setUp(self): - - self._get_vm_size_patch = patch('azurelinuxagent.ga.update.UpdateHandler._get_vm_size', return_value="unknown") - self._get_vm_size_patch.start() - - super(AgentTestCaseWithGetVmSizeMock, self).setUp() - - def tearDown(self): - - if self._get_vm_size_patch: - self._get_vm_size_patch.stop() - - super(AgentTestCaseWithGetVmSizeMock, self).tearDown() - def load_data(name): """Load test data""" path = os.path.join(data_dir, name) @@ -543,6 +522,6 @@ def wrapper(self, *args, **kwargs): def clear_singleton_instances(cls): # Adding this lock to avoid any race conditions with cls._lock: - obj_name = "%s__%s" % (cls.__name__, currentThread().getName()) # Object Name = className__threadName + obj_name = "%s__%s" % (cls.__name__, current_thread().name) # Object Name = className__threadName if obj_name in cls._instances: del cls._instances[obj_name] diff --git a/tests/test_agent.py b/tests/test_agent.py index f892f090e2..ad3024113b 100644 --- a/tests/test_agent.py +++ b/tests/test_agent.py @@ -17,12 +17,17 @@ import os.path +import azurelinuxagent.common.logger as logger + from azurelinuxagent.agent import parse_args, Agent, usage, AgentCommands from azurelinuxagent.common import conf +from azurelinuxagent.common.exception import CGroupsException from azurelinuxagent.ga import logcollector, cgroupconfigurator -from azurelinuxagent.ga.cgroupapi import SystemdCgroupsApi from azurelinuxagent.common.utils import fileutil +from azurelinuxagent.ga.cgroupapi import InvalidCgroupMountpointException, CgroupV1, CgroupV2 from azurelinuxagent.ga.collect_logs import CollectLogsHandler +from azurelinuxagent.ga.cgroupcontroller import AGENT_LOG_COLLECTOR +from tests.lib.mock_cgroup_environment import mock_cgroup_v1_environment, mock_cgroup_v2_environment from tests.lib.tools import AgentTestCase, data_dir, Mock, patch EXPECTED_CONFIGURATION = \ @@ -43,10 +48,12 @@ Debug.CgroupMonitorExpiryTime = 2022-03-31 
Debug.CgroupMonitorExtensionName = Microsoft.Azure.Monitor.AzureMonitorLinuxAgent Debug.EnableAgentMemoryUsageCheck = False +Debug.EnableCgroupV2ResourceLimiting = False Debug.EnableFastTrack = True Debug.EnableGAVersioning = True Debug.EtpCollectionPeriod = 300 Debug.FirewallRulesLogPeriod = 86400 +Debug.LogCollectorInitialDelay = 300 DetectScvmmEnv = False EnableOverProvisioning = True Extension.LogDir = /var/log/azure @@ -101,6 +108,11 @@ class TestAgent(AgentTestCase): + def tearDown(self): + # These tests instantiate the Agent class, which has the side effect + # of initializing the global logger and conf objects; reset them. + logger.DEFAULT_LOGGER = logger.Logger() + conf.__conf__.values = {} def test_accepts_configuration_path(self): conf_path = os.path.join(data_dir, "test_waagent.conf") @@ -222,7 +234,7 @@ def test_rejects_invalid_log_collector_mode(self, mock_exit, mock_stderr): # py @patch("azurelinuxagent.agent.LogCollector") def test_calls_collect_logs_with_proper_mode(self, mock_log_collector, *args): # pylint: disable=unused-argument agent = Agent(False, conf_file_path=os.path.join(data_dir, "test_waagent.conf")) - mock_log_collector.run = Mock() + mock_log_collector.return_value.collect_logs_and_get_archive.return_value = (Mock(), Mock()) # LogCollector.collect_logs_and_get_archive returns a tuple agent.collect_logs(is_full_mode=True) full_mode = mock_log_collector.call_args_list[0][0][0] @@ -233,49 +245,251 @@ def test_calls_collect_logs_with_proper_mode(self, mock_log_collector, *args): self.assertFalse(full_mode) @patch("azurelinuxagent.agent.LogCollector") - def test_calls_collect_logs_on_valid_cgroups(self, mock_log_collector): + def test_calls_collect_logs_on_valid_cgroups_v1(self, mock_log_collector): + try: + CollectLogsHandler.enable_monitor_cgroups_check() + mock_log_collector.return_value.collect_logs_and_get_archive.return_value = (Mock(), Mock()) # LogCollector.collect_logs_and_get_archive returns a tuple + + # Mock cgroup so process 
is in the log collector slice + def mock_cgroup(*args, **kwargs): # pylint: disable=W0613 + relative_path = "{0}/{1}".format(cgroupconfigurator.LOGCOLLECTOR_SLICE, logcollector.CGROUPS_UNIT) + return CgroupV1( + cgroup_name=AGENT_LOG_COLLECTOR, + controller_mountpoints={ + 'cpu,cpuacct':"/sys/fs/cgroup/cpu,cpuacct", + 'memory':"/sys/fs/cgroup/memory" + }, + controller_paths={ + 'cpu,cpuacct':"/sys/fs/cgroup/cpu,cpuacct/{0}".format(relative_path), + 'memory':"/sys/fs/cgroup/memory/{0}".format(relative_path) + } + ) + + with mock_cgroup_v1_environment(self.tmp_dir): + with patch("azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv1.get_process_cgroup", + side_effect=mock_cgroup): + agent = Agent(False, conf_file_path=os.path.join(data_dir, "test_waagent.conf")) + agent.collect_logs(is_full_mode=True) + + self.assertEqual(1, mock_log_collector.call_count, "LogCollector should be called once") + + finally: + CollectLogsHandler.disable_monitor_cgroups_check() + + @patch("azurelinuxagent.agent.LogCollector") + def test_calls_collect_logs_on_valid_cgroups_v2(self, mock_log_collector): + try: + CollectLogsHandler.enable_monitor_cgroups_check() + mock_log_collector.return_value.collect_logs_and_get_archive.return_value = ( + Mock(), Mock()) # LogCollector.collect_logs_and_get_archive returns a tuple + + # Mock cgroup so process is in the log collector slice + def mock_cgroup(*args, **kwargs): # pylint: disable=W0613 + relative_path = "{0}/{1}".format(cgroupconfigurator.LOGCOLLECTOR_SLICE, logcollector.CGROUPS_UNIT) + return CgroupV2( + cgroup_name=AGENT_LOG_COLLECTOR, + root_cgroup_path="/sys/fs/cgroup", + cgroup_path="/sys/fs/cgroup/{0}".format(relative_path), + enabled_controllers=["cpu", "memory"] + ) + + with mock_cgroup_v2_environment(self.tmp_dir): + with patch("azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv2.get_process_cgroup", side_effect=mock_cgroup): + agent = Agent(False, conf_file_path=os.path.join(data_dir, "test_waagent.conf")) + 
agent.collect_logs(is_full_mode=True) + + self.assertEqual(1, mock_log_collector.call_count, "LogCollector should be called once") + + finally: + CollectLogsHandler.disable_monitor_cgroups_check() + + @patch("azurelinuxagent.agent.LogCollector") + def test_doesnt_call_collect_logs_when_cgroup_api_cannot_be_determined(self, mock_log_collector): try: CollectLogsHandler.enable_monitor_cgroups_check() mock_log_collector.run = Mock() - def mock_cgroup_paths(*args, **kwargs): - if args and args[0] == "self": - relative_path = "{0}/{1}".format(cgroupconfigurator.LOGCOLLECTOR_SLICE, logcollector.CGROUPS_UNIT) - return (cgroupconfigurator.LOGCOLLECTOR_SLICE, relative_path) - return SystemdCgroupsApi.get_process_cgroup_relative_paths(*args, **kwargs) + # Mock cgroup api to raise CGroupsException + def mock_get_cgroup_api(): + raise CGroupsException("") + + def raise_on_sys_exit(*args): + raise RuntimeError(args[0] if args else "Exiting") - with patch("azurelinuxagent.agent.SystemdCgroupsApi.get_process_cgroup_paths", side_effect=mock_cgroup_paths): + with patch("azurelinuxagent.agent.get_cgroup_api", side_effect=mock_get_cgroup_api): agent = Agent(False, conf_file_path=os.path.join(data_dir, "test_waagent.conf")) - agent.collect_logs(is_full_mode=True) - - mock_log_collector.assert_called_once() + + with patch("sys.exit", side_effect=raise_on_sys_exit) as mock_exit: + try: + agent.collect_logs(is_full_mode=True) + except RuntimeError as re: + self.assertEqual(logcollector.INVALID_CGROUPS_ERRCODE, re.args[0]) + mock_exit.assert_called_once_with(logcollector.INVALID_CGROUPS_ERRCODE) + finally: + CollectLogsHandler.disable_monitor_cgroups_check() + + @patch("azurelinuxagent.agent.LogCollector") + def test_doesnt_call_collect_logs_on_invalid_cgroups_v1(self, mock_log_collector): + try: + CollectLogsHandler.enable_monitor_cgroups_check() + mock_log_collector.run = Mock() + + # Mock cgroup so process is in incorrect slice + def mock_cgroup(*args, **kwargs): # pylint: disable=W0613 
+ relative_path = "NOT_THE_CORRECT_PATH" + return CgroupV1( + cgroup_name=AGENT_LOG_COLLECTOR, + controller_mountpoints={ + 'cpu,cpuacct': "/sys/fs/cgroup/cpu,cpuacct", + 'memory': "/sys/fs/cgroup/memory" + }, + controller_paths={ + 'cpu,cpuacct': "/sys/fs/cgroup/cpu,cpuacct/{0}".format(relative_path), + 'memory': "/sys/fs/cgroup/memory/{0}".format(relative_path) + } + ) + + def raise_on_sys_exit(*args): + raise RuntimeError(args[0] if args else "Exiting") + + with mock_cgroup_v1_environment(self.tmp_dir): + with patch("azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv1.get_process_cgroup", side_effect=mock_cgroup): + agent = Agent(False, conf_file_path=os.path.join(data_dir, "test_waagent.conf")) + + with patch("sys.exit", side_effect=raise_on_sys_exit) as mock_exit: + try: + agent.collect_logs(is_full_mode=True) + except RuntimeError as re: + self.assertEqual(logcollector.INVALID_CGROUPS_ERRCODE, re.args[0]) + mock_exit.assert_called_once_with(logcollector.INVALID_CGROUPS_ERRCODE) + finally: + CollectLogsHandler.disable_monitor_cgroups_check() + + @patch("azurelinuxagent.agent.LogCollector") + def test_doesnt_call_collect_logs_on_invalid_cgroups_v2(self, mock_log_collector): + try: + CollectLogsHandler.enable_monitor_cgroups_check() + mock_log_collector.run = Mock() + + # Mock cgroup so process is in incorrect slice + def mock_cgroup(*args, **kwargs): # pylint: disable=W0613 + relative_path = "NOT_THE_CORRECT_PATH" + return CgroupV2( + cgroup_name=AGENT_LOG_COLLECTOR, + root_cgroup_path="/sys/fs/cgroup", + cgroup_path="/sys/fs/cgroup/{0}".format(relative_path), + enabled_controllers=["cpu", "memory"] + ) + + def raise_on_sys_exit(*args): + raise RuntimeError(args[0] if args else "Exiting") + + with mock_cgroup_v2_environment(self.tmp_dir): + with patch("azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv2.get_process_cgroup", + side_effect=mock_cgroup): + agent = Agent(False, conf_file_path=os.path.join(data_dir, "test_waagent.conf")) + + with patch("sys.exit", 
side_effect=raise_on_sys_exit) as mock_exit: + try: + agent.collect_logs(is_full_mode=True) + except RuntimeError as re: + self.assertEqual(logcollector.INVALID_CGROUPS_ERRCODE, re.args[0]) + mock_exit.assert_called_once_with(logcollector.INVALID_CGROUPS_ERRCODE) finally: CollectLogsHandler.disable_monitor_cgroups_check() + @patch('azurelinuxagent.agent.get_cgroup_api', side_effect=InvalidCgroupMountpointException("Test")) @patch("azurelinuxagent.agent.LogCollector") - def test_doesnt_call_collect_logs_on_invalid_cgroups(self, mock_log_collector): + def test_doesnt_call_collect_logs_on_non_systemd_cgroups_v1_mountpoints(self, mock_log_collector, _): try: CollectLogsHandler.enable_monitor_cgroups_check() mock_log_collector.run = Mock() - def mock_cgroup_paths(*args, **kwargs): - if args and args[0] == "self": - return ("NOT_THE_CORRECT_PATH", "NOT_THE_CORRECT_PATH") - return SystemdCgroupsApi.get_process_cgroup_relative_paths(*args, **kwargs) + def raise_on_sys_exit(*args): + raise RuntimeError(args[0] if args else "Exiting") - with patch("azurelinuxagent.agent.SystemdCgroupsApi.get_process_cgroup_paths", side_effect=mock_cgroup_paths): + with mock_cgroup_v1_environment(self.tmp_dir): agent = Agent(False, conf_file_path=os.path.join(data_dir, "test_waagent.conf")) - exit_error = RuntimeError("Exiting") - with patch("sys.exit", return_value=exit_error) as mock_exit: + with patch("sys.exit", side_effect=raise_on_sys_exit) as mock_exit: try: agent.collect_logs(is_full_mode=True) except RuntimeError as re: + self.assertEqual(logcollector.INVALID_CGROUPS_ERRCODE, re.args[0]) + mock_exit.assert_called_once_with(logcollector.INVALID_CGROUPS_ERRCODE) + finally: + CollectLogsHandler.disable_monitor_cgroups_check() + + @patch("azurelinuxagent.agent.LogCollector") + def test_doesnt_call_collect_logs_if_either_controller_not_mounted(self, mock_log_collector): + try: + CollectLogsHandler.enable_monitor_cgroups_check() + mock_log_collector.run = Mock() + + # Mock cgroup so 
process is in the log collector slice and cpu is not mounted + def mock_cgroup(*args, **kwargs): # pylint: disable=W0613 + relative_path = "{0}/{1}".format(cgroupconfigurator.LOGCOLLECTOR_SLICE, logcollector.CGROUPS_UNIT) + return CgroupV1( + cgroup_name=AGENT_LOG_COLLECTOR, + controller_mountpoints={ + 'memory': "/sys/fs/cgroup/memory" + }, + controller_paths={ + 'memory': "/sys/fs/cgroup/memory/{0}".format(relative_path) + } + ) + + def raise_on_sys_exit(*args): + raise RuntimeError(args[0] if args else "Exiting") + + with mock_cgroup_v1_environment(self.tmp_dir): + with patch("azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv1.get_process_cgroup", + side_effect=mock_cgroup): + agent = Agent(False, conf_file_path=os.path.join(data_dir, "test_waagent.conf")) + + with patch("sys.exit", side_effect=raise_on_sys_exit) as mock_exit: + try: + agent.collect_logs(is_full_mode=True) + except RuntimeError as re: + self.assertEqual(logcollector.INVALID_CGROUPS_ERRCODE, re.args[0]) mock_exit.assert_called_once_with(logcollector.INVALID_CGROUPS_ERRCODE) - self.assertEqual(exit_error, re) finally: CollectLogsHandler.disable_monitor_cgroups_check() - + + @patch("azurelinuxagent.agent.LogCollector") + @patch("azurelinuxagent.ga.collect_logs.LogCollectorMonitorHandler.get_max_recorded_metrics") + def test_collect_log_should_output_resource_usage_summary(self, mock_get_max_recorded_metrics, mock_log_collector): + try: + CollectLogsHandler.enable_monitor_cgroups_check() + mock_log_collector.return_value.collect_logs_and_get_archive.return_value = (Mock(), Mock()) # LogCollector.collect_logs_and_get_archive returns a tuple + mock_get_max_recorded_metrics.return_value = {} + + # Mock cgroup so process is in the log collector slice + def mock_cgroup(*args, **kwargs): # pylint: disable=W0613 + relative_path = "{0}/{1}".format(cgroupconfigurator.LOGCOLLECTOR_SLICE, logcollector.CGROUPS_UNIT) + return CgroupV1( + cgroup_name=AGENT_LOG_COLLECTOR, + controller_mountpoints={ + 'cpu,cpuacct': 
"/sys/fs/cgroup/cpu,cpuacct", + 'memory': "/sys/fs/cgroup/memory" + }, + controller_paths={ + 'cpu,cpuacct': "/sys/fs/cgroup/cpu,cpuacct/{0}".format(relative_path), + 'memory': "/sys/fs/cgroup/memory/{0}".format(relative_path) + } + ) + + with mock_cgroup_v1_environment(self.tmp_dir): + with patch("azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv1.get_process_cgroup", side_effect=mock_cgroup): + agent = Agent(False, conf_file_path=os.path.join(data_dir, "test_waagent.conf")) + agent.collect_logs(is_full_mode=True) + + self.assertEqual(1, mock_log_collector.call_count, "LogCollector should be called once") + self.assertEqual(1, mock_get_max_recorded_metrics.call_count, "get_max_recorded_metrics should be called once") + + finally: + CollectLogsHandler.disable_monitor_cgroups_check() + def test_it_should_parse_setup_firewall_properly(self): test_firewall_meta = { diff --git a/tests_e2e/orchestrator/lib/agent_test_loader.py b/tests_e2e/orchestrator/lib/agent_test_loader.py index 11e665c13f..ba54f0b592 100644 --- a/tests_e2e/orchestrator/lib/agent_test_loader.py +++ b/tests_e2e/orchestrator/lib/agent_test_loader.py @@ -83,12 +83,34 @@ class VmImageInfo(object): def __str__(self): return self.urn +class CustomImage(object): + + # Images from a gallery are given as "//". + _IMAGE_FROM_GALLERY = re.compile(r"(?P[^/]+)/(?P[^/]+)/(?P[^/]+)") + + @staticmethod + def _is_image_from_gallery(image: str) -> bool: + """ + Verifies if image is from shared gallery + """ + return CustomImage._IMAGE_FROM_GALLERY.match(image) is not None + + @staticmethod + def _get_name_of_image_from_gallery(image: str) -> str: + """ + Get image name from shared gallery + """ + match = CustomImage._IMAGE_FROM_GALLERY.match(image) + if match is None: + raise Exception(f"Invalid image from gallery: {image}") + return match.group('image') + class AgentTestLoader(object): """ Loads a given set of test suites from the YAML configuration files. 
""" - def __init__(self, test_suites: str, cloud: str): + def __init__(self, test_suites: List[str], cloud: str): """ Loads the specified 'test_suites', which are given as a string of comma-separated suite names or a YAML description of a single test_suite. @@ -134,6 +156,7 @@ def _validate(self): """ Performs some basic validations on the data loaded from the YAML description files """ + def _parse_image(image: str) -> str: """ Parses a reference to an image or image set and returns the name of the image or image set @@ -147,8 +170,11 @@ def _parse_image(image: str) -> str: # Validate that the images the suite must run on are in images.yml for image in suite.images: image = _parse_image(image) + # skip validation if suite image from gallery image + if CustomImage._is_image_from_gallery(image): + continue if image not in self.images: - raise Exception(f"Invalid image reference in test suite {suite.name}: Can't find {image} in images.yml") + raise Exception(f"Invalid image reference in test suite {suite.name}: Can't find {image} in images.yml or image from a shared gallery") # If the suite specifies a cloud and it's location, validate that location string is start with and then validate that the images it uses are available in that location for suite_location in suite.locations: @@ -158,6 +184,9 @@ def _parse_image(image: str) -> str: continue for suite_image in suite.images: suite_image = _parse_image(suite_image) + # skip validation if suite image from gallery image + if CustomImage._is_image_from_gallery(suite_image): + continue for image in self.images[suite_image]: # If the image has a location restriction, validate that it is available on the location the suite must run on if image.locations: @@ -175,25 +204,9 @@ def _parse_image(image: str) -> str: if suite_skip_image not in self.images: raise Exception(f"Invalid image reference in test suite {suite.name}: Can't find {suite_skip_image} in images.yml") - @staticmethod - def _load_test_suites(test_suites: str) 
-> List[TestSuiteInfo]: - # - # Attempt to parse 'test_suites' as the YML description of a single suite - # - parsed = yaml.safe_load(test_suites) - - # - # A comma-separated list (e.g. "foo", "foo, bar", etc.) is valid YAML, but it is parsed as a string. An actual test suite would - # be parsed as a dictionary. If it is a dict, take is as the YML description of a single test suite - # - if isinstance(parsed, dict): - return [AgentTestLoader._load_test_suite(parsed)] - - # - # If test_suites is not YML, then it should be a comma-separated list of description files - # - description_files: List[Path] = [AgentTestLoader._SOURCE_CODE_ROOT/"test_suites"/f"{t.strip()}.yml" for t in test_suites.split(',')] + def _load_test_suites(test_suites: List[str]) -> List[TestSuiteInfo]: + description_files: List[Path] = [AgentTestLoader._SOURCE_CODE_ROOT/"test_suites"/f"{t}.yml" for t in test_suites] return [AgentTestLoader._load_test_suite(f) for f in description_files] @staticmethod @@ -224,8 +237,8 @@ def _load_test_suite(description_file: Path) -> TestSuiteInfo: rest of the tests in the suite will not be executed). By default, a failure on a test does not stop execution of the test suite. * images - A string, or a list of strings, specifying the images on which the test suite must be executed. Each value - can be the name of a single image (e.g."ubuntu_2004"), or the name of an image set (e.g. "endorsed"). The - names for images and image sets are defined in WALinuxAgent/tests_e2e/tests_suites/images.yml. + can be the name of a single image (e.g."ubuntu_2004"), or the name of an image set (e.g. "endorsed") or shared gallery image(e.g. "gallery/wait-cloud-init/1.0.2"). + The names for images and image sets are defined in WALinuxAgent/tests_e2e/tests_suites/images.yml. * locations - [Optional; string or list of strings] If given, the test suite must be executed on that cloud location(e.g. "AzureCloud:eastus2euap"). 
If not specified, or set to an empty string, the test suite will be executed in the default location. This is useful for test suites that exercise a feature that is enabled only in certain regions. diff --git a/tests_e2e/orchestrator/lib/agent_test_suite.py b/tests_e2e/orchestrator/lib/agent_test_suite.py index f432c2d4c1..b29c16b3c6 100644 --- a/tests_e2e/orchestrator/lib/agent_test_suite.py +++ b/tests_e2e/orchestrator/lib/agent_test_suite.py @@ -46,6 +46,7 @@ import makepkg from azurelinuxagent.common.version import AGENT_VERSION +from tests_e2e.tests.lib.retry import retry_if_false from tests_e2e.tests.lib.virtual_machine_client import VirtualMachineClient from tests_e2e.tests.lib.virtual_machine_scale_set_client import VirtualMachineScaleSetClient @@ -149,6 +150,8 @@ def __init__(self, metadata: TestSuiteMetadata) -> None: self._test_suites: List[AgentTestSuite] # Test suites to execute in the environment + self._test_args: Dict[str, str] # Additional arguments pass to the test suite + self._cloud: str # Azure cloud where test VMs are located self._subscription_id: str # Azure subscription where test VMs are located self._location: str # Azure location (region) where test VMs are located @@ -209,6 +212,7 @@ def _initialize(self, environment: Environment, variables: Dict[str, Any], lisa_ self._environment_name = variables["c_env_name"] self._test_suites = variables["c_test_suites"] + self._test_args = self._get_test_args(variables["test_args"]) self._cloud = variables["cloud"] self._subscription_id = variables["subscription_id"] @@ -491,7 +495,7 @@ def _check_ssh_connectivity(ssh_client: SshClient) -> None: break except CommandError as error: # Check for "System is booting up. Unprivileged users are not permitted to log in yet. Please come back later. For technical details, see pam_nologin(8)." 
- if not any(m in error.stderr for m in ["Unprivileged users are not permitted to log in yet", "Permission denied"]): + if not any(m in error.stderr for m in ["Unprivileged users are not permitted to log in yet", "Permission denied", "Connection reset by peer"]): raise if attempt >= max_attempts - 1: raise Exception(f"SSH connectivity check failed after {max_attempts} attempts, giving up [{error}]") @@ -812,12 +816,15 @@ def _create_test_context(self,) -> AgentTestContext: subscription=self._subscription_id, resource_group=self._resource_group_name, name=self._vm_name) - return AgentVmTestContext( + vm_test_context = AgentVmTestContext( working_directory=self._working_directory, vm=vm, ip_address=self._vm_ip_address, username=self._user, identity_file=self._identity_file) + for key in self._test_args: + setattr(vm_test_context, key, self._test_args[key]) + return vm_test_context else: log.info("Creating test context for scale set") if self._create_scale_set: @@ -836,11 +843,27 @@ def _create_test_context(self,) -> AgentTestContext: if self._create_scale_set: self._test_nodes = [_TestNode(name=i.instance_name, ip_address=i.ip_address) for i in scale_set.get_instances_ip_address()] - return AgentVmssTestContext( + vmss_test_context = AgentVmssTestContext( working_directory=self._working_directory, vmss=scale_set, username=self._user, identity_file=self._identity_file) + for key in self._test_args: + setattr(vmss_test_context, key, self._test_args[key]) + return vmss_test_context + + @staticmethod + def _get_test_args(arg_str) -> Dict[str, str]: + """ + Returns the arguments to be passed to the test classes + """ + test_args: Dict[str, str] = {} + if arg_str == "": + return test_args + for arg in arg_str.split(','): + key, value = map(str.strip, arg.split('=')) + test_args[key] = value + return test_args @staticmethod def _mark_log_as_failed(): @@ -891,6 +914,10 @@ def _create_test_scale_set(self) -> None: self._lisa_log.info("Creating resource group %s", 
self._resource_group_name) resource_group = ResourceGroupClient(cloud=self._cloud, location=self._location, subscription=self._subscription_id, name=self._resource_group_name) resource_group.create() + exist = retry_if_false(resource_group.is_exists) + if not exist: + self._lisa_log.error("Failed to create resource group %s", self._resource_group_name) + raise Exception("Failed to create resource group: {0}".format(self._resource_group_name)) self._delete_scale_set = True self._lisa_log.info("Creating scale set %s", self._vmss_name) diff --git a/tests_e2e/orchestrator/lib/agent_test_suite_combinator.py b/tests_e2e/orchestrator/lib/agent_test_suite_combinator.py index ffecaf3630..07bb366328 100644 --- a/tests_e2e/orchestrator/lib/agent_test_suite_combinator.py +++ b/tests_e2e/orchestrator/lib/agent_test_suite_combinator.py @@ -22,7 +22,7 @@ from lisa.messages import TestStatus, TestResultMessage # pylint: disable=E0401 from lisa.util import field_metadata # pylint: disable=E0401 -from tests_e2e.orchestrator.lib.agent_test_loader import AgentTestLoader, VmImageInfo, TestSuiteInfo +from tests_e2e.orchestrator.lib.agent_test_loader import AgentTestLoader, VmImageInfo, TestSuiteInfo, CustomImage from tests_e2e.tests.lib.logging import set_thread_name from tests_e2e.tests.lib.virtual_machine_client import VirtualMachineClient from tests_e2e.tests.lib.virtual_machine_scale_set_client import VirtualMachineScaleSetClient @@ -46,6 +46,7 @@ class AgentTestSuitesCombinatorSchema(schema.Combinator): resource_group_name: str = field(default_factory=str, metadata=field_metadata(required=True)) subscription_id: str = field(default_factory=str, metadata=field_metadata(required=True)) test_suites: str = field(default_factory=str, metadata=field_metadata(required=True)) + default_test_suites: List[str] = field(default_factory=list, metadata=field_metadata(required=True)) user: str = field(default_factory=str, metadata=field_metadata(required=True)) vm_name: str = 
field(default_factory=str, metadata=field_metadata(required=True)) vm_size: str = field(default_factory=str, metadata=field_metadata(required=True)) @@ -81,20 +82,25 @@ def __init__(self, runbook: AgentTestSuitesCombinatorSchema) -> None: if self.runbook.resource_group_name == '': raise Exception("Invalid runbook parameters: The 'vmss_name' parameter indicates an existing VMSS, a 'resource_group_name' must be specified.") + if self.runbook.test_suites != "": + test_suites = [t.strip() for t in self.runbook.test_suites.split(',')] + else: + test_suites = self.runbook.default_test_suites + self._log: logging.Logger = logging.getLogger("lisa") with set_thread_name("AgentTestSuitesCombinator"): if self.runbook.vm_name != '': - self._environments = [self.create_existing_vm_environment()] + self._environments = [self.create_existing_vm_environment(test_suites)] elif self.runbook.vmss_name != '': - self._environments = [self.create_existing_vmss_environment()] + self._environments = [self.create_existing_vmss_environment(test_suites)] else: - self._environments = self.create_environment_list() + self._environments = self.create_environment_list(test_suites) self._index = 0 @classmethod def type_name(cls) -> str: - return "agent_test_suites" + return "agent_test_suite_combinator" @classmethod def type_schema(cls) -> Type[schema.TypedSchema]: @@ -125,7 +131,7 @@ def _next(self) -> Optional[Dict[str, Any]]: "AzureUSGovernment": "usgovarizona", } - def create_environment_list(self) -> List[Dict[str, Any]]: + def create_environment_list(self, test_suites: List[str]) -> List[Dict[str, Any]]: """ Examines the test_suites specified in the runbook and returns a list of the environments (i.e. test VMs or scale sets) that need to be created in order to execute these suites. 
@@ -136,7 +142,7 @@ def create_environment_list(self) -> List[Dict[str, Any]]: environments: List[Dict[str, Any]] = [] shared_environments: Dict[str, Dict[str, Any]] = {} # environments shared by multiple test suites - loader = AgentTestLoader(self.runbook.test_suites, self.runbook.cloud) + loader = AgentTestLoader(test_suites, self.runbook.cloud) runbook_images = self._get_runbook_images(loader) @@ -165,10 +171,10 @@ def create_environment_list(self) -> List[Dict[str, Any]]: vhd = image.urn image_name = urllib.parse.urlparse(vhd).path.split('/')[-1] # take the last fragment of the URL's path (e.g. "RHEL_8_Standard-8.3.202006170423.vhd") shared_gallery = "" - elif self._is_image_from_gallery(image.urn): + elif CustomImage._is_image_from_gallery(image.urn): marketplace_image = "" vhd = "" - image_name = self._get_name_of_image_from_gallery(image.urn) + image_name = CustomImage._get_name_of_image_from_gallery(image.urn) shared_gallery = image.urn else: marketplace_image = image.urn @@ -260,8 +266,8 @@ def create_environment_list(self) -> List[Dict[str, Any]]: return environments - def create_existing_vm_environment(self) -> Dict[str, Any]: - loader = AgentTestLoader(self.runbook.test_suites, self.runbook.cloud) + def create_existing_vm_environment(self, test_suites: List[str]) -> Dict[str, Any]: + loader = AgentTestLoader(test_suites, self.runbook.cloud) vm: VirtualMachineClient = VirtualMachineClient( cloud=self.runbook.cloud, @@ -300,8 +306,8 @@ def create_existing_vm_environment(self) -> Dict[str, Any]: "c_test_suites": loader.test_suites, } - def create_existing_vmss_environment(self) -> Dict[str, Any]: - loader = AgentTestLoader(self.runbook.test_suites, self.runbook.cloud) + def create_existing_vmss_environment(self, test_suites: List[str]) -> Dict[str, Any]: + loader = AgentTestLoader(test_suites, self.runbook.cloud) vmss = VirtualMachineScaleSetClient( cloud=self.runbook.cloud, @@ -445,7 +451,7 @@ def _get_runbook_images(self, loader: AgentTestLoader) -> 
List[VmImageInfo]: return images # If it is not image or image set, it must be a URN, VHD, or an image from a gallery - if not self._is_urn(self.runbook.image) and not self._is_vhd(self.runbook.image) and not self._is_image_from_gallery(self.runbook.image): + if not self._is_urn(self.runbook.image) and not self._is_vhd(self.runbook.image) and not CustomImage._is_image_from_gallery(self.runbook.image): raise Exception(f"The 'image' parameter must be an image, image set name, urn, vhd, or an image from a shared gallery: {self.runbook.image}") i = VmImageInfo() @@ -466,7 +472,15 @@ def _get_test_suite_images(suite: TestSuiteInfo, loader: AgentTestLoader) -> Lis for image in suite.images: match = AgentTestLoader.RANDOM_IMAGES_RE.match(image) if match is None: - image_list = loader.images[image] + # Added this condition for galley image as they don't have definition in images.yml + if CustomImage._is_image_from_gallery(image): + i = VmImageInfo() + i.urn = image + i.locations = [] + i.vm_sizes = [] + image_list = [i] + else: + image_list = loader.images[image] else: count = match.group('count') if count is None: @@ -560,20 +574,6 @@ def _is_vhd(vhd: str) -> bool: parsed = urllib.parse.urlparse(vhd) return parsed.scheme == 'https' and parsed.netloc != "" and parsed.path != "" - # Images from a gallery are given as "//". 
- _IMAGE_FROM_GALLERY = re.compile(r"(?P[^/]+)/(?P[^/]+)/(?P[^/]+)") - - @staticmethod - def _is_image_from_gallery(image: str) -> bool: - return AgentTestSuitesCombinator._IMAGE_FROM_GALLERY.match(image) is not None - - @staticmethod - def _get_name_of_image_from_gallery(image: str) -> bool: - match = AgentTestSuitesCombinator._IMAGE_FROM_GALLERY.match(image) - if match is None: - raise Exception(f"Invalid image from gallery: {image}") - return match.group('image') - @staticmethod def _report_test_result( suite_name: str, diff --git a/tests_e2e/orchestrator/runbook.yml b/tests_e2e/orchestrator/runbook.yml index 8b0ef37ec7..b96cc5107c 100644 --- a/tests_e2e/orchestrator/runbook.yml +++ b/tests_e2e/orchestrator/runbook.yml @@ -26,12 +26,43 @@ variable: is_case_visible: true # - # Test suites to execute + # Test suites to execute. + # + # Use "test_suites" to specify from the command-line the test suites to execute. If not specifies, the "default_test_suites" are executed. # - name: test_suites - value: "agent_bvt, no_outbound_connections, extensions_disabled, agent_not_provisioned, fips, agent_ext_workflow, agent_status, multi_config_ext, agent_cgroups, ext_cgroups, agent_firewall, ext_telemetry_pipeline, ext_sequencing, agent_persist_firewall, publish_hostname, agent_update, recover_network_interface" + value: "" + - name: default_test_suites + value: + - agent_bvt + - agent_cgroups + - agent_ext_workflow + - agent_firewall + - agent_not_provisioned + - agent_persist_firewall + - agent_status + - agent_update + - ext_cgroups + - extensions_disabled + - ext_sequencing + - ext_telemetry_pipeline + - fips + - keyvault_certificates + - multi_config_ext + - no_outbound_connections + - publish_hostname + - recover_network_interface + - cgroup_v2_disabled + - log_collector + + # + # Additional arguments pass to the test suites # + - name: test_args + value: "" + is_case_visible: true + # Parameters used to create test VMs # - name: subscription_id @@ -183,7 +214,7 @@ 
environment: $(c_environment) platform: $(c_platform) combinator: - type: agent_test_suites + type: agent_test_suite_combinator allow_ssh: $(allow_ssh) cloud: $(cloud) identity_file: $(identity_file) @@ -193,6 +224,7 @@ combinator: resource_group_name: $(resource_group_name) subscription_id: $(subscription_id) test_suites: $(test_suites) + default_test_suites: $(default_test_suites) user: $(user) vm_name: $(vm_name) vm_size: $(vm_size) diff --git a/tests_e2e/orchestrator/scripts/install-agent b/tests_e2e/orchestrator/scripts/install-agent index d28164f6d3..cf5a9e8106 100755 --- a/tests_e2e/orchestrator/scripts/install-agent +++ b/tests_e2e/orchestrator/scripts/install-agent @@ -85,19 +85,19 @@ echo "Agent Version:" $python "$waagent" --version echo "Service Status:" -# Sometimes the service can take a while to start; give it a few minutes, +# We need to wait for the provisioning code to complete before stopping the agent's service to do the test setup started=false -for i in {1..6} +for i in {1..12} do - if service-status $service_name; then + if [[ -f /var/lib/waagent/provisioned ]]; then started=true break fi - echo "Waiting for service to start..." + echo "Waiting for agent to complete provisioning." sleep 30 done if [ $started == false ]; then - echo "Service failed to start." 
+ echo "Provisioning did not complete within the given timeout (cannot find /var/lib/waagent/provisioned)" exit 1 fi diff --git a/tests_e2e/pipeline/pipeline-cleanup.yml b/tests_e2e/pipeline/pipeline-cleanup.yml index 66ff5f761a..c673f0378d 100644 --- a/tests_e2e/pipeline/pipeline-cleanup.yml +++ b/tests_e2e/pipeline/pipeline-cleanup.yml @@ -52,5 +52,5 @@ steps: --url-parameters api-version=2021-04-01 \$expand=createdTime \ --output json \ --query value \ - | jq --arg date "$date" '.[] | select (.createdTime < $date).name | match("'${pattern}'"; "g").string' \ + | jq --arg date "$date" '.[] | select (.createdTime < $date).name | match("'${pattern}'"; "i").string' \ | xargs -l -t -r az group delete --subscription "${subscription_id}" --no-wait -y -n diff --git a/tests_e2e/pipeline/pipeline.yml b/tests_e2e/pipeline/pipeline.yml index 23eef1ce5e..bccee67124 100644 --- a/tests_e2e/pipeline/pipeline.yml +++ b/tests_e2e/pipeline/pipeline.yml @@ -19,6 +19,11 @@ parameters: type: string default: "-" + - name: test_args + displayName: Test Args (additional arguments pass to the test suites. 
Comma-separated list of key=value pairs) + type: string + default: "-" + - name: image displayName: Image (image/image set name, URN, or VHD) type: string @@ -132,6 +137,7 @@ jobs: KEEP_ENVIRONMENT: ${{ parameters.keep_environment }} LOCATION: ${{ parameters.location }} TEST_SUITES: ${{ parameters.test_suites }} + TEST_ARGS: ${{ parameters.test_args }} VM_SIZE: ${{ parameters.vm_size }} - bash: $(Build.SourcesDirectory)/tests_e2e/pipeline/scripts/collect_artifacts.sh diff --git a/tests_e2e/pipeline/scripts/execute_tests.sh b/tests_e2e/pipeline/scripts/execute_tests.sh index bcba9710a8..6c751d6a78 100755 --- a/tests_e2e/pipeline/scripts/execute_tests.sh +++ b/tests_e2e/pipeline/scripts/execute_tests.sh @@ -54,6 +54,9 @@ if [[ $TEST_SUITES == "-" ]]; then else TEST_SUITES="-v test_suites:\"$TEST_SUITES\"" fi +if [[ $TEST_ARGS == "-" ]]; then + TEST_ARGS="" +fi if [[ $IMAGE == "-" ]]; then IMAGE="" fi @@ -99,4 +102,5 @@ docker run --rm \ -v location:\"$LOCATION\" \ -v vm_size:\"$VM_SIZE\" \ -v allow_ssh:\"$IP_ADDRESS\" \ + -v test_args:\"$TEST_ARGS\" \ $TEST_SUITES" diff --git a/tests_e2e/test_suites/agent_cgroups.yml b/tests_e2e/test_suites/agent_cgroups.yml index d6d1fc0f17..7844e606f4 100644 --- a/tests_e2e/test_suites/agent_cgroups.yml +++ b/tests_e2e/test_suites/agent_cgroups.yml @@ -1,9 +1,11 @@ # -# The test suite verify the agent running in expected cgroups and also, checks agent tracking the cgroups for polling resource metrics. Also, it verifies the agent cpu quota is set as expected. +# The test suite verify the agent running in expected cgroups and also, checks agent tracking the cgroups for polling resource metrics, +# checks unexpected processes in the agent cgroups, and it verifies the agent cpu quota is set as expected. 
# name: "AgentCgroups" tests: - "agent_cgroups/agent_cgroups.py" - "agent_cgroups/agent_cpu_quota.py" + - "agent_cgroups/agent_cgroups_process_check.py" images: "cgroups-endorsed" owns_vm: true \ No newline at end of file diff --git a/tests_e2e/test_suites/agent_firewall.yml b/tests_e2e/test_suites/agent_firewall.yml index 0e095ba39e..787c5173b4 100644 --- a/tests_e2e/test_suites/agent_firewall.yml +++ b/tests_e2e/test_suites/agent_firewall.yml @@ -12,4 +12,7 @@ tests: images: - "endorsed" - "endorsed-arm64" -owns_vm: true # This vm cannot be shared with other tests because it modifies the firewall rules and agent status. \ No newline at end of file +owns_vm: true # This vm cannot be shared with other tests because it modifies the firewall rules and agent status. +skip_on_images: + - "ubuntu_2204_minimal" # TODO: Currently Ubuntu minimal does not include the 'iptables' command. Remove it once this has been addressed. + - "ubuntu_2404_minimal" diff --git a/tests_e2e/test_suites/agent_persist_firewall.yml b/tests_e2e/test_suites/agent_persist_firewall.yml index 137f3af87e..ea877b9f0c 100644 --- a/tests_e2e/test_suites/agent_persist_firewall.yml +++ b/tests_e2e/test_suites/agent_persist_firewall.yml @@ -14,6 +14,10 @@ owns_vm: true # This vm cannot be shared with other tests because it modifies t # so skipping the test run on flatcar distro. # (2023-11-14T19:04:13.738695Z ERROR ExtHandler ExtHandler Unable to setup the persistent firewall rules: [Errno 30] Read-only file system: '/lib/systemd/system/waagent-network-setup.service) skip_on_images: + - "azure-linux_3" # TODO: the test in unstable on Azure Linux 3; skipping for now + - "azure-linux_3_arm64" # TODO: the test in unstable on Azure Linux 3; skipping for now + - "debian_9" # TODO: Reboot is slow on debian_9. Need to investigate further. - "flatcar" - "flatcar_arm64" - - "debian_9" # TODO: Reboot is slow on debian_9. Need to investigate further. 
\ No newline at end of file + - "ubuntu_2204_minimal" # TODO: Currently Ubuntu minimal does not include the 'iptables' command. Remove it once this has been addressed. + - "ubuntu_2404_minimal" diff --git a/tests_e2e/test_suites/agent_publish.yml b/tests_e2e/test_suites/agent_publish.yml index 3ab29c6a0b..8b11eb4e7e 100644 --- a/tests_e2e/test_suites/agent_publish.yml +++ b/tests_e2e/test_suites/agent_publish.yml @@ -7,6 +7,6 @@ tests: images: - "random(endorsed, 10)" - "random(endorsed-arm64, 2)" -locations: "AzureCloud:centraluseuap" +locations: "AzureCloud:eastus2euap" owns_vm: true install_test_agent: false \ No newline at end of file diff --git a/tests_e2e/test_suites/agent_wait_for_cloud_init.yml b/tests_e2e/test_suites/agent_wait_for_cloud_init.yml index 09c00aa7ee..154e183499 100644 --- a/tests_e2e/test_suites/agent_wait_for_cloud_init.yml +++ b/tests_e2e/test_suites/agent_wait_for_cloud_init.yml @@ -9,5 +9,4 @@ tests: - "agent_wait_for_cloud_init/agent_wait_for_cloud_init.py" template: "agent_wait_for_cloud_init/add_cloud_init_script.py" install_test_agent: false -# Dummy image, since the parameter is required. The actual image needs to be passed as a parameter to the runbook. -images: "ubuntu_2204" +images: "gallery/wait-cloud-init/1.0.2" diff --git a/tests_e2e/test_suites/cgroup_v2_disabled.yml b/tests_e2e/test_suites/cgroup_v2_disabled.yml new file mode 100644 index 0000000000..cf25ecdcfc --- /dev/null +++ b/tests_e2e/test_suites/cgroup_v2_disabled.yml @@ -0,0 +1,10 @@ +# +# The test suite verifies that the agent does not enable resource enforcement and monitoring on machines which are +# using cgroup v2. This suite will be removed once cgroup v2 is supported. 
+# +name: "Cgroupv2Disabled" +tests: + - "cgroup_v2_disabled/cgroup_v2_disabled.py" +images: + - "ubuntu_2204" + - "ubuntu_2404" \ No newline at end of file diff --git a/tests_e2e/test_suites/ext_sequencing.yml b/tests_e2e/test_suites/ext_sequencing.yml index 1976a85025..78c73dee11 100644 --- a/tests_e2e/test_suites/ext_sequencing.yml +++ b/tests_e2e/test_suites/ext_sequencing.yml @@ -7,4 +7,7 @@ tests: - "ext_sequencing/ext_sequencing.py" images: "endorsed" # This scenario is executed on instances of a scaleset created by the agent test suite. -executes_on_scale_set: true \ No newline at end of file +executes_on_scale_set: true +skip_on_images: # TODO: AzureMonitorLinuxAgent, used by this test, currently does not work on Azure Linux 3. Remove this once it is fixed. + - "azure-linux_3" + - "azure-linux_3_arm64" diff --git a/tests_e2e/test_suites/images.yml b/tests_e2e/test_suites/images.yml index 03c1bfd77c..0935bffd7e 100644 --- a/tests_e2e/test_suites/images.yml +++ b/tests_e2e/test_suites/images.yml @@ -17,8 +17,8 @@ image-sets: - "debian_11" - "flatcar" - "suse_12" - - "mariner_1" - "mariner_2" + - "azure-linux_3" - "suse_15" - "rhel_79" - "rhel_82" @@ -28,15 +28,19 @@ image-sets: - "ubuntu_1804" - "ubuntu_2004" - "ubuntu_2204" + - "ubuntu_2204_minimal" - "ubuntu_2404" + - "ubuntu_2404_minimal" # Endorsed distros (ARM64) that are tested on the daily runs endorsed-arm64: - "debian_11_arm64" - "flatcar_arm64" - "mariner_2_arm64" + - "azure-linux_3_arm64" - "rhel_90_arm64" - "ubuntu_2204_arm64" + - "ubuntu_2404_arm64" # As of today agent only support and enabled resource governance feature on following distros cgroups-endorsed: @@ -52,6 +56,20 @@ image-sets: - "oracle_610" - "rhel_610" + # These are the distros which have periodic log collector support. 
+ log-collector-endorsed: + - "centos_82" + - "rhel_82" + - "ubuntu_1604" + - "ubuntu_1804" + - "ubuntu_2004" + - "ubuntu_2204" + - "ubuntu_2204_minimal" + - "ubuntu_2204_arm64" + - "ubuntu_2404" + - "ubuntu_2404_minimal" + - "ubuntu_2404_arm64" + # # An image can be specified by a string giving its urn, as in # @@ -85,6 +103,16 @@ images: urn: "almalinux almalinux 9-gen2 latest" locations: AzureChinaCloud: [] + azure-linux_3: + urn: "microsoftcblmariner azure-linux-3 azure-linux-3 latest" + locations: + AzureUSGovernment: [] + AzureChinaCloud: [] + azure-linux_3_arm64: + urn: "microsoftcblmariner azure-linux-3 azure-linux-3-arm64 latest" + locations: + AzureUSGovernment: [] + AzureChinaCloud: [] centos_610: "OpenLogic CentOS 6.10 latest" centos_75: "OpenLogic CentOS 7.5 latest" centos_79: "OpenLogic CentOS 7_9 latest" @@ -178,9 +206,16 @@ images: locations: AzureChinaCloud: [] AzureUSGovernment: [] - ubuntu_2404: + ubuntu_2204_minimal: "Canonical 0001-com-ubuntu-minimal-jammy minimal-22_04-lts-gen2 latest" + ubuntu_2404: "Canonical ubuntu-24_04-lts server latest" + ubuntu_2404_arm64: + urn: "Canonical ubuntu-24_04-lts server-arm64 latest" + locations: + AzureChinaCloud: [] + AzureUSGovernment: [] + ubuntu_2404_minimal: # TODO: Currently using the daily build, update to the release build once it is available - urn: "Canonical 0001-com-ubuntu-server-noble-daily 24_04-daily-lts-gen2 latest" + urn: "Canonical ubuntu-24_04-lts-daily minimal latest" locations: AzureChinaCloud: [] AzureUSGovernment: [] diff --git a/tests_e2e/test_suites/initial_agent_update.yml b/tests_e2e/test_suites/initial_agent_update.yml new file mode 100644 index 0000000000..6dc039d625 --- /dev/null +++ b/tests_e2e/test_suites/initial_agent_update.yml @@ -0,0 +1,13 @@ +# +# This test verifies that the Agent does initial update on very first goal state before it starts processing extensions for new vms that are enrolled into RSM. +# +# NOTE: This test_suite is not fully automated. 
It requires a custom image where custom pre-installed Agent has been installed with version 2.8.9.9. Creation of custom images is not automated currently. +# But daily run is automated and test suite will pass shared gallery custom image reference in images list +# +# +name: "InitialAgentUpdate" +tests: + - "initial_agent_update/initial_agent_update.py" +install_test_agent: false +images: "gallery/initial-agent-update/1.0.0" +locations: "AzureCloud:eastus2euap" diff --git a/tests_e2e/test_suites/keyvault_certificates.yml b/tests_e2e/test_suites/keyvault_certificates.yml index 00c51db7d2..c63a4be1f7 100644 --- a/tests_e2e/test_suites/keyvault_certificates.yml +++ b/tests_e2e/test_suites/keyvault_certificates.yml @@ -1,5 +1,10 @@ # -# This test verifies that the Agent can download and extract KeyVault certificates that use different encryption algorithms +# This test verifies that the Agent can download and extract KeyVault certificates that use different encryption +# algorithms (currently RSA and EC). +# +# The test needs exclusive use of the VM because support for EC certificates was added on version 2.10. Daemons +# older than that version will fail to parse the certificates, and go on an infinite loop when fetching the goal +# state. # name: "KeyvaultCertificates" tests: @@ -7,3 +12,4 @@ tests: images: - "endorsed" - "endorsed-arm64" +owns_vm: true diff --git a/tests_e2e/test_suites/log_collector.yml b/tests_e2e/test_suites/log_collector.yml new file mode 100644 index 0000000000..496198f921 --- /dev/null +++ b/tests_e2e/test_suites/log_collector.yml @@ -0,0 +1,8 @@ +# +# This test is used to verify that the log collector logs the expected behavior on periodic runs. 
+# +name: "LogCollector" +tests: + - "log_collector/log_collector.py" +images: + - "random(log-collector-endorsed, 1)" diff --git a/tests_e2e/test_suites/multi_config_ext.yml b/tests_e2e/test_suites/multi_config_ext.yml index 24bdaa7366..1856a4d061 100644 --- a/tests_e2e/test_suites/multi_config_ext.yml +++ b/tests_e2e/test_suites/multi_config_ext.yml @@ -7,3 +7,6 @@ tests: - "multi_config_ext/multi_config_ext.py" images: - "endorsed" +# TODO: This test has been failing due to issues in the RC2 extension on AzureCloud. Re-enable once the extension has been fixed. +skip_on_clouds: + - "AzureCloud" diff --git a/tests_e2e/tests/agent_cgroups/agent_cgroups_process_check.py b/tests_e2e/tests/agent_cgroups/agent_cgroups_process_check.py new file mode 100644 index 0000000000..d0996caec8 --- /dev/null +++ b/tests_e2e/tests/agent_cgroups/agent_cgroups_process_check.py @@ -0,0 +1,77 @@ +#!/usr/bin/env python3 + +# Microsoft Azure Linux Agent +# +# Copyright 2018 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from typing import List, Dict, Any + +from tests_e2e.tests.lib.agent_test import AgentVmTest +from tests_e2e.tests.lib.agent_test_context import AgentVmTestContext +from tests_e2e.tests.lib.logging import log +from tests_e2e.tests.lib.virtual_machine_extension_client import VirtualMachineExtensionClient +from tests_e2e.tests.lib.vm_extension_identifier import VmExtensionIds + + +class AgentCgroupsProcessCheck(AgentVmTest): + """ + Tests the agent's ability to detect processes that do not belong to the agent's cgroup + """ + def __init__(self, context: AgentVmTestContext): + super().__init__(context) + self._ssh_client = self._context.create_ssh_client() + + def run(self): + """ + Steps: + 1. Verify that agent detects processes that do not belong to the agent's cgroup and disable the cgroups + 2. Run the extension, so that they are run in the agent's cgroup + 3. Restart the ext_handler process to re-initialize the cgroups setup + 4. Verify that agent detects extension processes and will not enable the cgroups + """ + + log.info("=====Validating agent cgroups process check") + self._run_remote_test(self._ssh_client, "agent_cgroups_process_check-unknown_process_check.py", use_sudo=True) + + self._install_ama_extension() + + log.info("=====Validating agent cgroups not enabled") + self._run_remote_test(self._ssh_client, "agent_cgroups_process_check-cgroups_not_enabled.py", use_sudo=True) + + def _install_ama_extension(self): + ama_extension = VirtualMachineExtensionClient( + self._context.vm, VmExtensionIds.AzureMonitorLinuxAgent, + resource_name="AMAAgent") + log.info("Installing %s", ama_extension) + ama_extension.enable() + ama_extension.assert_instance_view() + + def get_ignore_error_rules(self) -> List[Dict[str, Any]]: + + ignore_rules = [ + # This is produced by the test, so it is expected + # Examples: + # 2024-04-01T19:16:11.929000Z INFO MonitorHandler ExtHandler [CGW] Disabling resource usage monitoring. 
Reason: Check on cgroups failed: + # [CGroupsException] The agent's cgroup includes unexpected processes: ['[PID: 2957] dd\x00if=/dev/zero\x00of=/dev/null\x00 '] + # 2024-04-01T19:17:04.995276Z WARNING ExtHandler ExtHandler [CGroupsException] The agent's cgroup includes unexpected processes: ['[PID: 3285] /usr/bin/python3\x00/var/lib/waagent/Microsoft.Azure.Monitor.AzureM', '[PID: 3286] /usr/bin/python3\x00/var/lib/waagent/Microsoft.Azure.Monitor.AzureM'] + {'message': r"The agent's cgroup includes unexpected processes"}, + {'message': r"Found unexpected processes in the agent cgroup before agent enable cgroups"} + ] + return ignore_rules + + +if __name__ == "__main__": + AgentCgroupsProcessCheck.run_from_command_line() diff --git a/tests_e2e/tests/agent_ext_workflow/extension_workflow.py b/tests_e2e/tests/agent_ext_workflow/extension_workflow.py index b5a377e726..3f25c6a6bc 100644 --- a/tests_e2e/tests/agent_ext_workflow/extension_workflow.py +++ b/tests_e2e/tests/agent_ext_workflow/extension_workflow.py @@ -114,7 +114,7 @@ def assert_instance_view(self, data=None): def assert_data_in_instance_view(self, instance_view: VirtualMachineExtensionInstanceView): log.info("Asserting extension status ...") status_message = instance_view.statuses[0].message - log.info("Status message: %s" % status_message) + log.info("Status message: %s", status_message) with soft_assertions(): expected_ext_version = "%s-%s" % (self.name, self.version) diff --git a/tests_e2e/tests/agent_publish/agent_publish.py b/tests_e2e/tests/agent_publish/agent_publish.py index 33ba65db79..83c3f71607 100644 --- a/tests_e2e/tests/agent_publish/agent_publish.py +++ b/tests_e2e/tests/agent_publish/agent_publish.py @@ -18,10 +18,13 @@ # import uuid from datetime import datetime -from typing import Any, Dict, List + +from assertpy import fail from tests_e2e.tests.lib.agent_test import AgentVmTest from tests_e2e.tests.lib.agent_test_context import AgentVmTestContext +from 
tests_e2e.tests.lib.agent_update_helpers import request_rsm_update +from tests_e2e.tests.lib.retry import retry_if_false from tests_e2e.tests.lib.vm_extension_identifier import VmExtensionIds, VmExtensionIdentifier from tests_e2e.tests.lib.logging import log from tests_e2e.tests.lib.ssh_client import SshClient @@ -36,40 +39,126 @@ class AgentPublishTest(AgentVmTest): def __init__(self, context: AgentVmTestContext): super().__init__(context) self._ssh_client: SshClient = self._context.create_ssh_client() + self._published_version = self._get_published_version() def run(self): """ we run the scenario in the following steps: 1. Print the current agent version before the update 2. Prepare the agent for the update - 3. Check for agent update from the log - 4. Print the agent version after the update - 5. Ensure CSE is working + 3. Check for agent update from the log and waagent version + 4. Ensure CSE is working """ self._get_agent_info() - self._prepare_agent() - self._check_update() - self._get_agent_info() + + log.info("Testing rsm update flow....") + self._prepare_agent_for_rsm_update() + self._check_update_from_log() + self._verify_current_agent_version() + self._check_cse() + + log.info("Testing self update flow....") + self._prepare_agent_for_self_update() + self._check_update_from_log() + self._verify_current_agent_version() + self._check_cse() def get_ignore_errors_before_timestamp(self) -> datetime: timestamp = self._ssh_client.run_command("agent_publish-get_agent_log_record_timestamp.py") return datetime.strptime(timestamp.strip(), u'%Y-%m-%d %H:%M:%S.%f') + def _get_published_version(self): + """ + Gets version from test_args if provided, else use the release version from source code version.py + """ + if hasattr(self._context, "published_version"): + return self._context.published_version + + version = self._ssh_client.run_command("pypy3 -c 'from azurelinuxagent.common.version import AGENT_VERSION; print(AGENT_VERSION)'").rstrip() + return version + def 
_get_agent_info(self) -> None: stdout: str = self._ssh_client.run_command("waagent-version", use_sudo=True) log.info('Agent info \n%s', stdout) - def _prepare_agent(self) -> None: + def _verify_agent_reported_supported_feature_flag(self): + """ + RSM update rely on supported feature flag that agent sends to CRP.So, checking if GA reports feature flag from reported status + """ + log.info( + "Executing verify_versioning_supported_feature.py remote script to verify agent reported supported feature flag, so that CRP can send RSM update request") + self._run_remote_test(self._ssh_client, "agent_update-verify_versioning_supported_feature.py", use_sudo=True) + log.info("Successfully verified that Agent reported VersioningGovernance supported feature flag") + + def _check_rsm_gs(self, requested_version: str) -> None: + # This checks if RSM GS available to the agent after we send the rsm update request + log.info( + 'Executing wait_for_rsm_gs.py remote script to verify latest GS contain requested version after rsm update requested') + self._run_remote_test(self._ssh_client, f"agent_update-wait_for_rsm_gs.py --version {requested_version}", + use_sudo=True) + log.info('Verified latest GS contain requested version after rsm update requested') + + def _prepare_agent_for_rsm_update(self) -> None: + """ + This method prepares the agent for the RSM update + """ + # First we update the agent to latest version like prod + # Next send RSM update request for new published test version + log.info( + 'Updating agent config flags to allow and download test versions') + output: str = self._ssh_client.run_command( + "update-waagent-conf AutoUpdate.Enabled=y AutoUpdate.UpdateToLatestVersion=y", use_sudo=True) + log.info('Successfully updated agent update config \n %s', output) + + self._verify_agent_reported_supported_feature_flag() + arch_type = self._ssh_client.get_architecture() + request_rsm_update(self._published_version, self._context.vm, arch_type) + 
self._check_rsm_gs(self._published_version) + + output: str = self._ssh_client.run_command( + "update-waagent-conf Debug.EnableGAVersioning=y AutoUpdate.GAFamily=Test", use_sudo=True) + log.info('Successfully enabled rsm updates \n %s', output) + + def _prepare_agent_for_self_update(self) -> None: + """ + This method prepares the agent for the self update + """ log.info("Modifying agent update related config flags and renaming the log file") - self._run_remote_test(self._ssh_client, "sh -c 'agent-service stop && mv /var/log/waagent.log /var/log/waagent.$(date --iso-8601=seconds).log && update-waagent-conf AutoUpdate.UpdateToLatestVersion=y AutoUpdate.GAFamily=Test AutoUpdate.Enabled=y Extensions.Enabled=y Debug.EnableGAVersioning=n'", use_sudo=True) - log.info('Renamed log file and updated agent-update DownloadNewAgents GAFamily config flags') + setup_script = ("agent-service stop && mv /var/log/waagent.log /var/log/waagent.$(date --iso-8601=seconds).log && " + "rm -rf /var/lib/waagent/WALinuxAgent-* && " + "update-waagent-conf AutoUpdate.UpdateToLatestVersion=y AutoUpdate.GAFamily=Test AutoUpdate.Enabled=y Extensions.Enabled=y Debug.EnableGAVersioning=n") + self._run_remote_test(self._ssh_client, f"sh -c '{setup_script}'", use_sudo=True) + log.info('Renamed log file and updated self-update config flags') - def _check_update(self) -> None: + def _check_update_from_log(self) -> None: log.info("Verifying for agent update status") - self._run_remote_test(self._ssh_client, "agent_publish-check_update.py") + self._run_remote_test(self._ssh_client, f"agent_publish-check_update.py --published-version {self._published_version}") log.info('Successfully checked the agent update') + def _verify_current_agent_version(self) -> None: + """ + Verify current agent version running on published version + """ + + def _check_agent_version(version: str) -> bool: + waagent_version: str = self._ssh_client.run_command("waagent-version", use_sudo=True) + expected_version = f"Goal state 
agent: {version}" + if expected_version in waagent_version: + return True + else: + return False + + waagent_version: str = "" + log.info("Verifying agent updated to published version: {0}".format(self._published_version)) + success: bool = retry_if_false(lambda: _check_agent_version(self._published_version)) + if not success: + fail("Guest agent didn't update to published version {0} but found \n {1}. \n ".format( + self._published_version, waagent_version)) + waagent_version: str = self._ssh_client.run_command("waagent-version", use_sudo=True) + log.info( + f"Successfully verified agent updated to published version. Current agent version running:\n {waagent_version}") + def _check_cse(self) -> None: custom_script_2_1 = VirtualMachineExtensionClient( self._context.vm, @@ -86,20 +175,6 @@ def _check_cse(self) -> None: ) custom_script_2_1.assert_instance_view(expected_version="2.1", expected_message=message) - def get_ignore_error_rules(self) -> List[Dict[str, Any]]: - ignore_rules = [ - # - # This is expected as latest version can be the less than test version - # - # WARNING ExtHandler ExtHandler Agent WALinuxAgent-9.9.9.9 is permanently blacklisted - # - { - 'message': r"Agent WALinuxAgent-9.9.9.9 is permanently blacklisted" - } - - ] - return ignore_rules - if __name__ == "__main__": AgentPublishTest.run_from_command_line() diff --git a/tests_e2e/tests/agent_update/rsm_update.py b/tests_e2e/tests/agent_update/rsm_update.py index 89c186a2f1..ad2222d11e 100644 --- a/tests_e2e/tests/agent_update/rsm_update.py +++ b/tests_e2e/tests/agent_update/rsm_update.py @@ -23,22 +23,16 @@ # The test verifies agent update for rsm workflow. This test covers three scenarios downgrade, upgrade and no update. # For each scenario, we initiate the rsm request with target version and then verify agent updated to that target version. 
# -import json import re from typing import List, Dict, Any -import requests from assertpy import assert_that, fail -from azure.identity import DefaultAzureCredential -from azure.mgmt.compute.models import VirtualMachine -from msrestazure.azure_cloud import Cloud from tests_e2e.tests.lib.agent_test import AgentVmTest from tests_e2e.tests.lib.agent_test_context import AgentVmTestContext -from tests_e2e.tests.lib.azure_clouds import AZURE_CLOUDS +from tests_e2e.tests.lib.agent_update_helpers import request_rsm_update from tests_e2e.tests.lib.logging import log from tests_e2e.tests.lib.retry import retry_if_false -from tests_e2e.tests.lib.virtual_machine_client import VirtualMachineClient class RsmUpdateBvt(AgentVmTest): @@ -71,6 +65,7 @@ def get_ignore_error_rules(self) -> List[Dict[str, Any]]: return ignore_rules def run(self) -> None: + arch_type = self._ssh_client.get_architecture() # retrieve the installed agent version in the vm before run the scenario self._retrieve_installed_agent_version() # Allow agent to send supported feature flag @@ -81,7 +76,7 @@ def run(self) -> None: log.info("Current agent version running on the vm before update is \n%s", stdout) self._downgrade_version: str = "2.3.15.0" log.info("Attempting downgrade version %s", self._downgrade_version) - self._request_rsm_update(self._downgrade_version) + request_rsm_update(self._downgrade_version, self._context.vm, arch_type) self._check_rsm_gs(self._downgrade_version) self._prepare_agent() # Verify downgrade scenario @@ -94,7 +89,7 @@ def run(self) -> None: log.info("Current agent version running on the vm before update is \n%s", stdout) upgrade_version: str = "2.3.15.1" log.info("Attempting upgrade version %s", upgrade_version) - self._request_rsm_update(upgrade_version) + request_rsm_update(upgrade_version, self._context.vm, arch_type) self._check_rsm_gs(upgrade_version) self._verify_guest_agent_update(upgrade_version) self._verify_agent_reported_update_status(upgrade_version) @@ -105,7 +100,7 
@@ def run(self) -> None: log.info("Current agent version running on the vm before update is \n%s", stdout) current_version: str = "2.3.15.1" log.info("Attempting update version same as current version %s", current_version) - self._request_rsm_update(current_version) + request_rsm_update(current_version, self._context.vm, arch_type) self._check_rsm_gs(current_version) self._verify_guest_agent_update(current_version) self._verify_agent_reported_update_status(current_version) @@ -117,7 +112,7 @@ def run(self) -> None: log.info("Current agent version running on the vm before update is \n%s", stdout) version: str = "1.5.0.0" log.info("Attempting requested version %s", version) - self._request_rsm_update(version) + request_rsm_update(version, self._context.vm, arch_type) self._check_rsm_gs(version) self._verify_no_guest_agent_update(version) self._verify_agent_reported_update_status(version) @@ -146,64 +141,6 @@ def _prepare_agent(self) -> None: "update-waagent-conf AutoUpdate.UpdateToLatestVersion=y Debug.EnableGAVersioning=y AutoUpdate.GAFamily=Test", use_sudo=True) log.info('Successfully updated agent update config \n %s', output) - @staticmethod - def _verify_agent_update_flag_enabled(vm: VirtualMachineClient) -> bool: - result: VirtualMachine = vm.get_model() - flag: bool = result.os_profile.linux_configuration.enable_vm_agent_platform_updates - if flag is None: - return False - return flag - - def _enable_agent_update_flag(self, vm: VirtualMachineClient) -> None: - osprofile = { - "location": self._context.vm.location, # location is required field - "properties": { - "osProfile": { - "linuxConfiguration": { - "enableVMAgentPlatformUpdates": True - } - } - } - } - log.info("updating the vm with osProfile property:\n%s", osprofile) - vm.update(osprofile) - - def _request_rsm_update(self, requested_version: str) -> None: - """ - This method is to simulate the rsm request. 
- First we ensure the PlatformUpdates enabled in the vm and then make a request using rest api - """ - if not self._verify_agent_update_flag_enabled(self._context.vm): - # enable the flag - log.info("Attempting vm update to set the enableVMAgentPlatformUpdates flag") - self._enable_agent_update_flag(self._context.vm) - log.info("Updated the enableVMAgentPlatformUpdates flag to True") - else: - log.info("Already enableVMAgentPlatformUpdates flag set to True") - - cloud: Cloud = AZURE_CLOUDS[self._context.vm.cloud] - credential: DefaultAzureCredential = DefaultAzureCredential(authority=cloud.endpoints.active_directory) - token = credential.get_token(cloud.endpoints.resource_manager + "/.default") - headers = {'Authorization': 'Bearer ' + token.token, 'Content-Type': 'application/json'} - # Later this api call will be replaced by azure-python-sdk wrapper - base_url = cloud.endpoints.resource_manager - url = base_url + "/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Compute/virtualMachines/{2}/" \ - "UpgradeVMAgent?api-version=2022-08-01".format(self._context.vm.subscription, - self._context.vm.resource_group, - self._context.vm.name) - data = { - "target": "Microsoft.OSTCLinuxAgent.Test", - "targetVersion": requested_version - } - - log.info("Attempting rsm upgrade post request to endpoint: {0} with data: {1}".format(url, data)) - response = requests.post(url, data=json.dumps(data), headers=headers) - if response.status_code == 202: - log.info("RSM upgrade request accepted") - else: - raise Exception("Error occurred while making RSM upgrade request. 
Status code : {0} and msg: {1}".format( - response.status_code, response.content)) - def _verify_guest_agent_update(self, requested_version: str) -> None: """ Verify current agent version running on rsm requested version diff --git a/tests_e2e/tests/agent_update/self_update.py b/tests_e2e/tests/agent_update/self_update.py index 2aedb72f41..947c26ecc8 100644 --- a/tests_e2e/tests/agent_update/self_update.py +++ b/tests_e2e/tests/agent_update/self_update.py @@ -123,13 +123,12 @@ def _check_agent_version(latest_version: str) -> bool: else: return False - waagent_version: str = "" log.info("Verifying agent updated to latest version: {0}".format(latest_version)) success: bool = retry_if_false(lambda: _check_agent_version(latest_version), delay=60) + waagent_version: str = self._ssh_client.run_command("waagent-version", use_sudo=True) if not success: fail("Guest agent didn't update to latest version {0} but found \n {1}".format( latest_version, waagent_version)) - waagent_version: str = self._ssh_client.run_command("waagent-version", use_sudo=True) log.info( f"Successfully verified agent updated to latest version. Current agent version running:\n {waagent_version}") diff --git a/tests_e2e/tests/cgroup_v2_disabled/cgroup_v2_disabled.py b/tests_e2e/tests/cgroup_v2_disabled/cgroup_v2_disabled.py new file mode 100644 index 0000000000..7ab0ca0ff8 --- /dev/null +++ b/tests_e2e/tests/cgroup_v2_disabled/cgroup_v2_disabled.py @@ -0,0 +1,76 @@ +#!/usr/bin/env python3 + +# Microsoft Azure Linux Agent +# +# Copyright 2018 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import time + +from assertpy import fail + +from tests_e2e.tests.lib.agent_test import AgentVmTest +from tests_e2e.tests.lib.agent_test_context import AgentVmTestContext +from tests_e2e.tests.lib.logging import log +from tests_e2e.tests.lib.shell import CommandError +from tests_e2e.tests.lib.ssh_client import SshClient + + +class Cgroupv2Disabled(AgentVmTest): + """ + The test verifies that the agent does not enable resource enforcement and monitoring on machines which are using + cgroup v2. It also checks that the agent correctly determined the controller mount points. This test will be + removed once cgroup v2 is supported. + """ + + def __init__(self, context: AgentVmTestContext): + super().__init__(context) + self._ssh_client: SshClient = self._context.create_ssh_client() + + def check_agent_log_contains(self, data, assertion): + try: + self._ssh_client.run_command("grep \"{0}\" /var/log/waagent.log".format(data)) + except CommandError: + fail("{0}".format(assertion)) + + def run(self): + # Cgroup configurator is initialized when agent is started, and before the goal state processing period is + # logged. Wait until the agent logs the goal state period before checking for cgroup initialization logs. 
+ log.info("Wait for cgroup configurator to be initialized...") + for _ in range(15): + try: + self._ssh_client.run_command("grep 'Goal State Period:' /var/log/waagent.log") + break + except CommandError: + log.info("The Agent has not initialized cgroups yet, will check again after a short delay") + time.sleep(60) + else: + raise Exception("Timeout while waiting for the Agent to initialize cgroups") + + # Verify that the agent chose v2 for resource enforcement and monitoring + log.info("") + log.info("Checking that the agent chose cgroup v2 api for resource enforcement and monitoring...") + self.check_agent_log_contains('Using cgroup v2 for resource enforcement and monitoring', 'The agent should choose v2 for api resource enforcement and monitoring') + + # Verify that the agent does not support cgroup v2 + log.info("") + log.info("Checking that the agent does not use cgroup v2 for resource enforcement and monitoring...") + self.check_agent_log_contains('Agent and extensions resource monitoring is not currently supported on cgroup v2', + 'The agent should not attempt to use cgroup v2 for resource enforcement and monitoring') + self.check_agent_log_contains('Agent cgroups enabled: False', + 'The agent should not enable cgroups when system is using v2') + + +if __name__ == "__main__": + Cgroupv2Disabled.run_from_command_line() diff --git a/tests_e2e/tests/ext_sequencing/ext_sequencing.py b/tests_e2e/tests/ext_sequencing/ext_sequencing.py index 69c3a7291a..7bd9c93dfa 100644 --- a/tests_e2e/tests/ext_sequencing/ext_sequencing.py +++ b/tests_e2e/tests/ext_sequencing/ext_sequencing.py @@ -22,6 +22,7 @@ # validates they are enabled in order of dependencies. 
# import copy +import random import re import uuid from datetime import datetime @@ -69,7 +70,7 @@ def __init__(self, context: AgentVmTestContext): @staticmethod def _get_dependency_map(extensions: List[Dict[str, Any]]) -> Dict[str, Dict[str, Any]]: - dependency_map: Dict[str, Dict[str, Any]] = dict() + dependency_map: Dict[str, Dict[str, Any]] = {} for ext in extensions: ext_name = ext['name'] @@ -95,10 +96,8 @@ def _get_sorted_extension_names(extensions: List[VirtualMachineScaleSetVMExtensi for ext in extensions: # Only check extensions which succeeded provisioning if "succeeded" in ext.statuses_summary[0].code: - enabled_time = ssh_client.run_command(f"ext_sequencing-get_ext_enable_time.py --ext '{extension_full_names[ext.name]}'", use_sudo=True) - formatted_time = datetime.strptime(enabled_time.strip(), u'%Y-%m-%dT%H:%M:%SZ') - if formatted_time < test_case_start: - fail("Extension {0} was not enabled".format(extension_full_names[ext.name])) + enabled_time = ssh_client.run_command(f"ext_sequencing-get_ext_enable_time.py --ext '{extension_full_names[ext.name]}' --after_time '{test_case_start}'", use_sudo=True) + formatted_time = datetime.strptime(enabled_time.strip(), u'%Y-%m-%dT%H:%M:%S.%fZ') enabled_times.append( { "name": ext.name, @@ -116,7 +115,7 @@ def _get_sorted_extension_names(extensions: List[VirtualMachineScaleSetVMExtensi @staticmethod def _validate_extension_sequencing(dependency_map: Dict[str, Dict[str, Any]], sorted_extension_names: List[str], relax_check: bool): - installed_ext = dict() + installed_ext = {} # Iterate through the extensions in the enabled order and validate if their depending extensions are already # enabled prior to that. 
@@ -155,7 +154,7 @@ def _validate_extension_sequencing(dependency_map: Dict[str, Dict[str, Any]], so def run(self): instances_ip_address: List[VmssInstanceIpAddress] = self._context.vmss.get_instances_ip_address() - ssh_clients: Dict[str, SshClient] = dict() + ssh_clients: Dict[str, SshClient] = {} for instance in instances_ip_address: ssh_clients[instance.instance_name] = SshClient(ip_address=instance.ip_address, username=self._context.username, identity_file=self._context.identity_file) @@ -184,7 +183,7 @@ def run(self): } for case in self._test_cases: - test_case_start = datetime.now() + test_case_start = random.choice(list(ssh_clients.values())).run_command("date '+%Y-%m-%d %T'").rstrip() if self._scenario_start == datetime.min: self._scenario_start = test_case_start @@ -201,6 +200,7 @@ def run(self): # test out log.info("") log.info("Test case: {0}".format(case.__name__.replace('_', ' '))) + log.info("Test case start time: {0}".format(test_case_start)) ext_template = copy.deepcopy(base_extension_template) ext_template['resources'][0]['properties']['virtualMachineProfile']['extensionProfile'][ 'extensions'] = extensions @@ -255,7 +255,9 @@ def run(self): def get_ignore_errors_before_timestamp(self) -> datetime: # Ignore errors in the agent log before the first test case starts - return self._scenario_start + if self._scenario_start == datetime.min: + return self._scenario_start + return datetime.strptime(self._scenario_start, u'%Y-%m-%d %H:%M:%S') def get_ignore_error_rules(self) -> List[Dict[str, Any]]: ignore_rules = [ diff --git a/tests_e2e/tests/initial_agent_update/initial_agent_update.py b/tests_e2e/tests/initial_agent_update/initial_agent_update.py new file mode 100644 index 0000000000..455dcd3eee --- /dev/null +++ b/tests_e2e/tests/initial_agent_update/initial_agent_update.py @@ -0,0 +1,82 @@ +#!/usr/bin/env python3 + +# Microsoft Azure Linux Agent +# +# Copyright 2018 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the 
"License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from assertpy import fail + +from tests_e2e.tests.lib.agent_test import AgentVmTest +from tests_e2e.tests.lib.agent_test_context import AgentVmTestContext +from tests_e2e.tests.lib.logging import log +from tests_e2e.tests.lib.retry import retry_if_false + + +class InitialAgentUpdate(AgentVmTest): + """ + This test verifies that the Agent does initial update on very first goal state before it starts processing extensions for new vms that are enrolled into RSM + """ + def __init__(self, context: AgentVmTestContext): + super().__init__(context) + self._ssh_client = self._context.create_ssh_client() + self._test_version = "2.8.9.9" + + def run(self): + + log.info("Testing initial agent update for new vms that are enrolled into RSM") + + log.info("Retrieving latest version from goal state to verify initial agent update") + latest_version: str = self._ssh_client.run_command("agent_update-self_update_latest_version.py --family_type Prod", + use_sudo=True).rstrip() + log.info("Latest Version: %s", latest_version) + self._verify_agent_updated_to_latest_version(latest_version) + self._verify_agent_updated_before_processing_goal_state(latest_version) + + def _verify_agent_updated_to_latest_version(self, latest_version: str) -> None: + """ + Verifies the agent updated to latest version from custom image test version. 
+ """ + log.info("Verifying agent updated to latest version: {0} from custom image test version: {1}".format(latest_version, self._test_version)) + self._verify_guest_agent_update(latest_version) + + def _verify_guest_agent_update(self, latest_version: str) -> None: + """ + Verify current agent version running on latest version + """ + + def _check_agent_version(latest_version: str) -> bool: + waagent_version: str = self._ssh_client.run_command("waagent-version", use_sudo=True) + expected_version = f"Goal state agent: {latest_version}" + if expected_version in waagent_version: + return True + else: + return False + + log.info("Running waagent --version and checking Goal state agent version") + success: bool = retry_if_false(lambda: _check_agent_version(latest_version), delay=60) + waagent_version: str = self._ssh_client.run_command("waagent-version", use_sudo=True) + if not success: + fail("Guest agent didn't update to latest version {0} but found \n {1}".format( + latest_version, waagent_version)) + log.info( + f"Successfully verified agent updated to latest version. 
Current agent version running:\n {waagent_version}") + + def _verify_agent_updated_before_processing_goal_state(self, latest_version) -> None: + log.info("Checking agent log if agent does initial update with self-update before processing goal state") + + output = self._ssh_client.run_command( + "initial_agent_update-agent_update_check_from_log.py --current_version {0} --latest_version {1}".format(self._test_version, latest_version)) + log.info(output) diff --git a/tests_e2e/tests/keyvault_certificates/keyvault_certificates.py b/tests_e2e/tests/keyvault_certificates/keyvault_certificates.py index 7be3f272c0..0638eda305 100755 --- a/tests_e2e/tests/keyvault_certificates/keyvault_certificates.py +++ b/tests_e2e/tests/keyvault_certificates/keyvault_certificates.py @@ -20,6 +20,9 @@ # # This test verifies that the Agent can download and extract KeyVault certificates that use different encryption algorithms (currently EC and RSA). # +import datetime +import time + from assertpy import fail from tests_e2e.tests.lib.agent_test import AgentVmTest @@ -82,13 +85,23 @@ def run(self): log.info("Reapplying the goal state to ensure the test certificates are downloaded.") self._context.vm.reapply() - try: - output = ssh_client.run_command(f"ls {expected_certificates}", use_sudo=True) - log.info("Found all the expected certificates:\n%s", output) - except CommandError as error: - if error.stdout != "": - log.info("Found some of the expected certificates:\n%s", error.stdout) - fail(f"Failed to find certificates\n{error.stderr}") + # If the goal state includes only the certificates, but no extensions, the update/reapply operations may complete before the Agent has downloaded the certificates + # so we retry for a few minutes to ensure the certificates are downloaded. 
+ timed_out = datetime.datetime.utcnow() + datetime.timedelta(minutes=5) + while True: + try: + output = ssh_client.run_command(f"ls {expected_certificates}", use_sudo=True) + log.info("Found all the expected certificates:\n%s", output) + break + except CommandError as error: + if error.stdout == "": + if datetime.datetime.utcnow() < timed_out: + log.info("The certificates have not been downloaded yet, will retry after a short delay.") + time.sleep(30) + continue + else: + log.info("Found some of the expected certificates:\n%s", error.stdout) + fail(f"Failed to find certificates\n{error.stderr}") if __name__ == "__main__": diff --git a/tests_e2e/tests/lib/agent_log.py b/tests_e2e/tests/lib/agent_log.py index 60d42ec75c..9c02406f96 100644 --- a/tests_e2e/tests/lib/agent_log.py +++ b/tests_e2e/tests/lib/agent_log.py @@ -367,7 +367,37 @@ def get_errors(self) -> List[AgentLogRecord]: { 'message': r"AutoUpdate.Enabled property is \*\*Deprecated\*\* now but it's set to different value from AutoUpdate.UpdateToLatestVersion", 'if': lambda r: r.prefix == 'ExtHandler' and r.thread == 'ExtHandler' - } + }, + # + # TODO: Currently Ubuntu minimal does not include the 'iptables' command. Remove this rule once this has been addressed. + # + # We don't have an easy way to distinguish Ubuntu minimal, so this rule suppresses for any Ubuntu. This is OK; if 'iptables' was missing from the regular Ubuntu images, the firewall tests would fail. 
+ # + # 2024-03-27T16:12:35.666460Z ERROR ExtHandler ExtHandler Unable to setup the persistent firewall rules: Unable to determine version of iptables: [Errno 2] No such file or directory: 'iptables' + # 2024-03-27T16:12:35.667253Z WARNING ExtHandler ExtHandler Unable to determine version of iptables: [Errno 2] No such file or directory: 'iptables' + # + { + 'message': r"Unable to determine version of iptables: \[Errno 2\] No such file or directory: 'iptables'", + 'if': lambda r: DISTRO_NAME == 'ubuntu' + }, + # + # TODO: The Daemon has not been updated on Azure Linux 3; remove this message when it is. + # + # 2024-08-05T14:36:48.004865Z WARNING Daemon Daemon Unable to load distro implementation for azurelinux. Using default distro implementation instead. + # + { + 'message': r"Unable to load distro implementation for azurelinux. Using default distro implementation instead.", + 'if': lambda r: DISTRO_NAME == 'azurelinux' and r.prefix == 'Daemon' and r.level == 'WARNING' + }, + # + # TODO: The OMS extension does not support Azure Linux 3; remove this message when it does. 
+ # + # 2024-08-12T17:40:48.375193Z ERROR ExtHandler ExtHandler Event: name=Microsoft.EnterpriseCloud.Monitoring.OmsAgentForLinux, op=Install, message=[ExtensionOperationError] Non-zero exit code: 51, /var/lib/waagent/Microsoft.EnterpriseCloud.Monitoring.OmsAgentForLinux-1.19.0/omsagent_shim.sh -install + # + { + 'message': r"name=Microsoft\.EnterpriseCloud\.Monitoring\.OmsAgentForLinux.+Non-zero exit code: 51", + 'if': lambda r: DISTRO_NAME == 'azurelinux' and DISTRO_VERSION == '3.0' + }, ] def is_error(r: AgentLogRecord) -> bool: diff --git a/tests_e2e/tests/lib/agent_test.py b/tests_e2e/tests/lib/agent_test.py index 0021a8d74b..e4f73d725f 100644 --- a/tests_e2e/tests/lib/agent_test.py +++ b/tests_e2e/tests/lib/agent_test.py @@ -73,6 +73,8 @@ def run_from_command_line(cls): """ Convenience method to execute the test when it is being invoked directly from the command line (as opposed as being invoked from a test framework or library.) + + TODO: Need to implement for reading test specific arguments from command line """ try: if issubclass(cls, AgentVmTest): diff --git a/tests_e2e/tests/lib/agent_update_helpers.py b/tests_e2e/tests/lib/agent_update_helpers.py new file mode 100644 index 0000000000..d48d47bf4e --- /dev/null +++ b/tests_e2e/tests/lib/agent_update_helpers.py @@ -0,0 +1,93 @@ +# +# Copyright 2018 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import json + +import requests +from azure.identity import DefaultAzureCredential +from msrestazure.azure_cloud import Cloud +from azure.mgmt.compute.models import VirtualMachine + +from tests_e2e.tests.lib.azure_clouds import AZURE_CLOUDS +from tests_e2e.tests.lib.logging import log +from tests_e2e.tests.lib.virtual_machine_client import VirtualMachineClient + +# Helper methods for agent update/publish tests + + +def verify_agent_update_flag_enabled(vm: VirtualMachineClient) -> bool: + result: VirtualMachine = vm.get_model() + flag: bool = result.os_profile.linux_configuration.enable_vm_agent_platform_updates + if flag is None: + return False + return flag + + +def enable_agent_update_flag(vm: VirtualMachineClient) -> None: + osprofile = { + "location": vm.location, # location is required field + "properties": { + "osProfile": { + "linuxConfiguration": { + "enableVMAgentPlatformUpdates": True + } + } + } + } + log.info("updating the vm with osProfile property:\n%s", osprofile) + vm.update(osprofile) + + +def request_rsm_update(requested_version: str, vm: VirtualMachineClient, arch_type) -> None: + """ + This method is to simulate the rsm request. 
+ First we ensure the PlatformUpdates enabled in the vm and then make a request using rest api + """ + if not verify_agent_update_flag_enabled(vm): + # enable the flag + log.info("Attempting vm update to set the enableVMAgentPlatformUpdates flag") + enable_agent_update_flag(vm) + log.info("Updated the enableVMAgentPlatformUpdates flag to True") + else: + log.info("Already enableVMAgentPlatformUpdates flag set to True") + + cloud: Cloud = AZURE_CLOUDS[vm.cloud] + credential: DefaultAzureCredential = DefaultAzureCredential(authority=cloud.endpoints.active_directory) + token = credential.get_token(cloud.endpoints.resource_manager + "/.default") + headers = {'Authorization': 'Bearer ' + token.token, 'Content-Type': 'application/json'} + # Later this api call will be replaced by azure-python-sdk wrapper + base_url = cloud.endpoints.resource_manager + url = base_url + "/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Compute/virtualMachines/{2}/" \ + "UpgradeVMAgent?api-version=2022-08-01".format(vm.subscription, + vm.resource_group, + vm.name) + if arch_type == "aarch64": + data = { + "target": "Microsoft.OSTCLinuxAgent.ARM64Test", + "targetVersion": requested_version + } + else: + data = { + "target": "Microsoft.OSTCLinuxAgent.Test", + "targetVersion": requested_version, + } + + log.info("Attempting rsm upgrade post request to endpoint: {0} with data: {1}".format(url, data)) + response = requests.post(url, data=json.dumps(data), headers=headers, timeout=300) + if response.status_code == 202: + log.info("RSM upgrade request accepted") + else: + raise Exception("Error occurred while making RSM upgrade request. 
Status code : {0} and msg: {1}".format( + response.status_code, response.content)) \ No newline at end of file diff --git a/tests_e2e/tests/lib/cgroup_helpers.py b/tests_e2e/tests/lib/cgroup_helpers.py index 6da2865c21..c3bb468b02 100644 --- a/tests_e2e/tests/lib/cgroup_helpers.py +++ b/tests_e2e/tests/lib/cgroup_helpers.py @@ -1,3 +1,4 @@ +import datetime import os import re @@ -6,8 +7,10 @@ from azurelinuxagent.common.osutil import systemd from azurelinuxagent.common.utils import shellutil from azurelinuxagent.common.version import DISTRO_NAME, DISTRO_VERSION +from azurelinuxagent.ga.cgroupapi import get_cgroup_api, SystemdCgroupApiv1 from tests_e2e.tests.lib.agent_log import AgentLog from tests_e2e.tests.lib.logging import log +from tests_e2e.tests.lib.retry import retry_if_false BASE_CGROUP = '/sys/fs/cgroup' AGENT_CGROUP_NAME = 'WALinuxAgent' @@ -93,23 +96,27 @@ def verify_agent_cgroup_assigned_correctly(): This method checks agent is running and assigned to the correct cgroup using service status output """ log.info("===== Verifying the daemon and the agent are assigned to the same correct cgroup using systemd") - service_status = shellutil.run_command(["systemctl", "status", systemd.get_agent_unit_name()]) - log.info("Agent service status output:\n%s", service_status) - is_active = False - is_cgroup_assigned = False cgroup_mount_path = get_agent_cgroup_mount_path() - is_active_pattern = re.compile(r".*Active:\s+active.*") + service_status = "" - for line in service_status.splitlines(): - if re.match(is_active_pattern, line): - is_active = True - elif cgroup_mount_path in line: - is_cgroup_assigned = True + def check_agent_service_cgroup(): + is_active = False + is_cgroup_assigned = False + service_status = shellutil.run_command(["systemctl", "status", systemd.get_agent_unit_name()]) + log.info("Agent service status output:\n%s", service_status) + is_active_pattern = re.compile(r".*Active:\s+active.*") - if not is_active: - fail('walinuxagent service was not 
active/running. Service status:{0}'.format(service_status)) - if not is_cgroup_assigned: - fail('walinuxagent service was not assigned to the expected cgroup:{0}'.format(cgroup_mount_path)) + for line in service_status.splitlines(): + if re.match(is_active_pattern, line): + is_active = True + elif cgroup_mount_path in line: + is_cgroup_assigned = True + + return is_active and is_cgroup_assigned + + # Test check can happen before correct cgroup assigned and relfected in service status. So, retrying the check for few times + if not retry_if_false(check_agent_service_cgroup): + fail('walinuxagent service was not assigned to the expected cgroup:{0}. Current agent status:{1}'.format(cgroup_mount_path, service_status)) log.info("Successfully verified the agent cgroup assigned correctly by systemd\n") @@ -141,10 +148,30 @@ def check_cgroup_disabled_with_unknown_process(): """ Returns True if the cgroup is disabled with unknown process """ + return check_log_message("Disabling resource usage monitoring. Reason: Check on cgroups failed:.+UNKNOWN") + + +def check_log_message(message, after_timestamp=datetime.datetime.min): + """ + Check if the log message is present after the given timestamp(if provided) in the agent log + """ + log.info("Checking log message: {0}".format(message)) for record in AgentLog().read(): - match = re.search("Disabling resource usage monitoring. Reason: Check on cgroups failed:.+UNKNOWN", - record.message, flags=re.DOTALL) - if match is not None: + match = re.search(message, record.message, flags=re.DOTALL) + if match is not None and record.timestamp > after_timestamp: log.info("Found message:\n\t%s", record.text.replace("\n", "\n\t")) return True return False + + +def get_unit_cgroup_proc_path(unit_name, controller): + """ + Returns the cgroup.procs path for the given unit and controller. 
+ """ + cgroups_api = get_cgroup_api() + unit_cgroup = cgroups_api.get_unit_cgroup(unit_name=unit_name, cgroup_name="test cgroup") + if isinstance(cgroups_api, SystemdCgroupApiv1): + return unit_cgroup.get_controller_procs_path(controller=controller) + else: + return unit_cgroup.get_procs_path() + diff --git a/tests_e2e/tests/lib/resource_group_client.py b/tests_e2e/tests/lib/resource_group_client.py index 9ca07a2602..30f82ccec2 100644 --- a/tests_e2e/tests/lib/resource_group_client.py +++ b/tests_e2e/tests/lib/resource_group_client.py @@ -70,5 +70,11 @@ def delete(self) -> None: log.info("Deleting resource group %s (no wait)", self) self._resource_client.resource_groups.begin_delete(self.name) # Do not wait for the deletion to complete + def is_exists(self) -> bool: + """ + Checks if the resource group exists + """ + return self._resource_client.resource_groups.check_existence(self.name) + def __str__(self): return f"{self.name}" diff --git a/tests_e2e/tests/lib/retry.py b/tests_e2e/tests/lib/retry.py index db0a52fcf2..9c045ae74d 100644 --- a/tests_e2e/tests/lib/retry.py +++ b/tests_e2e/tests/lib/retry.py @@ -22,7 +22,8 @@ from tests_e2e.tests.lib.shell import CommandError -def execute_with_retry(operation: Callable[[], Any]) -> Any: +# R1710: Either all return statements in a function should return an expression, or none of them should. (inconsistent-return-statements) +def execute_with_retry(operation: Callable[[], Any]) -> Any: # pylint: disable=inconsistent-return-statements """ Some Azure errors (e.g. throttling) are retryable; this method attempts the given operation retrying a few times (after a short delay) if the error includes the string "RetryableError" @@ -79,7 +80,8 @@ def retry_if_false(operation: Callable[[], bool], attempts: int = 5, delay: int return success -def retry(operation: Callable[[], Any], attempts: int = 5, delay: int = 30) -> Any: +# R1710: Either all return statements in a function should return an expression, or none of them should. 
(inconsistent-return-statements) +def retry(operation: Callable[[], Any], attempts: int = 5, delay: int = 30) -> Any: # pylint: disable=inconsistent-return-statements """ This method attempts the given operation retrying a few times on exceptions. Returns the value returned by the operation. """ diff --git a/tests_e2e/tests/lib/virtual_machine_client.py b/tests_e2e/tests/lib/virtual_machine_client.py index 5d6e471b9c..c4181be5a2 100644 --- a/tests_e2e/tests/lib/virtual_machine_client.py +++ b/tests_e2e/tests/lib/virtual_machine_client.py @@ -171,7 +171,7 @@ def restart( instance_view = self.get_instance_view() power_state = [s.code for s in instance_view.statuses if "PowerState" in s.code] if len(power_state) != 1: - raise Exception(f"Could not find PowerState in the instance view statuses:\n{json.dumps(instance_view.statuses)}") + raise Exception(f"Could not find PowerState in the instance view statuses:\n{json.dumps(instance_view.serialize(), indent=2)}") log.info("VM's Power State: %s", power_state[0]) if power_state[0] == "PowerState/running": # We may get an instance view captured before the reboot actually happened; verify diff --git a/tests_e2e/tests/log_collector/log_collector.py b/tests_e2e/tests/log_collector/log_collector.py new file mode 100755 index 0000000000..fed8159c0f --- /dev/null +++ b/tests_e2e/tests/log_collector/log_collector.py @@ -0,0 +1,82 @@ +#!/usr/bin/env python3 + +# Microsoft Azure Linux Agent +# +# Copyright 2018 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +import re +import time + +from assertpy import fail + +import tests_e2e.tests.lib.logging +from azurelinuxagent.common.utils.shellutil import CommandError +from tests_e2e.tests.lib.agent_test import AgentVmTest + + +class LogCollector(AgentVmTest): + """ + Tests that the log collector logs the expected behavior on periodic runs. + """ + def run(self): + ssh_client = self._context.create_ssh_client() + ssh_client.run_command("update-waagent-conf Logs.Collect=y Debug.EnableCgroupV2ResourceLimiting=y Debug.LogCollectorInitialDelay=60", use_sudo=True) + # Wait for log collector to finish uploading logs + for _ in range(3): + time.sleep(90) + try: + ssh_client.run_command("grep 'Successfully uploaded logs' /var/log/waagent.log") + break + except CommandError: + tests_e2e.tests.lib.logging.log.info("The Agent has not finished log collection, will check again after a short delay") + else: + raise Exception("Timeout while waiting for the Agent to finish log collection") + + # Get any agent logs between log collector start and finish + try: + # We match the first full log collector run in the agent log (this test just needs to validate any full log collector run, does not matter if it's the first or last) + lc_start_pattern = "INFO CollectLogsHandler ExtHandler Starting log collection" + lc_end_pattern = "INFO CollectLogsHandler ExtHandler Successfully uploaded logs" + output = ssh_client.run_command("sed -n '/{0}/,/{1}/{{p;/{1}/q}}' /var/log/waagent.log".format(lc_start_pattern, lc_end_pattern)).rstrip().splitlines() + except Exception as e: + raise Exception("Unable to get log collector logs from waagent.log: {0}".format(e)) + + # These logs indicate a successful log collector run with resource enforcement and monitoring + expected = [ + r'.*Starting log collection', + r'.*Using cgroup v\d for resource enforcement and monitoring', + r'.*cpu(,cpuacct)? 
controller for cgroup: azure-walinuxagent-logcollector \[\/sys\/fs\/cgroup(\/cpu,cpuacct)?\/azure.slice\/azure-walinuxagent.slice\/azure-walinuxagent\-logcollector.slice\/collect\-logs.scope\]', + r'.*memory controller for cgroup: azure-walinuxagent-logcollector \[\/sys\/fs\/cgroup(\/memory)?\/azure.slice\/azure-walinuxagent.slice\/azure-walinuxagent\-logcollector.slice\/collect\-logs.scope\]', + r'.*Log collection successfully completed', + r'.*Successfully collected logs', + r'.*Successfully uploaded logs' + ] + + # Filter output to only include relevant log collector logs + lc_logs = [log for log in output if len([pattern for pattern in expected if re.match(pattern, log)]) > 0] + + # Check that all expected logs exist and are in the correct order + indent = lambda lines: "\n".join([f" {ln}" for ln in lines]) + if len(lc_logs) == len(expected) and all([re.match(expected[i], lc_logs[i]) is not None for i in range(len(expected))]): + tests_e2e.tests.lib.logging.log.info("The log collector run completed as expected.\nLog messages:\n%s", indent(lc_logs)) + else: + fail(f"The log collector run did not complete as expected.\nExpected:\n{indent(expected)}\nActual:\n{indent(lc_logs)}") + + ssh_client.run_command("update-waagent-conf Debug.EnableCgroupV2ResourceLimiting=n Debug.LogCollectorInitialDelay=5*60", + use_sudo=True) + + +if __name__ == "__main__": + LogCollector.run_from_command_line() diff --git a/tests_e2e/tests/publish_hostname/publish_hostname.py b/tests_e2e/tests/publish_hostname/publish_hostname.py index 45a7be85f5..19f7b10b46 100644 --- a/tests_e2e/tests/publish_hostname/publish_hostname.py +++ b/tests_e2e/tests/publish_hostname/publish_hostname.py @@ -26,6 +26,7 @@ import datetime import re +from typing import List, Dict, Any from assertpy import fail from time import sleep @@ -105,7 +106,7 @@ def check_agent_reports_status(self): self._context.username, self._vm_password)) - def retry_ssh_if_connection_reset(self, command: str, use_sudo=False): + def 
retry_ssh_if_connection_reset(self, command: str, use_sudo=False): # pylint: disable=inconsistent-return-statements # The agent may bring the network down and back up to publish the hostname, which can reset the ssh connection. # Adding retry here for connection reset. retries = 3 @@ -204,6 +205,21 @@ def run(self): self.check_agent_reports_status() raise + def get_ignore_error_rules(self) -> List[Dict[str, Any]]: + ignore_rules = [ + # + # We may see temporary network unreachable warnings since we are bringing the network interface down + # + # 2024-02-16T09:27:14.114569Z WARNING MonitorHandler ExtHandler Error in SendHostPluginHeartbeat: [HttpError] [HTTP Failed] GET http://168.63.129.16:32526/health -- IOError [Errno 101] Network is unreachable -- 1 attempts made --- [NOTE: Will not log the same error for the next hour] + # 2024-02-28T05:37:55.562065Z ERROR ExtHandler ExtHandler Error fetching the goal state: [ProtocolError] GET vmSettings [correlation ID: 28de1093-ecb5-4515-ba8e-2ed0c7778e34 eTag: 4648629460326038775]: Request failed: [Errno 101] Network is unreachable + # 2024-02-29T09:30:40.702293Z ERROR ExtHandler ExtHandler Error fetching the goal state: [ProtocolError] [Wireserver Exception] [HttpError] [HTTP Failed] GET http://168.63.129.16/machine/ -- IOError [Errno 101] Network is unreachable -- 6 attempts made + # + { + 'message': r"GET (http://168.63.129.16:32526/health|vmSettings|http://168.63.129.16/machine).*\[Errno 101\] Network is unreachable", + } + ] + return ignore_rules + if __name__ == "__main__": PublishHostname.run_from_command_line() diff --git a/tests_e2e/tests/recover_network_interface/recover_network_interface.py b/tests_e2e/tests/recover_network_interface/recover_network_interface.py index 39799d3752..2d03077caf 100644 --- a/tests_e2e/tests/recover_network_interface/recover_network_interface.py +++ b/tests_e2e/tests/recover_network_interface/recover_network_interface.py @@ -91,19 +91,59 @@ def run(self): # The script should bring the 
primary network interface down and use the agent to recover the interface. These # commands will bring the network down, so they should be executed on the machine using CSE instead of ssh. script = f""" - set -euxo pipefail - ifdown {ifname}; - nic_state=$(nmcli -g general.state device show {ifname}) - echo Primary network interface state before recovering: $nic_state - source /home/{self._context.username}/bin/set-agent-env; - pypy3 -c 'from azurelinuxagent.common.osutil.redhat import RedhatOSUtil; RedhatOSUtil().check_and_recover_nic_state({formatted_ifname})'; - nic_state=$(nmcli -g general.state device show {ifname}); - echo Primary network interface state after recovering: $nic_state + set -uxo pipefail + + # The 'ifdown' network script is used to bring the network interface down. For some distros, this script + # executes nmcli commands which can timeout and return non-zero exit codes. Allow 3 retries in case 'ifdown' + # returns non-zero exit code. This is the same number of retries the agent allows in DefaultOSUtil.restart_if + retries=3; + ifdown_success=false + while [ $retries -gt 0 ] + do + echo Attempting to bring network interface down with ifdown... + ifdown {ifname}; + exit_code=$? + if [ $exit_code -eq 0 ]; then + echo ifdown succeeded + ifdown_success=true + break + fi + echo ifdown failed with exit code $exit_code, try again after 5 seconds... + sleep 5 + ((retries=retries-1)) + done + + # Verify the agent network interface recovery logic only if 'ifdown' succeeded + if ! 
$ifdown_success ; then + # Fail the script if 'ifdown' command didn't succeed + exit 1 + else + # Log the network interface state before attempting to recover the interface + nic_state=$(nmcli -g general.state device show {ifname}) + echo Primary network interface state before recovering: $nic_state + + # Use the agent OSUtil to bring the network interface back up + source /home/{self._context.username}/bin/set-agent-env; + echo Attempting to recover the network interface with the agent... + pypy3 -c 'from azurelinuxagent.common.osutil.redhat import RedhatOSUtil; RedhatOSUtil().check_and_recover_nic_state({formatted_ifname})'; + + # Log the network interface state after attempting to recover the interface + nic_state=$(nmcli -g general.state device show {ifname}); + echo Primary network interface state after recovering: $nic_state + fi """ log.info("") log.info("Using CSE to bring the primary network interface down and call the OSUtil to bring the interface back up. Command to execute: {0}".format(script)) custom_script = VirtualMachineExtensionClient(self._context.vm, VmExtensionIds.CustomScript, resource_name="CustomScript") - custom_script.enable(protected_settings={'commandToExecute': script}, settings={}) + try: + custom_script.enable(protected_settings={'commandToExecute': script}, settings={}) + except TimeoutError: + # Custom script may timeout if attempt to recover the network interface was not successful. The agent won't + # be able to report status for the extension if network is down. Reboot the VM to bring the network back up + # so logs can be collected. + log.info("Custom script did not complete within the timeout. 
Rebooting the VM in attempt to bring the network interface back up...") + self._context.vm.restart(wait_for_boot=True, ssh_client=self._ssh_client) + fail("Custom script did not complete within the timoeut, which indicates the agent may be unable to report status due to network issues.") # Check that the interface was down and brought back up in instance view log.info("") diff --git a/tests_e2e/tests/scripts/agent_cgroups-check_cgroups_agent.py b/tests_e2e/tests/scripts/agent_cgroups-check_cgroups_agent.py index 2f3b877a0b..4f6444462c 100755 --- a/tests_e2e/tests/scripts/agent_cgroups-check_cgroups_agent.py +++ b/tests_e2e/tests/scripts/agent_cgroups-check_cgroups_agent.py @@ -27,6 +27,7 @@ verify_agent_cgroup_assigned_correctly from tests_e2e.tests.lib.logging import log from tests_e2e.tests.lib.remote_test import run_remote_test +from tests_e2e.tests.lib.retry import retry_if_false def verify_if_cgroup_controllers_are_mounted(): @@ -60,22 +61,26 @@ def verify_agent_cgroup_created_on_file_system(): """ log.info("===== Verifying the agent cgroup paths exist on file system") agent_cgroup_mount_path = get_agent_cgroup_mount_path() - all_agent_cgroup_controllers_path_exist = True + log.info("expected agent cgroup mount path: %s", agent_cgroup_mount_path) + missing_agent_cgroup_controllers_path = [] verified_agent_cgroup_controllers_path = [] - log.info("expected agent cgroup mount path: %s", agent_cgroup_mount_path) + def is_agent_cgroup_controllers_path_exist(): + all_controllers_path_exist = True - for controller in AGENT_CONTROLLERS: - agent_controller_path = os.path.join(BASE_CGROUP, controller, agent_cgroup_mount_path[1:]) + for controller in AGENT_CONTROLLERS: + agent_controller_path = os.path.join(BASE_CGROUP, controller, agent_cgroup_mount_path[1:]) - if not os.path.exists(agent_controller_path): - all_agent_cgroup_controllers_path_exist = False - missing_agent_cgroup_controllers_path.append(agent_controller_path) - else: - 
verified_agent_cgroup_controllers_path.append(agent_controller_path) + if not os.path.exists(agent_controller_path): + all_controllers_path_exist = False + missing_agent_cgroup_controllers_path.append(agent_controller_path) + else: + verified_agent_cgroup_controllers_path.append(agent_controller_path) + return all_controllers_path_exist - if not all_agent_cgroup_controllers_path_exist: + # Test check can happen before agent setup cgroup configuration. So, retrying the check for few times + if not retry_if_false(is_agent_cgroup_controllers_path_exist): fail("Agent's cgroup paths couldn't be found on file system. Missing agent cgroups path :{0}.\n Verified agent cgroups path:{1}".format(missing_agent_cgroup_controllers_path, verified_agent_cgroup_controllers_path)) log.info('Verified all agent cgroup paths are present.\n {0}'.format(verified_agent_cgroup_controllers_path)) @@ -90,14 +95,21 @@ def verify_agent_cgroups_tracked(): tracking_agent_cgroup_message_re = r'Started tracking cgroup [^\s]+\s+\[(?P[^\s]+)\]' tracked_cgroups = [] - for record in AgentLog().read(): - match = re.search(tracking_agent_cgroup_message_re, record.message) - if match is not None: - tracked_cgroups.append(match.group('path')) - - for controller in AGENT_CONTROLLERS: - if not any(AGENT_SERVICE_NAME in cgroup_path and controller in cgroup_path for cgroup_path in tracked_cgroups): - fail('Agent {0} is not being tracked. Tracked cgroups:{1}'.format(controller, tracked_cgroups)) + def is_agent_tracking_cgroup(): + tracked_cgroups.clear() + for record in AgentLog().read(): + match = re.search(tracking_agent_cgroup_message_re, record.message) + if match is not None: + tracked_cgroups.append(match.group('path')) + + for controller in AGENT_CONTROLLERS: + if not any(AGENT_SERVICE_NAME in cgroup_path and controller in cgroup_path for cgroup_path in tracked_cgroups): + return False + return True + # Test check can happen before agent starts tracking cgroups. 
So, retrying the check for few times + found = retry_if_false(is_agent_tracking_cgroup) + if not found: + fail('Agent {0} is not being tracked. Tracked cgroups:{1}'.format(AGENT_CONTROLLERS, tracked_cgroups)) log.info("Agent is tracking cgroups correctly.\n%s", tracked_cgroups) diff --git a/tests_e2e/tests/scripts/agent_cgroups_process_check-cgroups_not_enabled.py b/tests_e2e/tests/scripts/agent_cgroups_process_check-cgroups_not_enabled.py new file mode 100755 index 0000000000..a8db751e61 --- /dev/null +++ b/tests_e2e/tests/scripts/agent_cgroups_process_check-cgroups_not_enabled.py @@ -0,0 +1,60 @@ +#!/usr/bin/env pypy3 + +# Microsoft Azure Linux Agent +# +# Copyright 2018 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+
+# This script verifies agent detected unexpected processes in the agent cgroup before cgroup initialization
+
+from assertpy import fail
+
+from azurelinuxagent.common.utils import shellutil
+from tests_e2e.tests.lib.cgroup_helpers import check_agent_quota_disabled, check_log_message
+from tests_e2e.tests.lib.logging import log
+from tests_e2e.tests.lib.retry import retry_if_false
+
+
+def restart_ext_handler():
+    log.info("Restarting the extension handler")
+    shellutil.run_command(["pkill", "-f", "WALinuxAgent.*run-exthandler"])
+
+
+def verify_agent_cgroups_not_enabled():
+    """
+    Verifies that the agent cgroups not enabled when ama extension(unexpected) processes are found in the agent cgroup
+    """
+    log.info("Verifying agent cgroups are not enabled")
+
+    ama_process_found: bool = retry_if_false(lambda: check_log_message("The agent's cgroup includes unexpected processes:.+/var/lib/waagent/Microsoft.Azure.Monitor"))
+    if not ama_process_found:
+        fail("Agent failed to find ama extension processes in the agent cgroup")
+
+    found: bool = retry_if_false(lambda: check_log_message("Found unexpected processes in the agent cgroup before agent enable cgroups"))
+    if not found:
+        fail("Agent failed to find unknown processes in the agent cgroup")
+
+    disabled: bool = retry_if_false(check_agent_quota_disabled)
+    if not disabled:
+        fail("The agent failed to disable its CPUQuota when cgroups were not enabled")
+
+
+def main():
+    restart_ext_handler()
+    verify_agent_cgroups_not_enabled()
+
+
+if __name__ == "__main__":
+    main()
diff --git a/tests_e2e/tests/scripts/agent_cgroups_process_check-unknown_process_check.py b/tests_e2e/tests/scripts/agent_cgroups_process_check-unknown_process_check.py
new file mode 100755
index 0000000000..fff5746cce
--- /dev/null
+++ b/tests_e2e/tests/scripts/agent_cgroups_process_check-unknown_process_check.py
@@ -0,0 +1,96 @@
+#!/usr/bin/env pypy3
+# Microsoft Azure Linux Agent
+#
+# Copyright 2018 Microsoft Corporation
+#
+# Licensed under
the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# This script forces the process check by putting unknown process in the agent's cgroup + +import subprocess +import datetime + +from assertpy import fail + +from azurelinuxagent.common.utils import shellutil +from tests_e2e.tests.lib.cgroup_helpers import check_agent_quota_disabled, check_log_message, get_unit_cgroup_proc_path, AGENT_SERVICE_NAME +from tests_e2e.tests.lib.logging import log +from tests_e2e.tests.lib.retry import retry_if_false + + +def prepare_agent(): + check_time = datetime.datetime.utcnow() + log.info("Executing script update-waagent-conf to enable agent cgroups config flag") + result = shellutil.run_command(["update-waagent-conf", "Debug.CgroupCheckPeriod=20", "Debug.CgroupLogMetrics=y", + "Debug.CgroupDisableOnProcessCheckFailure=y", + "Debug.CgroupDisableOnQuotaCheckFailure=n"]) + log.info("Successfully enabled agent cgroups config flag: {0}".format(result)) + + found: bool = retry_if_false(lambda: check_log_message(" Agent cgroups enabled: True", after_timestamp=check_time)) + if not found: + fail("Agent cgroups not enabled") + + +def creating_dummy_process(): + log.info("Creating dummy process to add to agent's cgroup") + dd_command = ["sleep", "60m"] + proc = subprocess.Popen(dd_command) + return proc.pid + + +def remove_dummy_process(pid): + log.info("Removing dummy process from agent's cgroup") + shellutil.run_command(["kill", "-9", str(pid)]) + + +def disable_agent_cgroups_with_unknown_process(pid): + """ + 
Adding dummy process to the agent's cgroup and verifying that the agent detects the unknown process and disables cgroups + + Note: System may kick the added process out of the cgroups, keeps adding until agent detect that process + """ + + def unknown_process_found(): + cgroup_procs_path = get_unit_cgroup_proc_path(AGENT_SERVICE_NAME, 'cpu,cpuacct') + log.info("Adding dummy process %s to cgroup.procs file %s", pid, cgroup_procs_path) + try: + with open(cgroup_procs_path, 'a') as f: + f.write("\n") + f.write(str(pid)) + except Exception as e: + log.warning("Error while adding process to cgroup.procs file: {0}".format(e)) + return False + + # The log message indicating the check failed is similar to + # 2021-03-29T23:33:15.603530Z INFO MonitorHandler ExtHandler Disabling resource usage monitoring. Reason: Check on cgroups failed: + # [CGroupsException] The agent's cgroup includes unexpected processes: ['[PID: 25826] python3\x00/home/nam/Compute-Runtime-Tux-Pipeline/dungeon_crawler/s'] + found: bool = retry_if_false(lambda: check_log_message( + "Disabling resource usage monitoring. 
Reason: Check on cgroups failed:.+The agent's cgroup includes unexpected processes:.+{0}".format( + pid)), attempts=3) + return found and retry_if_false(check_agent_quota_disabled, attempts=3) + + found: bool = retry_if_false(unknown_process_found, attempts=3) + if not found: + fail("The agent did not detect unknown process: {0}".format(pid)) + + +def main(): + prepare_agent() + pid = creating_dummy_process() + disable_agent_cgroups_with_unknown_process(pid) + remove_dummy_process(pid) + + +if __name__ == "__main__": + main() diff --git a/tests_e2e/tests/scripts/agent_cpu_quota-check_agent_cpu_quota.py b/tests_e2e/tests/scripts/agent_cpu_quota-check_agent_cpu_quota.py index c8aad49f59..29758d02b7 100755 --- a/tests_e2e/tests/scripts/agent_cpu_quota-check_agent_cpu_quota.py +++ b/tests_e2e/tests/scripts/agent_cpu_quota-check_agent_cpu_quota.py @@ -30,7 +30,7 @@ from azurelinuxagent.ga.cgroupconfigurator import _DROP_IN_FILE_CPU_QUOTA from tests_e2e.tests.lib.agent_log import AgentLog from tests_e2e.tests.lib.cgroup_helpers import check_agent_quota_disabled, \ - get_agent_cpu_quota + get_agent_cpu_quota, check_log_message from tests_e2e.tests.lib.logging import log from tests_e2e.tests.lib.remote_test import run_remote_test from tests_e2e.tests.lib.retry import retry_if_false @@ -115,7 +115,7 @@ def check_agent_log_for_metrics() -> bool: if match is not None: processor_time.append(float(match.group(1))) else: - match = re.search(r"Throttled Time\s*\[walinuxagent.service\]\s*=\s*([0-9.]+)", record.message) + match = re.search(r"Throttled Time \(s\)\s*\[walinuxagent.service\]\s*=\s*([0-9.]+)", record.message) if match is not None: throttled_time.append(float(match.group(1))) if len(processor_time) < 1 or len(throttled_time) < 1: @@ -146,45 +146,18 @@ def wait_for_log_message(message, timeout=datetime.timedelta(minutes=5)): fail("The agent did not find [{0}] in its log within the allowed timeout".format(message)) -def verify_process_check_on_agent_cgroups(): - """ - 
This method checks agent detect unexpected processes in its cgroup and disables the CPUQuota - """ - log.info("***Verifying process check on agent cgroups") - log.info("Ensuring agent CPUQuota is enabled and backup the drop-in file to restore later in further tests") - if check_agent_quota_disabled(): - fail("The agent's CPUQuota is not enabled: {0}".format(get_agent_cpu_quota())) - quota_drop_in = os.path.join(systemd.get_agent_drop_in_path(), _DROP_IN_FILE_CPU_QUOTA) - quota_drop_in_backup = quota_drop_in + ".bk" - log.info("Backing up %s to %s...", quota_drop_in, quota_drop_in_backup) - shutil.copy(quota_drop_in, quota_drop_in_backup) - # - # Re-enable Process checks on cgroups and verify that the agent detects unexpected processes in its cgroup and disables the CPUQuota wehen - # that happens - # - shellutil.run_command(["update-waagent-conf", "Debug.CgroupDisableOnProcessCheckFailure=y"]) - - # The log message indicating the check failed is similar to - # 2021-03-29T23:33:15.603530Z INFO MonitorHandler ExtHandler Disabling resource usage monitoring. Reason: Check on cgroups failed: - # [CGroupsException] The agent's cgroup includes unexpected processes: ['[PID: 25826] python3\x00/home/nam/Compute-Runtime-Tux-Pipeline/dungeon_crawler/s'] - wait_for_log_message( - "Disabling resource usage monitoring. 
Reason: Check on cgroups failed:.+The agent's cgroup includes unexpected processes") - disabled: bool = retry_if_false(check_agent_quota_disabled) - if not disabled: - fail("The agent did not disable its CPUQuota: {0}".format(get_agent_cpu_quota())) - - def verify_throttling_time_check_on_agent_cgroups(): """ This method checks agent disables its CPUQuota when it exceeds its throttling limit """ log.info("***Verifying CPU throttling check on agent cgroups") # Now disable the check on unexpected processes and enable the check on throttledtime and verify that the agent disables its CPUQuota when it exceeds its throttling limit - log.info("Re-enabling CPUQuota...") + if check_agent_quota_disabled(): + fail("The agent's CPUQuota is not enabled: {0}".format(get_agent_cpu_quota())) quota_drop_in = os.path.join(systemd.get_agent_drop_in_path(), _DROP_IN_FILE_CPU_QUOTA) quota_drop_in_backup = quota_drop_in + ".bk" - log.info("Restoring %s from %s...", quota_drop_in, quota_drop_in_backup) - shutil.copy(quota_drop_in_backup, quota_drop_in) + log.info("Backing up %s to %s...", quota_drop_in, quota_drop_in_backup) + shutil.copy(quota_drop_in, quota_drop_in_backup) shellutil.run_command(["systemctl", "daemon-reload"]) shellutil.run_command(["update-waagent-conf", "Debug.CgroupDisableOnProcessCheckFailure=n", "Debug.CgroupDisableOnQuotaCheckFailure=y", "Debug.AgentCpuThrottledTimeThreshold=5"]) @@ -205,11 +178,27 @@ def verify_throttling_time_check_on_agent_cgroups(): fail("The agent did not disable its CPUQuota: {0}".format(get_agent_cpu_quota())) +def cleanup_test_setup(): + log.info("Cleaning up test setup") + drop_in_file = os.path.join(systemd.get_agent_drop_in_path(), "99-ExecStart.conf") + if os.path.exists(drop_in_file): + log.info("Removing %s...", drop_in_file) + os.remove(drop_in_file) + shellutil.run_command(["systemctl", "daemon-reload"]) + + check_time = datetime.datetime.utcnow() + shellutil.run_command(["agent-service", "restart"]) + + found: bool = 
retry_if_false(lambda: check_log_message(" Agent cgroups enabled: True", after_timestamp=check_time)) + if not found: + fail("Agent cgroups not enabled yet") + + def main(): prepare_agent() verify_agent_reported_metrics() - verify_process_check_on_agent_cgroups() verify_throttling_time_check_on_agent_cgroups() + cleanup_test_setup() run_remote_test(main) diff --git a/tests_e2e/tests/scripts/agent_cpu_quota-start_service.py b/tests_e2e/tests/scripts/agent_cpu_quota-start_service.py index ba0f5abb23..d595a66133 100755 --- a/tests_e2e/tests/scripts/agent_cpu_quota-start_service.py +++ b/tests_e2e/tests/scripts/agent_cpu_quota-start_service.py @@ -34,7 +34,7 @@ def __init__(self): self._stopped = False def run(self): - threading.current_thread().setName("*Stress*") + threading.current_thread().name = "*Stress*" while not self._stopped: try: @@ -47,15 +47,15 @@ def run(self): while i < 30 and not self._stopped: time.sleep(1) i += 1 - except Exception as exception: - logger.error("{0}:\n{1}", exception, traceback.format_exc()) + except Exception as run_exception: + logger.error("{0}:\n{1}", run_exception, traceback.format_exc()) def stop(self): self._stopped = True try: - threading.current_thread().setName("*StartService*") + threading.current_thread().name = "*StartService*" logger.set_prefix("E2ETest") logger.add_logger_appender(logger.AppenderType.FILE, logger.LogLevel.INFO, "/var/log/waagent.log") diff --git a/tests_e2e/tests/scripts/agent_firewall-verify_all_firewall_rules.py b/tests_e2e/tests/scripts/agent_firewall-verify_all_firewall_rules.py index 2d165bc175..b19e5b6c1e 100755 --- a/tests_e2e/tests/scripts/agent_firewall-verify_all_firewall_rules.py +++ b/tests_e2e/tests/scripts/agent_firewall-verify_all_firewall_rules.py @@ -91,7 +91,8 @@ def delete_iptable_rules(commands: List[List[str]] = None) -> None: cmd = None for command in commands: cmd = command - retry(lambda: execute_cmd(cmd=cmd), attempts=3) + # W0640: Cell variable cmd defined in loop 
(cell-var-from-loop) + retry(lambda: execute_cmd(cmd=cmd), attempts=3) # pylint: disable=W0640 except Exception as e: raise Exception("Error -- Failed to Delete the ip table rule set {0}".format(e)) diff --git a/tests_e2e/tests/scripts/agent_persist_firewall-access_wireserver b/tests_e2e/tests/scripts/agent_persist_firewall-access_wireserver index c38e0a5706..e4afc406a5 100755 --- a/tests_e2e/tests/scripts/agent_persist_firewall-access_wireserver +++ b/tests_e2e/tests/scripts/agent_persist_firewall-access_wireserver @@ -19,6 +19,11 @@ # Helper script which tries to access Wireserver on system reboot. Also prints out iptable rules if non-root and still # able to access Wireserver +if [[ $# -ne 1 ]]; then + echo "Usage: agent_persist_firewall-access_wireserver " + exit 1 +fi +TEST_USER=$1 USER=$(whoami) echo "$(date --utc +%FT%T.%3NZ): Running as user: $USER" @@ -27,12 +32,25 @@ function check_online ping 8.8.8.8 -c 1 -i .2 -t 30 > /dev/null 2>&1 && echo 0 || echo 1 } +function ping_localhost +{ + ping 127.0.0.1 -c 1 -i .2 -t 30 > /dev/null 2>&1 && echo 0 || echo 1 +} + +function socket_connection +{ + output=$(python3 /home/"$TEST_USER"/bin/agent_persist_firewall-check_connectivity.py 2>&1) + echo $output +} + # Check more, sleep less MAX_CHECKS=10 # Initial starting value for checks CHECKS=0 IS_ONLINE=$(check_online) +echo "Checking network connectivity..." +echo "Running ping to 8.8.8.8 option" # Loop while we're not online. 
while [ "$IS_ONLINE" -eq 1 ]; do @@ -48,6 +66,19 @@ while [ "$IS_ONLINE" -eq 1 ]; do done +# logging other options output to compare and evaluate which option is more stable when ping to 8.8.8.8 failed +if [ "$IS_ONLINE" -eq 1 ]; then + echo "Checking other options to see if network is accessible" + echo "Running ping to localhost option" + PING_LOCAL=$(ping_localhost) + if [ "$PING_LOCAL" -eq 1 ]; then + echo "Ping to localhost failed" + else + echo "Ping to localhost succeeded" + fi + echo "Running socket connection to wireserver:53 option" + socket_connection +fi if [ "$IS_ONLINE" -eq 1 ]; then # We will never be able to get online. Kill script. echo "Unable to connect to network, exiting now" @@ -60,7 +91,7 @@ echo "Trying to contact Wireserver as $USER to see if accessible" echo "" echo "IPTables before accessing Wireserver" -sudo iptables -t security -L -nxv +sudo iptables -t security -L -nxv -w echo "" WIRE_IP=$(cat /var/lib/waagent/WireServerEndpoint 2>/dev/null || echo '168.63.129.16' | tr -d '[:space:]') diff --git a/tests_e2e/tests/scripts/agent_persist_firewall-check_connectivity.py b/tests_e2e/tests/scripts/agent_persist_firewall-check_connectivity.py new file mode 100755 index 0000000000..523109dc49 --- /dev/null +++ b/tests_e2e/tests/scripts/agent_persist_firewall-check_connectivity.py @@ -0,0 +1,30 @@ +import socket +import sys + +WIRESERVER_ENDPOINT_FILE = '/var/lib/waagent/WireServerEndpoint' +WIRESERVER_IP = '168.63.129.16' + + +def get_wireserver_ip() -> str: + try: + with open(WIRESERVER_ENDPOINT_FILE, 'r') as f: + wireserver_ip = f.read() + except Exception: + wireserver_ip = WIRESERVER_IP + return wireserver_ip + + +def main(): + try: + wireserver_ip = get_wireserver_ip() + socket.setdefaulttimeout(3) + socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect((wireserver_ip, 53)) + + print('Socket connection to wire server:53 success') + except: # pylint: disable=W0702 + print('Socket connection to wire server:53 failed') + sys.exit(1) + + 
+if __name__ == "__main__": + main() diff --git a/tests_e2e/tests/scripts/agent_persist_firewall-test_setup b/tests_e2e/tests/scripts/agent_persist_firewall-test_setup index a157e58cbe..2784158a4b 100755 --- a/tests_e2e/tests/scripts/agent_persist_firewall-test_setup +++ b/tests_e2e/tests/scripts/agent_persist_firewall-test_setup @@ -25,6 +25,6 @@ if [[ $# -ne 1 ]]; then exit 1 fi -echo "@reboot /home/$1/bin/agent_persist_firewall-access_wireserver > /tmp/reboot-cron-root.log 2>&1" | crontab -u root - -echo "@reboot /home/$1/bin/agent_persist_firewall-access_wireserver > /tmp/reboot-cron-$1.log 2>&1" | crontab -u $1 - +echo "@reboot /home/$1/bin/agent_persist_firewall-access_wireserver $1 > /tmp/reboot-cron-root.log 2>&1" | crontab -u root - +echo "@reboot /home/$1/bin/agent_persist_firewall-access_wireserver $1 > /tmp/reboot-cron-$1.log 2>&1" | crontab -u $1 - update-waagent-conf OS.EnableFirewall=y \ No newline at end of file diff --git a/tests_e2e/tests/scripts/agent_persist_firewall-verify_firewalld_rules_readded.py b/tests_e2e/tests/scripts/agent_persist_firewall-verify_firewalld_rules_readded.py index 5cec654a16..f0c639b2ef 100755 --- a/tests_e2e/tests/scripts/agent_persist_firewall-verify_firewalld_rules_readded.py +++ b/tests_e2e/tests/scripts/agent_persist_firewall-verify_firewalld_rules_readded.py @@ -43,7 +43,8 @@ def delete_firewalld_rules(commands=None): cmd = None for command in commands: cmd = command - retry(lambda: execute_cmd(cmd=cmd), attempts=3) + # W0640: Cell variable cmd defined in loop (cell-var-from-loop) + retry(lambda: execute_cmd(cmd=cmd), attempts=3) # pylint: disable=W0640 except Exception as e: raise Exception("Error -- Failed to Delete the firewalld rule set {0}".format(e)) diff --git a/tests_e2e/tests/scripts/agent_publish-check_update.py b/tests_e2e/tests/scripts/agent_publish-check_update.py index 9be73ea097..ab5eb73569 100755 --- a/tests_e2e/tests/scripts/agent_publish-check_update.py +++ 
b/tests_e2e/tests/scripts/agent_publish-check_update.py @@ -1,5 +1,5 @@ #!/usr/bin/env pypy3 - +import argparse # Microsoft Azure Linux Agent # # Copyright 2018 Microsoft Corporation @@ -67,7 +67,7 @@ _RUNNING_PATTERN_00 = re.compile(r'.*Agent\sWALinuxAgent-(\S*)\sis running as the goal state agent') -def verify_agent_update_from_log(): +def verify_agent_update_from_log(published_version: str) -> bool: exit_code = 0 detected_update = False @@ -83,13 +83,14 @@ def verify_agent_update_from_log(): for p in [_UPDATE_PATTERN_00, _UPDATE_PATTERN_01, _UPDATE_PATTERN_02, _UPDATE_PATTERN_03, _UPDATE_PATTERN_04]: update_match = re.match(p, record.message) if update_match: - detected_update = True update_version = update_match.groups()[2] - log.info('found the agent update log: %s', record.text) - break + if update_version == published_version: + detected_update = True + log.info('found the agent update log: %s', record.text) + break if detected_update: - running_match = re.match(_RUNNING_PATTERN_00, record.text) + running_match = re.match(_RUNNING_PATTERN_00, record.message) if running_match and update_version == running_match.groups()[0]: update_successful = True log.info('found the agent started new version log: %s', record.text) @@ -102,7 +103,7 @@ def verify_agent_update_from_log(): log.warning('update was not successful') exit_code = 1 else: - log.warning('update was not detected') + log.warning('update was not detected for version: %s', published_version) exit_code = 1 return exit_code == 0 @@ -110,7 +111,10 @@ def verify_agent_update_from_log(): # This method will trace agent update messages in the agent log and determine if the update was successful or not. 
def main(): - found: bool = retry_if_false(verify_agent_update_from_log) + parser = argparse.ArgumentParser() + parser.add_argument('-p', '--published-version', required=True) + args = parser.parse_args() + found: bool = retry_if_false(lambda: verify_agent_update_from_log(args.published_version)) if not found: fail('update was not found in the logs') diff --git a/tests_e2e/tests/scripts/agent_update-self_update_latest_version.py b/tests_e2e/tests/scripts/agent_update-self_update_latest_version.py index 4be0f0dc3d..004011deca 100755 --- a/tests_e2e/tests/scripts/agent_update-self_update_latest_version.py +++ b/tests_e2e/tests/scripts/agent_update-self_update_latest_version.py @@ -19,20 +19,22 @@ # returns the agent latest version published # +import argparse + from azurelinuxagent.common.protocol.goal_state import GoalStateProperties from azurelinuxagent.common.protocol.util import get_protocol_util from azurelinuxagent.common.utils.flexible_version import FlexibleVersion from tests_e2e.tests.lib.retry import retry -def get_agent_family_manifest(goal_state): +def get_agent_family_manifest(goal_state, family_type): """ - Get the agent_family from last GS for Test Family + Get the agent_family from last GS for given Family """ agent_families = goal_state.extensions_goal_state.agent_families agent_family_manifests = [] for m in agent_families: - if m.name == 'Test': + if m.name == family_type: if len(m.uris) > 0: agent_family_manifests.append(m) return agent_family_manifests[0] @@ -53,11 +55,14 @@ def get_largest_version(agent_manifest): def main(): try: + parser = argparse.ArgumentParser() + parser.add_argument('--family_type', dest="family_type", default="Test") + args = parser.parse_args() protocol = get_protocol_util().get_protocol(init_goal_state=False) retry(lambda: protocol.client.reset_goal_state( goal_state_properties=GoalStateProperties.ExtensionsGoalState)) goal_state = protocol.client.get_goal_state() - agent_family = get_agent_family_manifest(goal_state) + 
agent_family = get_agent_family_manifest(goal_state, args.family_type) agent_manifest = goal_state.fetch_agent_manifest(agent_family.name, agent_family.uris) largest_version = get_largest_version(agent_manifest) print(str(largest_version)) diff --git a/tests_e2e/tests/scripts/agent_update-self_update_test_setup b/tests_e2e/tests/scripts/agent_update-self_update_test_setup index 512beb322b..22a0f4becb 100755 --- a/tests_e2e/tests/scripts/agent_update-self_update_test_setup +++ b/tests_e2e/tests/scripts/agent_update-self_update_test_setup @@ -61,10 +61,13 @@ if [ "$#" -ne 0 ] || [ -z ${package+x} ] || [ -z ${version+x} ]; then fi echo "updating the related to self-update flags" -update-waagent-conf AutoUpdate.UpdateToLatestVersion=$update_to_latest_version Debug.EnableGAVersioning=n Debug.SelfUpdateHotfixFrequency=120 Debug.SelfUpdateRegularFrequency=120 Autoupdate.Frequency=120 +update-waagent-conf AutoUpdate.UpdateToLatestVersion=$update_to_latest_version AutoUpdate.GAFamily=Test Debug.EnableGAVersioning=n Debug.SelfUpdateHotfixFrequency=120 Debug.SelfUpdateRegularFrequency=120 Autoupdate.Frequency=120 agent-service stop mv /var/log/waagent.log /var/log/waagent.$(date --iso-8601=seconds).log +# Some distros may pre-install higher version than custom version that test installs, so we need to lower the version to install custom version +agent_update-modify_agent_version 2.2.53 + echo "Cleaning up the existing agents" rm -rf /var/lib/waagent/WALinuxAgent-* diff --git a/tests_e2e/tests/scripts/ext_cgroups-check_cgroups_extensions.py b/tests_e2e/tests/scripts/ext_cgroups-check_cgroups_extensions.py index 48bd3f902e..8d97da3f79 100755 --- a/tests_e2e/tests/scripts/ext_cgroups-check_cgroups_extensions.py +++ b/tests_e2e/tests/scripts/ext_cgroups-check_cgroups_extensions.py @@ -29,6 +29,7 @@ print_cgroups from tests_e2e.tests.lib.logging import log from tests_e2e.tests.lib.remote_test import run_remote_test +from tests_e2e.tests.lib.retry import retry_if_false def 
verify_custom_script_cgroup_assigned_correctly():
@@ -218,7 +219,7 @@ def main():
         run_remote_test(main)
     except Exception as e:
         # It is possible that agent cgroup can be disabled due to UNKNOWN process or throttled before we run this check, in that case, we should ignore the validation
-        if check_agent_quota_disabled() and check_cgroup_disabled_with_unknown_process():
+        if check_cgroup_disabled_with_unknown_process() and retry_if_false(check_agent_quota_disabled):
             log.info("Cgroup is disabled due to UNKNOWN process, ignoring ext cgroups validations")
         else:
             raise
diff --git a/tests_e2e/tests/scripts/ext_sequencing-get_ext_enable_time.py b/tests_e2e/tests/scripts/ext_sequencing-get_ext_enable_time.py
index f65da676be..32bf7bd8bc 100755
--- a/tests_e2e/tests/scripts/ext_sequencing-get_ext_enable_time.py
+++ b/tests_e2e/tests/scripts/ext_sequencing-get_ext_enable_time.py
@@ -20,68 +20,42 @@
 #
 
 import argparse
-import json
-import os
+import re
 import sys
+from datetime import datetime
 
-from pathlib import Path
+from tests_e2e.tests.lib.agent_log import AgentLog
 
 
 def main():
     """
-    Returns the timestamp of when the provided extension was enabled
+    Searches the agent log after the provided timestamp to determine when the agent enabled the provided extension.
""" parser = argparse.ArgumentParser() parser.add_argument("--ext", dest='ext', required=True) + parser.add_argument("--after_time", dest='after_time', required=True) args, _ = parser.parse_known_args() - # Extension enabled time is in extension extension status file - ext_dirs = [item for item in os.listdir(Path('/var/lib/waagent')) if item.startswith(args.ext)] - if not ext_dirs: - print("Extension {0} directory does not exist".format(args.ext), file=sys.stderr) - sys.exit(1) - ext_status_path = Path('/var/lib/waagent/' + ext_dirs[0] + '/status') - ext_status_files = os.listdir(ext_status_path) - ext_status_files.sort() - if not ext_status_files: - # Extension did not report a status - print("Extension {0} did not report a status".format(args.ext), file=sys.stderr) - sys.exit(1) - latest_ext_status_path = os.path.join(ext_status_path, ext_status_files[-1]) - ext_status_file = open(latest_ext_status_path, 'r') - ext_status = json.loads(ext_status_file.read()) - - # Example status file - # [ - # { - # "status": { - # "status": "success", - # "formattedMessage": { - # "lang": "en-US", - # "message": "Enable succeeded" - # }, - # "operation": "Enable", - # "code": "0", - # "name": "Microsoft.Azure.Monitor.AzureMonitorLinuxAgent" - # }, - # "version": "1.0", - # "timestampUTC": "2023-12-12T23:14:45Z" - # } - # ] - msg = "" - if len(ext_status) == 0 or not ext_status[0]['status']: - msg = "Extension {0} did not report a status".format(args.ext) - elif not ext_status[0]['status']['operation'] or ext_status[0]['status']['operation'] != 'Enable': - msg = "Extension {0} did not report a status for enable operation".format(args.ext) - elif ext_status[0]['status']['status'] != 'success': - msg = "Extension {0} did not report success for the enable operation".format(args.ext) - elif not ext_status[0]['timestampUTC']: - msg = "Extension {0} did not report the time the enable operation succeeded".format(args.ext) - else: - print(ext_status[0]['timestampUTC']) - sys.exit(0) - - 
print(msg, file=sys.stderr) + # Only search the agent log after the provided timestamp: args.after_time + after_time = datetime.strptime(args.after_time, u'%Y-%m-%d %H:%M:%S') + # Agent logs for extension enable: 2024-02-09T09:29:08.943529Z INFO ExtHandler [Microsoft.Azure.Extensions.CustomScript-2.1.10] Enable extension: [bin/custom-script-shim enable] + enable_log_regex = r"\[{0}-[.\d]+\] Enable extension: .*".format(args.ext) + + agent_log = AgentLog() + try: + for agent_record in agent_log.read(): + if agent_record.timestamp >= after_time: + # The agent_record prefix for enable logs is the extension name, for example: [Microsoft.Azure.Extensions.CustomScript-2.1.10] + if agent_record.prefix is not None: + ext_enabled = re.match(enable_log_regex, " ".join([agent_record.prefix, agent_record.message])) + + if ext_enabled is not None: + print(agent_record.when) + sys.exit(0) + except IOError as e: + print("Error when parsing agent log: {0}".format(str(e))) + + print("Extension {0} was not enabled after {1}".format(args.ext, args.after_time), file=sys.stderr) sys.exit(1) diff --git a/tests_e2e/tests/scripts/initial_agent_update-agent_update_check_from_log.py b/tests_e2e/tests/scripts/initial_agent_update-agent_update_check_from_log.py new file mode 100755 index 0000000000..3ae62fb30f --- /dev/null +++ b/tests_e2e/tests/scripts/initial_agent_update-agent_update_check_from_log.py @@ -0,0 +1,62 @@ +#!/usr/bin/env pypy3 + +# Microsoft Azure Linux Agent +# +# Copyright 2018 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Checks that the initial agent update happens with self-update before processing goal state from the agent log + +import argparse +import datetime +import re + +from assertpy import fail + +from tests_e2e.tests.lib.agent_log import AgentLog +from tests_e2e.tests.lib.logging import log + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument("--current_version", dest='current_version', required=True) + parser.add_argument("--latest_version", dest='latest_version', required=True) + args = parser.parse_args() + + agentlog = AgentLog() + patterns = { + "goal_state": "ProcessExtensionsGoalState started", + "self_update": f"Self-update is ready to upgrade the new agent: {args.latest_version} now before processing the goal state", + "exit_process": f"Current Agent {args.current_version} completed all update checks, exiting current process to upgrade to the new Agent version {args.latest_version}" + } + first_occurrence_times = {"goal_state": datetime.time.min, "self_update": datetime.time.min, "exit_process": datetime.time.min} + + for record in agentlog.read(): + for key, pattern in patterns.items(): + # Skip if we already found the first occurrence of the pattern + if first_occurrence_times[key] != datetime.time.min: + continue + if re.search(pattern, record.message, flags=re.DOTALL): + log.info(f"Found data: {record} in agent log") + first_occurrence_times[key] = record.when + break + + if first_occurrence_times["self_update"] < first_occurrence_times["goal_state"] and first_occurrence_times["exit_process"] < first_occurrence_times["goal_state"]: + log.info("Verified initial agent update happened before processing goal state") + else: + fail(f"Agent initial update didn't happen before processing goal state and first_occurrence_times for patterns: {patterns} are: {first_occurrence_times}") + + +if __name__ == '__main__': + main()