diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
index fdcc07c95..9ac83e6c6 100644
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -12,6 +12,7 @@ This will expedite the process of getting your pull request merged and avoid ext
---
### PR information
+- [ ] Ensure development PR is based on the `develop` branch.
- [ ] The title of the PR is clear and informative.
- [ ] There are a small number of commits, each of which has an informative message. This means that previously merged commits do not appear in the history of the PR. For information on cleaning up the commits in your pull request, [see this page](https://github.com/Azure/azure-powershell/blob/master/documentation/development-docs/cleaning-up-commits.md).
- [ ] If applicable, the PR references the bug/issue that it fixes in the description.
diff --git a/.github/workflows/ci_pr.yml b/.github/workflows/ci_pr.yml
index fd8d91a38..9ae44ec8c 100644
--- a/.github/workflows/ci_pr.yml
+++ b/.github/workflows/ci_pr.yml
@@ -29,7 +29,8 @@ jobs:
env:
NOSEOPTS: "--verbose"
-
+ ACTIONS_ALLOW_USE_UNSECURE_NODE_VERSION: true
+
steps:
- uses: actions/checkout@v3
@@ -87,6 +88,9 @@ jobs:
matrix:
include:
- python-version: "3.5"
+ # workaround found in https://github.com/actions/setup-python/issues/866
+ # for issue "[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed (_ssl.c:728)" on Python 3.5
+ pip_trusted_host: "pypi.python.org pypi.org files.pythonhosted.org"
- python-version: "3.6"
- python-version: "3.7"
- python-version: "3.8"
@@ -110,6 +114,8 @@ jobs:
uses: actions/setup-python@v4
with:
python-version: ${{ matrix.python-version }}
+ env:
+ PIP_TRUSTED_HOST: ${{ matrix.pip_trusted_host }}
- name: Install dependencies
id: install-dependencies
@@ -142,9 +148,14 @@ jobs:
# * 'contextmanager-generator-missing-cleanup' are false positives if yield is used inside an if-else block for contextmanager generator functions.
# (https://pylint.readthedocs.io/en/latest/user_guide/messages/warning/contextmanager-generator-missing-cleanup.html).
# This is not implemented on versions (3.0-3.7) Bad option value 'contextmanager-generator-missing-cleanup' (bad-option-value)
            # * 3.9-3.11 will produce "too-many-positional-arguments" for several methods that have more than 5 args, so we suppress that warning.
+ # (R0917: Too many positional arguments (8/5) (too-many-positional-arguments))
PYLINT_OPTIONS="--rcfile=ci/pylintrc --jobs=0"
if [[ "${{ matrix.python-version }}" == "3.9" ]]; then
- PYLINT_OPTIONS="$PYLINT_OPTIONS --disable=no-member --ignore=main.py"
+ PYLINT_OPTIONS="$PYLINT_OPTIONS --disable=no-member,too-many-positional-arguments --ignore=main.py"
+ fi
+ if [[ "${{ matrix.python-version }}" =~ ^3\.(10|11)$ ]]; then
+ PYLINT_OPTIONS="$PYLINT_OPTIONS --disable=too-many-positional-arguments"
fi
if [[ "${{ matrix.python-version }}" =~ ^3\.[0-7]$ ]]; then
PYLINT_OPTIONS="$PYLINT_OPTIONS --disable=no-self-use,bad-option-value"
diff --git a/azurelinuxagent/agent.py b/azurelinuxagent/agent.py
index c0ebdbb42..bfb795c6b 100644
--- a/azurelinuxagent/agent.py
+++ b/azurelinuxagent/agent.py
@@ -23,6 +23,7 @@
from __future__ import print_function
+import json
import os
import re
import subprocess
@@ -31,7 +32,8 @@
from azurelinuxagent.common.exception import CGroupsException
from azurelinuxagent.ga import logcollector, cgroupconfigurator
-from azurelinuxagent.ga.cgroup import AGENT_LOG_COLLECTOR, CpuCgroup, MemoryCgroup
+from azurelinuxagent.ga.cgroupcontroller import AGENT_LOG_COLLECTOR
+from azurelinuxagent.ga.cpucontroller import _CpuController
from azurelinuxagent.ga.cgroupapi import get_cgroup_api, log_cgroup_warning, InvalidCgroupMountpointException
import azurelinuxagent.common.conf as conf
@@ -208,8 +210,7 @@ def collect_logs(self, is_full_mode):
# Check the cgroups unit
log_collector_monitor = None
- cpu_cgroup_path = None
- memory_cgroup_path = None
+ tracked_controllers = []
if CollectLogsHandler.is_enabled_monitor_cgroups_check():
try:
cgroup_api = get_cgroup_api()
@@ -220,44 +221,46 @@ def collect_logs(self, is_full_mode):
log_cgroup_warning("Unable to determine which cgroup version to use: {0}".format(ustr(e)), send_event=True)
sys.exit(logcollector.INVALID_CGROUPS_ERRCODE)
- cpu_cgroup_path, memory_cgroup_path = cgroup_api.get_process_cgroup_paths("self")
- cpu_slice_matches = False
- memory_slice_matches = False
- if cpu_cgroup_path is not None:
- cpu_slice_matches = (cgroupconfigurator.LOGCOLLECTOR_SLICE in cpu_cgroup_path)
- if memory_cgroup_path is not None:
- memory_slice_matches = (cgroupconfigurator.LOGCOLLECTOR_SLICE in memory_cgroup_path)
-
- if not cpu_slice_matches or not memory_slice_matches:
- log_cgroup_warning("The Log Collector process is not in the proper cgroups:", send_event=False)
- if not cpu_slice_matches:
- log_cgroup_warning("\tunexpected cpu slice: {0}".format(cpu_cgroup_path), send_event=False)
- if not memory_slice_matches:
- log_cgroup_warning("\tunexpected memory slice: {0}".format(memory_cgroup_path), send_event=False)
+ log_collector_cgroup = cgroup_api.get_process_cgroup(process_id="self", cgroup_name=AGENT_LOG_COLLECTOR)
+ tracked_controllers = log_collector_cgroup.get_controllers()
+ if len(tracked_controllers) != len(log_collector_cgroup.get_supported_controller_names()):
+ log_cgroup_warning("At least one required controller is missing. The following controllers are required for the log collector to run: {0}".format(log_collector_cgroup.get_supported_controller_names()))
sys.exit(logcollector.INVALID_CGROUPS_ERRCODE)
- def initialize_cgroups_tracking(cpu_cgroup_path, memory_cgroup_path):
- cpu_cgroup = CpuCgroup(AGENT_LOG_COLLECTOR, cpu_cgroup_path)
- msg = "Started tracking cpu cgroup {0}".format(cpu_cgroup)
- logger.info(msg)
- cpu_cgroup.initialize_cpu_usage()
- memory_cgroup = MemoryCgroup(AGENT_LOG_COLLECTOR, memory_cgroup_path)
- msg = "Started tracking memory cgroup {0}".format(memory_cgroup)
- logger.info(msg)
- return [cpu_cgroup, memory_cgroup]
+ if not log_collector_cgroup.check_in_expected_slice(cgroupconfigurator.LOGCOLLECTOR_SLICE):
+ log_cgroup_warning("The Log Collector process is not in the proper cgroups", send_event=False)
+ sys.exit(logcollector.INVALID_CGROUPS_ERRCODE)
try:
log_collector = LogCollector(is_full_mode)
- # Running log collector resource(CPU, Memory) monitoring only if agent starts the log collector.
+ # Running log collector resource monitoring only if agent starts the log collector.
# If Log collector start by any other means, then it will not be monitored.
if CollectLogsHandler.is_enabled_monitor_cgroups_check():
- tracked_cgroups = initialize_cgroups_tracking(cpu_cgroup_path, memory_cgroup_path)
- log_collector_monitor = get_log_collector_monitor_handler(tracked_cgroups)
+ for controller in tracked_controllers:
+ if isinstance(controller, _CpuController):
+ controller.initialize_cpu_usage()
+ break
+ log_collector_monitor = get_log_collector_monitor_handler(tracked_controllers)
log_collector_monitor.run()
- archive = log_collector.collect_logs_and_get_archive()
+
+ archive, total_uncompressed_size = log_collector.collect_logs_and_get_archive()
logger.info("Log collection successfully completed. Archive can be found at {0} "
"and detailed log output can be found at {1}".format(archive, OUTPUT_RESULTS_FILE_PATH))
+
+ if log_collector_monitor is not None:
+ log_collector_monitor.stop()
+ try:
+ metrics_summary = log_collector_monitor.get_max_recorded_metrics()
+ metrics_summary['Total Uncompressed File Size (B)'] = total_uncompressed_size
+ msg = json.dumps(metrics_summary)
+ logger.info(msg)
+ event.add_event(op=event.WALAEventOperation.LogCollection, message=msg, log_event=False)
+ except Exception as e:
+ msg = "An error occurred while reporting log collector resource usage summary: {0}".format(ustr(e))
+ logger.warn(msg)
+ event.add_event(op=event.WALAEventOperation.LogCollection, is_success=False, message=msg, log_event=False)
+
except Exception as e:
logger.error("Log collection completed unsuccessfully. Error: {0}".format(ustr(e)))
logger.info("Detailed log output can be found at {0}".format(OUTPUT_RESULTS_FILE_PATH))
diff --git a/azurelinuxagent/common/agent_supported_feature.py b/azurelinuxagent/common/agent_supported_feature.py
index 694c63639..f22a72ea6 100644
--- a/azurelinuxagent/common/agent_supported_feature.py
+++ b/azurelinuxagent/common/agent_supported_feature.py
@@ -77,14 +77,15 @@ def __init__(self):
class _GAVersioningGovernanceFeature(AgentSupportedFeature):
"""
CRP would drive the RSM update if agent reports that it does support RSM upgrades with this flag otherwise CRP fallback to largest version.
- Agent doesn't report supported feature flag if auto update is disabled or old version of agent running that doesn't understand GA versioning.
+ Agent doesn't report supported feature flag if auto update is disabled or old version of agent running that doesn't understand GA versioning
+ or if support for versioning is explicitly disabled in the agent
Note: Especially Windows need this flag to report to CRP that GA doesn't support the updates. So linux adopted same flag to have a common solution.
"""
__NAME = SupportedFeatureNames.GAVersioningGovernance
__VERSION = "1.0"
- __SUPPORTED = conf.get_auto_update_to_latest_version()
+ __SUPPORTED = conf.get_auto_update_to_latest_version() and conf.get_enable_ga_versioning()
def __init__(self):
super(_GAVersioningGovernanceFeature, self).__init__(name=self.__NAME,
diff --git a/azurelinuxagent/common/conf.py b/azurelinuxagent/common/conf.py
index 85a7bc2f7..cd5c9a2d7 100644
--- a/azurelinuxagent/common/conf.py
+++ b/azurelinuxagent/common/conf.py
@@ -146,7 +146,9 @@ def load_conf_from_file(conf_file_path, conf=__conf__):
"Debug.CgroupDisableOnQuotaCheckFailure": True,
"Debug.EnableAgentMemoryUsageCheck": False,
"Debug.EnableFastTrack": True,
- "Debug.EnableGAVersioning": True
+ "Debug.EnableGAVersioning": True,
+ "Debug.EnableCgroupV2ResourceLimiting": False,
+ "Debug.EnableExtensionPolicy": False
}
@@ -168,9 +170,7 @@ def load_conf_from_file(conf_file_path, conf=__conf__):
"ResourceDisk.MountPoint": "/mnt/resource",
"ResourceDisk.MountOptions": None,
"ResourceDisk.Filesystem": "ext3",
- "AutoUpdate.GAFamily": "Prod",
- "Debug.CgroupMonitorExpiryTime": "2022-03-31",
- "Debug.CgroupMonitorExtensionName": "Microsoft.Azure.Monitor.AzureMonitorLinuxAgent",
+ "AutoUpdate.GAFamily": "Prod"
}
@@ -200,7 +200,8 @@ def load_conf_from_file(conf_file_path, conf=__conf__):
"Debug.EtpCollectionPeriod": 300,
"Debug.AutoUpdateHotfixFrequency": 14400,
"Debug.AutoUpdateNormalFrequency": 86400,
- "Debug.FirewallRulesLogPeriod": 86400
+ "Debug.FirewallRulesLogPeriod": 86400,
+ "Debug.LogCollectorInitialDelay": 5 * 60
}
@@ -613,25 +614,6 @@ def get_enable_agent_memory_usage_check(conf=__conf__):
"""
return conf.get_switch("Debug.EnableAgentMemoryUsageCheck", False)
-
-def get_cgroup_monitor_expiry_time(conf=__conf__):
- """
- cgroups monitoring for pilot extensions disabled after expiry time
-
- NOTE: This option is experimental and may be removed in later versions of the Agent.
- """
- return conf.get("Debug.CgroupMonitorExpiryTime", "2022-03-31")
-
-
-def get_cgroup_monitor_extension_name (conf=__conf__):
- """
- cgroups monitoring extension name
-
- NOTE: This option is experimental and may be removed in later versions of the Agent.
- """
- return conf.get("Debug.CgroupMonitorExtensionName", "Microsoft.Azure.Monitor.AzureMonitorLinuxAgent")
-
-
def get_enable_fast_track(conf=__conf__):
"""
If True, the agent use FastTrack when retrieving goal states
@@ -680,3 +662,28 @@ def get_firewall_rules_log_period(conf=__conf__):
NOTE: This option is experimental and may be removed in later versions of the Agent.
"""
return conf.get_int("Debug.FirewallRulesLogPeriod", 86400)
+
+
+def get_extension_policy_enabled(conf=__conf__):
+ """
+ Determine whether extension policy is enabled. If true, policy will be enforced before installing any extensions.
+ NOTE: This option is experimental and may be removed in later versions of the Agent.
+ """
+ return conf.get_switch("Debug.EnableExtensionPolicy", False)
+
+
+def get_enable_cgroup_v2_resource_limiting(conf=__conf__):
+ """
+ If True, the agent will enable resource monitoring and enforcement for the log collector on machines using cgroup v2.
+ NOTE: This option is experimental and may be removed in later versions of the Agent.
+ """
+ return conf.get_switch("Debug.EnableCgroupV2ResourceLimiting", False)
+
+
+def get_log_collector_initial_delay(conf=__conf__):
+ """
+ Determine the initial delay at service start before the first periodic log collection.
+
+ NOTE: This option is experimental and may be removed in later versions of the Agent.
+ """
+ return conf.get_int("Debug.LogCollectorInitialDelay", 5 * 60)
diff --git a/azurelinuxagent/common/event.py b/azurelinuxagent/common/event.py
index 358806f9e..64ace2cba 100644
--- a/azurelinuxagent/common/event.py
+++ b/azurelinuxagent/common/event.py
@@ -86,6 +86,8 @@ class WALAEventOperation:
Downgrade = "Downgrade"
Download = "Download"
Enable = "Enable"
+ ExtensionHandlerManifest = "ExtensionHandlerManifest"
+ ExtensionPolicy = "ExtensionPolicy"
ExtensionProcessing = "ExtensionProcessing"
ExtensionTelemetryEventProcessing = "ExtensionTelemetryEventProcessing"
FetchGoalState = "FetchGoalState"
@@ -111,6 +113,7 @@ class WALAEventOperation:
OpenSsl = "OpenSsl"
Partition = "Partition"
PersistFirewallRules = "PersistFirewallRules"
+ Policy = "Policy"
ProvisionAfterExtensions = "ProvisionAfterExtensions"
PluginSettingsVersionMismatch = "PluginSettingsVersionMismatch"
InvalidExtensionConfig = "InvalidExtensionConfig"
@@ -433,7 +436,7 @@ def initialize_vminfo_common_parameters(self, protocol):
logger.warn("Failed to get VM info from goal state; will be missing from telemetry: {0}", ustr(e))
try:
- imds_client = get_imds_client(protocol.get_endpoint())
+ imds_client = get_imds_client()
imds_info = imds_client.get_compute()
parameters[CommonTelemetryEventSchema.Location].value = imds_info.location
parameters[CommonTelemetryEventSchema.SubscriptionId].value = imds_info.subscriptionId
diff --git a/azurelinuxagent/common/osutil/factory.py b/azurelinuxagent/common/osutil/factory.py
index 58afd0af1..fd66fbb0e 100644
--- a/azurelinuxagent/common/osutil/factory.py
+++ b/azurelinuxagent/common/osutil/factory.py
@@ -142,7 +142,7 @@ def _get_osutil(distro_name, distro_code_name, distro_version, distro_full_name)
if distro_name == "iosxe":
return IosxeOSUtil()
- if distro_name == "mariner":
+ if distro_name in ["mariner", "azurelinux"]:
return MarinerOSUtil()
if distro_name == "nsbsd":
diff --git a/azurelinuxagent/common/protocol/extensions_goal_state_from_extensions_config.py b/azurelinuxagent/common/protocol/extensions_goal_state_from_extensions_config.py
index 2b98819a2..bc7b451d3 100644
--- a/azurelinuxagent/common/protocol/extensions_goal_state_from_extensions_config.py
+++ b/azurelinuxagent/common/protocol/extensions_goal_state_from_extensions_config.py
@@ -25,8 +25,8 @@
from azurelinuxagent.common.future import ustr
from azurelinuxagent.common.protocol.extensions_goal_state import ExtensionsGoalState, GoalStateChannel, GoalStateSource
from azurelinuxagent.common.protocol.restapi import ExtensionSettings, Extension, VMAgentFamily, ExtensionState, InVMGoalStateMetaData
-from azurelinuxagent.common.utils.textutil import parse_doc, parse_json, findall, find, findtext, getattrib, gettext, format_exception, \
- is_str_none_or_whitespace, is_str_empty
+from azurelinuxagent.common.utils.textutil import parse_doc, parse_json, findall, find, findtext, getattrib, gettext, \
+ format_exception, is_str_none_or_whitespace, is_str_empty, hasattrib
class ExtensionsGoalStateFromExtensionsConfig(ExtensionsGoalState):
@@ -205,8 +205,8 @@ def __parse_plugins_and_settings_and_populate_ext_handlers(self, xml_doc):
Sample ExtensionConfig Plugin and PluginSettings:
-
-
+
+
@@ -260,13 +260,14 @@ def _parse_plugin(extension, plugin):
Sample config:
-
+
+
https://rdfecurrentuswestcache3.blob.core.test-cint.azure-test.net/0e53c53ef0be4178bacb0a1fecf12a74/Microsoft.Azure.Extensions_CustomScript_usstagesc_manifest.xml
https://rdfecurrentuswestcache4.blob.core.test-cint.azure-test.net/0e53c53ef0be4178bacb0a1fecf12a74/Microsoft.Azure.Extensions_CustomScript_usstagesc_manifest.xml
-
+
@@ -294,6 +295,10 @@ def _log_error_if_none(attr_name, value):
if extension.state in (None, ""):
raise ExtensionsConfigError("Received empty Extensions.Plugins.Plugin.state, failing Handler")
+ # extension.encoded_signature value should be None if the property does not exist for the plugin. getattrib
+ # returns "" if an attribute does not exist in a node, so use hasattrib here to check if the attribute exists
+ extension.encoded_signature = getattrib(plugin, "encodedSignature") if hasattrib(plugin, "encodedSignature") else None
+
def getattrib_wrapped_in_list(node, attr_name):
attr = getattrib(node, attr_name)
return [attr] if attr not in (None, "") else []
diff --git a/azurelinuxagent/common/protocol/extensions_goal_state_from_vm_settings.py b/azurelinuxagent/common/protocol/extensions_goal_state_from_vm_settings.py
index 041ddedcd..02f2ab59b 100644
--- a/azurelinuxagent/common/protocol/extensions_goal_state_from_vm_settings.py
+++ b/azurelinuxagent/common/protocol/extensions_goal_state_from_vm_settings.py
@@ -296,6 +296,7 @@ def _parse_extensions(self, vm_settings):
# "runAsStartupTask": false,
# "isJson": true,
# "useExactVersion": true,
+ # "encodedSignature": "MIIn...",
# "settingsSeqNo": 0,
# "settings": [
# {
@@ -361,6 +362,8 @@ def _parse_extensions(self, vm_settings):
extension.name = extension_gs['name']
extension.version = extension_gs['version']
extension.state = extension_gs['state']
+ # extension.encoded_signature should be None if 'encodedSignature' key does not exist for the extension
+ extension.encoded_signature = extension_gs.get('encodedSignature')
if extension.state not in ExtensionRequestedState.All:
raise Exception('Invalid extension state: {0} ({1})'.format(extension.state, extension.name))
is_multi_config = extension_gs.get('isMultiConfig')
diff --git a/azurelinuxagent/common/protocol/goal_state.py b/azurelinuxagent/common/protocol/goal_state.py
index 2eb89c1eb..2730a0629 100644
--- a/azurelinuxagent/common/protocol/goal_state.py
+++ b/azurelinuxagent/common/protocol/goal_state.py
@@ -212,7 +212,7 @@ def update(self, silent=False):
except GoalStateInconsistentError as e:
message = "Detected an inconsistency in the goal state: {0}".format(ustr(e))
self.logger.warn(message)
- add_event(op=WALAEventOperation.GoalState, is_success=False, message=message)
+ add_event(op=WALAEventOperation.GoalState, is_success=False, log_event=False, message=message)
self._update(force_update=True)
@@ -503,7 +503,7 @@ def _fetch_full_wire_server_goal_state(self, incarnation, xml_doc):
if GoalStateProperties.RemoteAccessInfo & self._goal_state_properties:
remote_access_uri = findtext(container, "RemoteAccessInfo")
if remote_access_uri is not None:
- xml_text = self._wire_client.fetch_config(remote_access_uri, self._wire_client.get_header_for_cert())
+ xml_text = self._wire_client.fetch_config(remote_access_uri, self._wire_client.get_header_for_remote_access())
remote_access = RemoteAccess(xml_text)
if self._save_to_history:
self._history.save_remote_access(xml_text)
@@ -593,29 +593,32 @@ def __init__(self, xml_text, my_logger):
thumbprints = {}
index = 0
v1_cert_list = []
- with open(pem_file) as pem:
- for line in pem.readlines():
- buf.append(line)
- if re.match(r'[-]+END.*KEY[-]+', line):
- tmp_file = Certificates._write_to_tmp_file(index, 'prv', buf)
- pub = cryptutil.get_pubkey_from_prv(tmp_file)
- prvs[pub] = tmp_file
- buf = []
- index += 1
- elif re.match(r'[-]+END.*CERTIFICATE[-]+', line):
- tmp_file = Certificates._write_to_tmp_file(index, 'crt', buf)
- pub = cryptutil.get_pubkey_from_crt(tmp_file)
- thumbprint = cryptutil.get_thumbprint_from_crt(tmp_file)
- thumbprints[pub] = thumbprint
- # Rename crt with thumbprint as the file name
- crt = "{0}.crt".format(thumbprint)
- v1_cert_list.append({
- "name": None,
- "thumbprint": thumbprint
- })
- os.rename(tmp_file, os.path.join(conf.get_lib_dir(), crt))
- buf = []
- index += 1
+
+ # Ensure pem_file exists before reading the certs data since decrypt_p7m may clear the pem_file when decryption fails
+ if os.path.exists(pem_file):
+ with open(pem_file) as pem:
+ for line in pem.readlines():
+ buf.append(line)
+ if re.match(r'[-]+END.*KEY[-]+', line):
+ tmp_file = Certificates._write_to_tmp_file(index, 'prv', buf)
+ pub = cryptutil.get_pubkey_from_prv(tmp_file)
+ prvs[pub] = tmp_file
+ buf = []
+ index += 1
+ elif re.match(r'[-]+END.*CERTIFICATE[-]+', line):
+ tmp_file = Certificates._write_to_tmp_file(index, 'crt', buf)
+ pub = cryptutil.get_pubkey_from_crt(tmp_file)
+ thumbprint = cryptutil.get_thumbprint_from_crt(tmp_file)
+ thumbprints[pub] = thumbprint
+ # Rename crt with thumbprint as the file name
+ crt = "{0}.crt".format(thumbprint)
+ v1_cert_list.append({
+ "name": None,
+ "thumbprint": thumbprint
+ })
+ os.rename(tmp_file, os.path.join(conf.get_lib_dir(), crt))
+ buf = []
+ index += 1
# Rename prv key with thumbprint as the file name
for pubkey in prvs:
diff --git a/azurelinuxagent/common/protocol/imds.py b/azurelinuxagent/common/protocol/imds.py
index 5b9e206a1..fba88e0ee 100644
--- a/azurelinuxagent/common/protocol/imds.py
+++ b/azurelinuxagent/common/protocol/imds.py
@@ -27,8 +27,8 @@
IMDS_INTERNAL_SERVER_ERROR = 3
-def get_imds_client(wireserver_endpoint):
- return ImdsClient(wireserver_endpoint)
+def get_imds_client():
+ return ImdsClient()
# A *slightly* future proof list of endorsed distros.
@@ -256,7 +256,7 @@ def image_origin(self):
class ImdsClient(object):
- def __init__(self, wireserver_endpoint, version=APIVERSION):
+ def __init__(self, version=APIVERSION):
self._api_version = version
self._headers = {
'User-Agent': restutil.HTTP_USER_AGENT,
@@ -268,7 +268,6 @@ def __init__(self, wireserver_endpoint, version=APIVERSION):
}
self._regex_ioerror = re.compile(r".*HTTP Failed. GET http://[^ ]+ -- IOError .*")
self._regex_throttled = re.compile(r".*HTTP Retry. GET http://[^ ]+ -- Status Code 429 .*")
- self._wireserver_endpoint = wireserver_endpoint
def _get_metadata_url(self, endpoint, resource_path):
return BASE_METADATA_URI.format(endpoint, resource_path, self._api_version)
@@ -326,14 +325,12 @@ def get_metadata(self, resource_path, is_health):
endpoint = IMDS_ENDPOINT
status, resp = self._get_metadata_from_endpoint(endpoint, resource_path, headers)
- if status == IMDS_CONNECTION_ERROR:
- endpoint = self._wireserver_endpoint
- status, resp = self._get_metadata_from_endpoint(endpoint, resource_path, headers)
if status == IMDS_RESPONSE_SUCCESS:
return MetadataResult(True, False, resp)
elif status == IMDS_INTERNAL_SERVER_ERROR:
return MetadataResult(False, True, resp)
+ # else it's a client-side error, e.g. IMDS_CONNECTION_ERROR
return MetadataResult(False, False, resp)
def get_compute(self):
diff --git a/azurelinuxagent/common/protocol/restapi.py b/azurelinuxagent/common/protocol/restapi.py
index 35b40cf13..54e020c15 100644
--- a/azurelinuxagent/common/protocol/restapi.py
+++ b/azurelinuxagent/common/protocol/restapi.py
@@ -159,6 +159,7 @@ def __init__(self, name=None):
self.settings = []
self.manifest_uris = []
self.supports_multi_config = False
+ self.encoded_signature = None
self.__invalid_handler_setting_reason = None
@property
diff --git a/azurelinuxagent/common/protocol/wire.py b/azurelinuxagent/common/protocol/wire.py
index c4300134d..728c1945d 100644
--- a/azurelinuxagent/common/protocol/wire.py
+++ b/azurelinuxagent/common/protocol/wire.py
@@ -38,7 +38,7 @@
ResourceGoneError, ExtensionDownloadError, InvalidContainerError, ProtocolError, HttpError, ExtensionErrorCodes
from azurelinuxagent.common.future import httpclient, bytebuffer, ustr
from azurelinuxagent.common.protocol.goal_state import GoalState, TRANSPORT_CERT_FILE_NAME, TRANSPORT_PRV_FILE_NAME, \
- GoalStateProperties
+ GoalStateProperties, GoalStateInconsistentError
from azurelinuxagent.common.protocol.hostplugin import HostPluginProtocol
from azurelinuxagent.common.protocol.restapi import DataContract, ProvisionStatus, VMInfo, VMStatus
from azurelinuxagent.common.telemetryevent import GuestAgentExtensionEventsSchema
@@ -97,7 +97,22 @@ def detect(self, init_goal_state=True, save_to_history=False):
# Initialize the goal state, including all the inner properties
if init_goal_state:
logger.info('Initializing goal state during protocol detection')
- self.client.reset_goal_state(save_to_history=save_to_history)
+ #
+ # TODO: Currently protocol detection retrieves the entire goal state. This is not needed; in particular, retrieving the Extensions goal state
+ # is not needed. However, the goal state is cached in self.client._goal_state and other components, including the Extension Handler,
+ # depend on this cached value. This has been a long-standing issue that causes multiple problems. Before removing the cached goal state,
+ # though, a careful review of these dependencies is needed.
+ #
+ # One of the problems of fetching the full goal state is that issues while retrieving it can block protocol detection and make the
+ # Agent go into a retry loop that can last 1 full hour. One particular error, GoalStateInconsistentError, can arise if the certificates
+ # needed by extensions are missing from the goal state; for example, if a FastTrack goal state is out of sync with the corresponding
+ # Fabric goal state that contains the certificates, or if decryption of the certificates fails (and hence, the certificate list is
+ # empty). The try/except below handles only this one particular problem.
+ #
+ try:
+ self.client.reset_goal_state(save_to_history=save_to_history)
+ except GoalStateInconsistentError as error:
+ logger.warn("{0}", ustr(error))
def update_host_plugin_from_goal_state(self):
self.client.update_host_plugin_from_goal_state()
@@ -1190,6 +1205,12 @@ def get_header_for_xml_content(self):
}
def get_header_for_cert(self):
+ return self._get_header_for_encrypted_request("DES_EDE3_CBC")
+
+ def get_header_for_remote_access(self):
+ return self._get_header_for_encrypted_request("AES128_CBC")
+
+ def _get_header_for_encrypted_request(self, cypher):
trans_cert_file = os.path.join(conf.get_lib_dir(), TRANSPORT_CERT_FILE_NAME)
try:
content = fileutil.read_file(trans_cert_file)
@@ -1200,7 +1221,7 @@ def get_header_for_cert(self):
return {
"x-ms-agent-name": "WALinuxAgent",
"x-ms-version": PROTOCOL_VERSION,
- "x-ms-cipher-name": "DES_EDE3_CBC",
+ "x-ms-cipher-name": cypher,
"x-ms-guest-agent-public-x509-cert": cert
}
diff --git a/azurelinuxagent/common/utils/cryptutil.py b/azurelinuxagent/common/utils/cryptutil.py
index bed829ae6..00126e251 100644
--- a/azurelinuxagent/common/utils/cryptutil.py
+++ b/azurelinuxagent/common/utils/cryptutil.py
@@ -87,6 +87,16 @@ def get_thumbprint_from_crt(self, file_name):
return thumbprint
def decrypt_p7m(self, p7m_file, trans_prv_file, trans_cert_file, pem_file):
+
+ def _cleanup_files(files_to_cleanup):
+ for file_path in files_to_cleanup:
+ if os.path.exists(file_path):
+ try:
+ os.remove(file_path)
+ logger.info("Removed file {0}", file_path)
+ except Exception as e:
+ logger.error("Failed to remove file {0}: {1}", file_path, ustr(e))
+
if not os.path.exists(p7m_file):
raise IOError(errno.ENOENT, "File not found", p7m_file)
elif not os.path.exists(trans_prv_file):
@@ -99,6 +109,13 @@ def decrypt_p7m(self, p7m_file, trans_prv_file, trans_cert_file, pem_file):
except shellutil.CommandError as command_error:
logger.error("Failed to decrypt {0} (return code: {1})\n[stdout]\n{2}\n[stderr]\n{3}",
p7m_file, command_error.returncode, command_error.stdout, command_error.stderr)
+ # If the decryption fails, older versions of openssl overwrite the output file (if it exists) with empty data while
+ # newer versions of openssl (3.2.2) do not overwrite the output file, so the output file may contain old certs data.
+ # Correct the behavior by removing the temporary output files, since having empty/no data makes sense when decryption fails
+ # otherwise we end up processing old certs again.
+ files_to_remove = [p7m_file, pem_file]
+ logger.info("Removing temporary state certificate files {0}", files_to_remove)
+ _cleanup_files(files_to_remove)
def crt_to_ssh(self, input_file, output_file):
with open(output_file, "ab") as file_out:
diff --git a/azurelinuxagent/common/utils/textutil.py b/azurelinuxagent/common/utils/textutil.py
index 4a0f9a754..b5e15c9f7 100644
--- a/azurelinuxagent/common/utils/textutil.py
+++ b/azurelinuxagent/common/utils/textutil.py
@@ -85,7 +85,7 @@ def findtext(root, tag, namespace=None):
def getattrib(node, attr_name):
"""
- Get attribute of xml node
+ Get attribute of xml node. Returns None if node is None. Returns "" if node does not have attribute attr_name
"""
if node is not None:
return node.getAttribute(attr_name)
@@ -93,6 +93,16 @@ def getattrib(node, attr_name):
return None
+def hasattrib(node, attr_name):
+ """
+ Return True if xml node has attribute, False if node is None or node does not have attribute attr_name
+ """
+ if node is not None:
+ return node.hasAttribute(attr_name)
+ else:
+ return False
+
+
def unpack(buf, offset, value_range):
"""
Unpack bytes into python values.
diff --git a/azurelinuxagent/ga/agent_update_handler.py b/azurelinuxagent/ga/agent_update_handler.py
index 8caec1087..ee6a44f9f 100644
--- a/azurelinuxagent/ga/agent_update_handler.py
+++ b/azurelinuxagent/ga/agent_update_handler.py
@@ -29,24 +29,44 @@
from azurelinuxagent.ga.self_update_version_updater import SelfUpdateVersionUpdater
+class UpdateMode(object):
+ """
+ Enum for Update modes
+ """
+ RSM = "RSM"
+ SelfUpdate = "SelfUpdate"
+
+
def get_agent_update_handler(protocol):
return AgentUpdateHandler(protocol)
+RSM_UPDATE_STATE_FILE = "waagent_rsm_update"
+INITIAL_UPDATE_STATE_FILE = "waagent_initial_update"
+
+
class AgentUpdateHandler(object):
"""
This class handles two type of agent updates. Handler initializes the updater to SelfUpdateVersionUpdater and switch to appropriate updater based on below conditions:
- RSM update: This is the update requested by RSM. The contract between CRP and agent is we get following properties in the goal state:
+ RSM update: This is the update requested by RSM, and the contract between CRP and agent is that we get the following properties in the goal state:
version: it will have what version to update
isVersionFromRSM: True if the version is from RSM deployment.
isVMEnabledForRSMUpgrades: True if the VM is enabled for RSM upgrades.
- if vm enabled for RSM upgrades, we use RSM update path. But if requested update is not by rsm deployment
+ if vm enabled for RSM upgrades, we use RSM update path. But if requested update is not by rsm deployment( if isVersionFromRSM:False)
we ignore the update.
- Self update: We fallback to this if above is condition not met. This update to the largest version available in the manifest
+ Self update: We fall back to this if the above condition is not met. This updates to the largest version available in the manifest.
+ Also, we use self-update for initial update due to [1][2]
Note: Self-update don't support downgrade.
- Handler keeps the rsm state of last update is with RSM or not on every new goal state. Once handler decides which updater to use, then
- does following steps:
+ [1] New vms that are enrolled into RSM get isVMEnabledForRSMUpgrades as True and isVersionFromRSM as False in the first goal state. As per the RSM update flow mentioned above,
+ we don't apply the update if isVersionFromRSM is false. Consequently, new vms remain on pre-installed agent until RSM drives a new version update. In the meantime, agent may process the extensions with the baked version.
+ This can potentially lead to issues due to incompatibility.
+ [2] If current version is N, and we are deploying N+1. We find an issue on N+1 and remove N+1 from PIR. If CRP created the initial goal state for a new vm
+ before the delete, the version in the goal state would be N+1; if the agent starts processing the goal state after the delete, it won't find N+1, the update will fail, and
+ the vm will use the baked version.
+
+ Handler updates the state if the current update mode is changed from the last update mode (RSM or Self-Update) on a new goal state. Once handler decides which updater to use, then
+ the updater does the following steps:
1. Retrieve the agent version from the goal state.
2. Check if we allowed to update for that version.
3. Log the update message.
@@ -63,8 +83,8 @@ def __init__(self, protocol):
self._daemon_version = self._get_daemon_version_for_update()
self._last_attempted_update_error_msg = ""
- # restore the state of rsm update. Default to self-update if last update is not with RSM.
- if not self._get_is_last_update_with_rsm():
+ # Restore the state of the rsm update. Default to self-update if the last update was not with RSM or if the agent is doing the initial update
+ if not self._get_is_last_update_with_rsm() or self._is_initial_update():
self._updater = SelfUpdateVersionUpdater(self._gs_id)
else:
self._updater = RSMVersionUpdater(self._gs_id, self._daemon_version)
@@ -78,14 +98,39 @@ def _get_daemon_version_for_update():
# use the min version as 2.2.53 as we started setting the daemon version starting 2.2.53.
return FlexibleVersion("2.2.53")
+ @staticmethod
+ def _get_initial_update_state_file():
+ """
+ This file tracks whether the initial update has been attempted
+ """
+ return os.path.join(conf.get_lib_dir(), INITIAL_UPDATE_STATE_FILE)
+
+ def _save_initial_update_state_file(self):
+ """
+ Create the state file to record that the agent attempted the initial update
+ """
+ try:
+ with open(self._get_initial_update_state_file(), "w"):
+ pass
+ except Exception as e:
+ msg = "Error creating the initial update state file ({0}): {1}".format(self._get_initial_update_state_file(), ustr(e))
+ logger.warn(msg)
+ add_event(op=WALAEventOperation.AgentUpgrade, message=msg, log_event=False)
+
+ def _is_initial_update(self):
+ """
+ Returns True if the state file doesn't exist, as the presence of the file indicates the initial update was already attempted
+ """
+ return not os.path.exists(self._get_initial_update_state_file())
+
@staticmethod
def _get_rsm_update_state_file():
"""
This file keeps if last attempted update is rsm or not.
"""
- return os.path.join(conf.get_lib_dir(), "rsm_update.json")
+ return os.path.join(conf.get_lib_dir(), RSM_UPDATE_STATE_FILE)
- def _save_rsm_update_state(self):
+ def _save_rsm_update_state_file(self):
"""
Save the rsm state empty file when we switch to RSM
"""
@@ -93,9 +138,11 @@ def _save_rsm_update_state(self):
with open(self._get_rsm_update_state_file(), "w"):
pass
except Exception as e:
- logger.warn("Error creating the RSM state ({0}): {1}", self._get_rsm_update_state_file(), ustr(e))
+ msg = "Error creating the RSM state file ({0}): {1}".format(self._get_rsm_update_state_file(), ustr(e))
+ logger.warn(msg)
+ add_event(op=WALAEventOperation.AgentUpgrade, message=msg, log_event=False)
- def _remove_rsm_update_state(self):
+ def _remove_rsm_update_state_file(self):
"""
Remove the rsm state file when we switch to self-update
"""
@@ -103,7 +150,9 @@ def _remove_rsm_update_state(self):
if os.path.exists(self._get_rsm_update_state_file()):
os.remove(self._get_rsm_update_state_file())
except Exception as e:
- logger.warn("Error removing the RSM state ({0}): {1}", self._get_rsm_update_state_file(), ustr(e))
+ msg = "Error removing the RSM state file ({0}): {1}".format(self._get_rsm_update_state_file(), ustr(e))
+ logger.warn(msg)
+ add_event(op=WALAEventOperation.AgentUpgrade, message=msg, log_event=False)
def _get_is_last_update_with_rsm(self):
"""
@@ -138,6 +187,15 @@ def _get_agent_family_manifest(self, goal_state):
family, self._gs_id))
return agent_family_manifests[0]
+ def get_current_update_mode(self):
+ """
+ Returns current update mode whether RSM or Self-Update
+ """
+ if isinstance(self._updater, RSMVersionUpdater):
+ return UpdateMode.RSM
+ else:
+ return UpdateMode.SelfUpdate
+
def run(self, goal_state, ext_gs_updated):
try:
@@ -147,30 +205,36 @@ def run(self, goal_state, ext_gs_updated):
# Update the state only on new goal state
if ext_gs_updated:
+ # Reset the last reported update state on new goal state before we attempt update otherwise we keep reporting the last update error if any
+ self._last_attempted_update_error_msg = ""
self._gs_id = goal_state.extensions_goal_state.id
self._updater.sync_new_gs_id(self._gs_id)
agent_family = self._get_agent_family_manifest(goal_state)
- # Updater will return True or False if we need to switch the updater
- # If self-updater receives RSM update enabled, it will switch to RSM updater
- # If RSM updater receives RSM update disabled, it will switch to self-update
- # No change in updater if GS not updated
- is_rsm_update_enabled = self._updater.is_rsm_update_enabled(agent_family, ext_gs_updated)
+ # The agent always uses self-update for the initial update, regardless of whether the vm is enrolled into RSM
+ # So we skip the updater-switch check for the initial goal state/update
+ if not self._is_initial_update():
- if not is_rsm_update_enabled and isinstance(self._updater, RSMVersionUpdater):
- msg = "VM not enabled for RSM updates, switching to self-update mode"
- logger.info(msg)
- add_event(op=WALAEventOperation.AgentUpgrade, message=msg, log_event=False)
- self._updater = SelfUpdateVersionUpdater(self._gs_id)
- self._remove_rsm_update_state()
+ # Updater will return True or False if we need to switch the updater
+ # If self-updater receives RSM update enabled, it will switch to RSM updater
+ # If RSM updater receives RSM update disabled, it will switch to self-update
+ # No change in updater if GS not updated
+ is_rsm_update_enabled = self._updater.is_rsm_update_enabled(agent_family, ext_gs_updated)
- if is_rsm_update_enabled and isinstance(self._updater, SelfUpdateVersionUpdater):
- msg = "VM enabled for RSM updates, switching to RSM update mode"
- logger.info(msg)
- add_event(op=WALAEventOperation.AgentUpgrade, message=msg, log_event=False)
- self._updater = RSMVersionUpdater(self._gs_id, self._daemon_version)
- self._save_rsm_update_state()
+ if not is_rsm_update_enabled and isinstance(self._updater, RSMVersionUpdater):
+ msg = "VM not enabled for RSM updates, switching to self-update mode"
+ logger.info(msg)
+ add_event(op=WALAEventOperation.AgentUpgrade, message=msg, log_event=False)
+ self._updater = SelfUpdateVersionUpdater(self._gs_id)
+ self._remove_rsm_update_state_file()
+
+ if is_rsm_update_enabled and isinstance(self._updater, SelfUpdateVersionUpdater):
+ msg = "VM enabled for RSM updates, switching to RSM update mode"
+ logger.info(msg)
+ add_event(op=WALAEventOperation.AgentUpgrade, message=msg, log_event=False)
+ self._updater = RSMVersionUpdater(self._gs_id, self._daemon_version)
+ self._save_rsm_update_state_file()
# If updater is changed in previous step, we allow update as it consider as first attempt. If not, it checks below condition
# RSM checks new goal state; self-update checks manifest download interval
@@ -218,6 +282,11 @@ def run(self, goal_state, ext_gs_updated):
add_event(op=WALAEventOperation.AgentUpgrade, is_success=False, message=error_msg, log_event=False)
self._last_attempted_update_error_msg = error_msg
+ # save initial update state when agent is doing first update
+ finally:
+ if self._is_initial_update():
+ self._save_initial_update_state_file()
+
def get_vmagent_update_status(self):
"""
This function gets the VMAgent update status as per the last attempted update.
diff --git a/azurelinuxagent/ga/cgroup.py b/azurelinuxagent/ga/cgroup.py
deleted file mode 100644
index b2bf32fbc..000000000
--- a/azurelinuxagent/ga/cgroup.py
+++ /dev/null
@@ -1,392 +0,0 @@
-# Copyright 2018 Microsoft Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# Requires Python 2.6+ and Openssl 1.0+
-
-import errno
-import os
-import re
-from datetime import timedelta
-
-from azurelinuxagent.common import logger, conf
-from azurelinuxagent.common.exception import CGroupsException
-from azurelinuxagent.common.future import ustr
-from azurelinuxagent.common.osutil import get_osutil
-from azurelinuxagent.common.utils import fileutil
-
-_REPORT_EVERY_HOUR = timedelta(hours=1)
-_DEFAULT_REPORT_PERIOD = timedelta(seconds=conf.get_cgroup_check_period())
-
-AGENT_NAME_TELEMETRY = "walinuxagent.service" # Name used for telemetry; it needs to be consistent even if the name of the service changes
-AGENT_LOG_COLLECTOR = "azure-walinuxagent-logcollector"
-
-
-class CounterNotFound(Exception):
- pass
-
-
-class MetricValue(object):
-
- """
- Class for defining all the required metric fields to send telemetry.
- """
-
- def __init__(self, category, counter, instance, value, report_period=_DEFAULT_REPORT_PERIOD):
- self._category = category
- self._counter = counter
- self._instance = instance
- self._value = value
- self._report_period = report_period
-
- @property
- def category(self):
- return self._category
-
- @property
- def counter(self):
- return self._counter
-
- @property
- def instance(self):
- return self._instance
-
- @property
- def value(self):
- return self._value
-
- @property
- def report_period(self):
- return self._report_period
-
-
-class MetricsCategory(object):
- MEMORY_CATEGORY = "Memory"
- CPU_CATEGORY = "CPU"
-
-
-class MetricsCounter(object):
- PROCESSOR_PERCENT_TIME = "% Processor Time"
- TOTAL_MEM_USAGE = "Total Memory Usage"
- MAX_MEM_USAGE = "Max Memory Usage"
- THROTTLED_TIME = "Throttled Time"
- SWAP_MEM_USAGE = "Swap Memory Usage"
- AVAILABLE_MEM = "Available MBytes"
- USED_MEM = "Used MBytes"
-
-
-re_user_system_times = re.compile(r'user (\d+)\nsystem (\d+)\n')
-
-
-class CGroup(object):
- def __init__(self, name, cgroup_path):
- """
- Initialize _data collection for the Memory controller
- :param: name: Name of the CGroup
- :param: cgroup_path: Path of the controller
- :return:
- """
- self.name = name
- self.path = cgroup_path
-
- def __str__(self):
- return "{0} [{1}]".format(self.name, self.path)
-
- def _get_cgroup_file(self, file_name):
- return os.path.join(self.path, file_name)
-
- def _get_file_contents(self, file_name):
- """
- Retrieve the contents to file.
-
- :param str file_name: Name of file within that metric controller
- :return: Entire contents of the file
- :rtype: str
- """
- parameter_file = self._get_cgroup_file(file_name)
-
- return fileutil.read_file(parameter_file)
-
- def _get_parameters(self, parameter_name, first_line_only=False):
- """
- Retrieve the values of a parameter from a controller.
- Returns a list of values in the file.
-
- :param first_line_only: return only the first line.
- :param str parameter_name: Name of file within that metric controller
- :return: The first line of the file, without line terminator
- :rtype: [str]
- """
- result = []
- try:
- values = self._get_file_contents(parameter_name).splitlines()
- result = values[0] if first_line_only else values
- except IndexError:
- parameter_filename = self._get_cgroup_file(parameter_name)
- logger.error("File {0} is empty but should not be".format(parameter_filename))
- raise CGroupsException("File {0} is empty but should not be".format(parameter_filename))
- except Exception as e:
- if isinstance(e, (IOError, OSError)) and e.errno == errno.ENOENT: # pylint: disable=E1101
- raise e
- parameter_filename = self._get_cgroup_file(parameter_name)
- raise CGroupsException("Exception while attempting to read {0}".format(parameter_filename), e)
- return result
-
- def is_active(self):
- try:
- tasks = self._get_parameters("tasks")
- if tasks:
- return len(tasks) != 0
- except (IOError, OSError) as e:
- if e.errno == errno.ENOENT:
- # only suppressing file not found exceptions.
- pass
- else:
- logger.periodic_warn(logger.EVERY_HALF_HOUR,
- 'Could not get list of tasks from "tasks" file in the cgroup: {0}.'
- ' Internal error: {1}'.format(self.path, ustr(e)))
- except CGroupsException as e:
- logger.periodic_warn(logger.EVERY_HALF_HOUR,
- 'Could not get list of tasks from "tasks" file in the cgroup: {0}.'
- ' Internal error: {1}'.format(self.path, ustr(e)))
- return False
-
- def get_tracked_metrics(self, **_):
- """
- Retrieves the current value of the metrics tracked for this cgroup and returns them as an array.
-
- Note: Agent won't track the metrics if the current cpu ticks less than previous value and returns empty array.
- """
- raise NotImplementedError()
-
-
-class CpuCgroup(CGroup):
- def __init__(self, name, cgroup_path):
- super(CpuCgroup, self).__init__(name, cgroup_path)
-
- self._osutil = get_osutil()
- self._previous_cgroup_cpu = None
- self._previous_system_cpu = None
- self._current_cgroup_cpu = None
- self._current_system_cpu = None
- self._previous_throttled_time = None
- self._current_throttled_time = None
-
- def _get_cpu_ticks(self, allow_no_such_file_or_directory_error=False):
- """
- Returns the number of USER_HZ of CPU time (user and system) consumed by this cgroup.
-
- If allow_no_such_file_or_directory_error is set to True and cpuacct.stat does not exist the function
- returns 0; this is useful when the function can be called before the cgroup has been created.
- """
- try:
- cpuacct_stat = self._get_file_contents('cpuacct.stat')
- except Exception as e:
- if not isinstance(e, (IOError, OSError)) or e.errno != errno.ENOENT: # pylint: disable=E1101
- raise CGroupsException("Failed to read cpuacct.stat: {0}".format(ustr(e)))
- if not allow_no_such_file_or_directory_error:
- raise e
- cpuacct_stat = None
-
- cpu_ticks = 0
-
- if cpuacct_stat is not None:
- #
- # Sample file:
- # # cat /sys/fs/cgroup/cpuacct/azure.slice/walinuxagent.service/cpuacct.stat
- # user 10190
- # system 3160
- #
- match = re_user_system_times.match(cpuacct_stat)
- if not match:
- raise CGroupsException(
- "The contents of {0} are invalid: {1}".format(self._get_cgroup_file('cpuacct.stat'), cpuacct_stat))
- cpu_ticks = int(match.groups()[0]) + int(match.groups()[1])
-
- return cpu_ticks
-
- def get_throttled_time(self):
- try:
- with open(os.path.join(self.path, 'cpu.stat')) as cpu_stat:
- #
- # Sample file:
- #
- # # cat /sys/fs/cgroup/cpuacct/azure.slice/walinuxagent.service/cpu.stat
- # nr_periods 51660
- # nr_throttled 19461
- # throttled_time 1529590856339
- #
- for line in cpu_stat:
- match = re.match(r'throttled_time\s+(\d+)', line)
- if match is not None:
- return int(match.groups()[0])
- raise Exception("Cannot find throttled_time")
- except (IOError, OSError) as e:
- if e.errno == errno.ENOENT:
- return 0
- raise CGroupsException("Failed to read cpu.stat: {0}".format(ustr(e)))
- except Exception as e:
- raise CGroupsException("Failed to read cpu.stat: {0}".format(ustr(e)))
-
- def _cpu_usage_initialized(self):
- return self._current_cgroup_cpu is not None and self._current_system_cpu is not None
-
- def initialize_cpu_usage(self):
- """
- Sets the initial values of CPU usage. This function must be invoked before calling get_cpu_usage().
- """
- if self._cpu_usage_initialized():
- raise CGroupsException("initialize_cpu_usage() should be invoked only once")
- self._current_cgroup_cpu = self._get_cpu_ticks(allow_no_such_file_or_directory_error=True)
- self._current_system_cpu = self._osutil.get_total_cpu_ticks_since_boot()
- self._current_throttled_time = self.get_throttled_time()
-
- def get_cpu_usage(self):
- """
- Computes the CPU used by the cgroup since the last call to this function.
-
- The usage is measured as a percentage of utilization of 1 core in the system. For example,
- using 1 core all of the time on a 4-core system would be reported as 100%.
-
- NOTE: initialize_cpu_usage() must be invoked before calling get_cpu_usage()
- """
- if not self._cpu_usage_initialized():
- raise CGroupsException("initialize_cpu_usage() must be invoked before the first call to get_cpu_usage()")
-
- self._previous_cgroup_cpu = self._current_cgroup_cpu
- self._previous_system_cpu = self._current_system_cpu
- self._current_cgroup_cpu = self._get_cpu_ticks()
- self._current_system_cpu = self._osutil.get_total_cpu_ticks_since_boot()
-
- cgroup_delta = self._current_cgroup_cpu - self._previous_cgroup_cpu
- system_delta = max(1, self._current_system_cpu - self._previous_system_cpu)
-
- return round(100.0 * self._osutil.get_processor_cores() * float(cgroup_delta) / float(system_delta), 3)
-
- def get_cpu_throttled_time(self, read_previous_throttled_time=True):
- """
- Computes the throttled time (in seconds) since the last call to this function.
- NOTE: initialize_cpu_usage() must be invoked before calling this function
- Compute only current throttled time if read_previous_throttled_time set to False
- """
- if not read_previous_throttled_time:
- return float(self.get_throttled_time() / 1E9)
-
- if not self._cpu_usage_initialized():
- raise CGroupsException(
- "initialize_cpu_usage() must be invoked before the first call to get_throttled_time()")
-
- self._previous_throttled_time = self._current_throttled_time
- self._current_throttled_time = self.get_throttled_time()
-
- return float(self._current_throttled_time - self._previous_throttled_time) / 1E9
-
- def get_tracked_metrics(self, **kwargs):
- tracked = []
- cpu_usage = self.get_cpu_usage()
- if cpu_usage >= float(0):
- tracked.append(
- MetricValue(MetricsCategory.CPU_CATEGORY, MetricsCounter.PROCESSOR_PERCENT_TIME, self.name, cpu_usage))
-
- if 'track_throttled_time' in kwargs and kwargs['track_throttled_time']:
- throttled_time = self.get_cpu_throttled_time()
- if cpu_usage >= float(0) and throttled_time >= float(0):
- tracked.append(
- MetricValue(MetricsCategory.CPU_CATEGORY, MetricsCounter.THROTTLED_TIME, self.name, throttled_time))
-
- return tracked
-
-
-class MemoryCgroup(CGroup):
- def __init__(self, name, cgroup_path):
- super(MemoryCgroup, self).__init__(name, cgroup_path)
-
- self._counter_not_found_error_count = 0
-
- def _get_memory_stat_counter(self, counter_name):
- try:
- with open(os.path.join(self.path, 'memory.stat')) as memory_stat:
- # cat /sys/fs/cgroup/memory/azure.slice/memory.stat
- # cache 67178496
- # rss 42340352
- # rss_huge 6291456
- # swap 0
- for line in memory_stat:
- re_memory_counter = r'{0}\s+(\d+)'.format(counter_name)
- match = re.match(re_memory_counter, line)
- if match is not None:
- return int(match.groups()[0])
- except (IOError, OSError) as e:
- if e.errno == errno.ENOENT:
- raise
- raise CGroupsException("Failed to read memory.stat: {0}".format(ustr(e)))
- except Exception as e:
- raise CGroupsException("Failed to read memory.stat: {0}".format(ustr(e)))
-
- raise CounterNotFound("Cannot find counter: {0}".format(counter_name))
-
- def get_memory_usage(self):
- """
- Collect RSS+CACHE from memory.stat cgroup.
-
- :return: Memory usage in bytes
- :rtype: int
- """
-
- cache = self._get_memory_stat_counter("cache")
- rss = self._get_memory_stat_counter("rss")
- return cache + rss
-
- def try_swap_memory_usage(self):
- """
- Collect SWAP from memory.stat cgroup.
-
- :return: Memory usage in bytes
- :rtype: int
- Note: stat file is the only place to get the SWAP since other swap related file memory.memsw.usage_in_bytes is for total Memory+SWAP.
- """
- try:
- return self._get_memory_stat_counter("swap")
- except CounterNotFound as e:
- if self._counter_not_found_error_count < 1:
- logger.periodic_info(logger.EVERY_HALF_HOUR,
- '{0} from "memory.stat" file in the cgroup: {1}---[Note: This log for informational purpose only and can be ignored]'.format(ustr(e), self.path))
- self._counter_not_found_error_count += 1
- return 0
-
- def get_max_memory_usage(self):
- """
- Collect memory.max_usage_in_bytes from the cgroup.
-
- :return: Memory usage in bytes
- :rtype: int
- """
- usage = 0
- try:
- usage = int(self._get_parameters('memory.max_usage_in_bytes', first_line_only=True))
- except Exception as e:
- if isinstance(e, (IOError, OSError)) and e.errno == errno.ENOENT: # pylint: disable=E1101
- raise
- raise CGroupsException("Exception while attempting to read {0}".format("memory.max_usage_in_bytes"), e)
-
- return usage
-
- def get_tracked_metrics(self, **_):
- return [
- MetricValue(MetricsCategory.MEMORY_CATEGORY, MetricsCounter.TOTAL_MEM_USAGE, self.name,
- self.get_memory_usage()),
- MetricValue(MetricsCategory.MEMORY_CATEGORY, MetricsCounter.MAX_MEM_USAGE, self.name,
- self.get_max_memory_usage(), _REPORT_EVERY_HOUR),
- MetricValue(MetricsCategory.MEMORY_CATEGORY, MetricsCounter.SWAP_MEM_USAGE, self.name,
- self.try_swap_memory_usage(), _REPORT_EVERY_HOUR)
- ]
diff --git a/azurelinuxagent/ga/cgroupapi.py b/azurelinuxagent/ga/cgroupapi.py
index 3bce05350..72b41ec77 100644
--- a/azurelinuxagent/ga/cgroupapi.py
+++ b/azurelinuxagent/ga/cgroupapi.py
@@ -24,8 +24,9 @@
from azurelinuxagent.common import logger
from azurelinuxagent.common.event import WALAEventOperation, add_event
-from azurelinuxagent.ga.cgroup import CpuCgroup, MemoryCgroup
from azurelinuxagent.ga.cgroupstelemetry import CGroupsTelemetry
+from azurelinuxagent.ga.cpucontroller import _CpuController, CpuControllerV1, CpuControllerV2
+from azurelinuxagent.ga.memorycontroller import MemoryControllerV1, MemoryControllerV2
from azurelinuxagent.common.conf import get_agent_pid_file_path
from azurelinuxagent.common.exception import CGroupsException, ExtensionErrorCodes, ExtensionError, \
ExtensionOperationError
@@ -185,14 +186,14 @@ def get_cgroup_api():
if available_unified_controllers != "":
raise CGroupsException("Detected hybrid cgroup mode, but there are controllers available to be enabled in unified hierarchy: {0}".format(available_unified_controllers))
- cgroup_api = SystemdCgroupApiv1()
+ cgroup_api_v1 = SystemdCgroupApiv1()
# Previously the agent supported users mounting cgroup v1 controllers in locations other than the systemd
- # default ('/sys/fs/cgroup'). The agent no longer supports this scenario. If either the cpu or memory
- # controller is mounted in a location other than the systemd default, raise Exception.
- if not cgroup_api.are_mountpoints_systemd_created():
- raise InvalidCgroupMountpointException("Expected cgroup controllers to be mounted at '{0}', but at least one is not. v1 mount points: \n{1}".format(CGROUP_FILE_SYSTEM_ROOT, json.dumps(cgroup_api.get_controller_root_paths())))
+ # default ('/sys/fs/cgroup'). The agent no longer supports this scenario. If any agent supported controller is
+ # mounted in a location other than the systemd default, raise Exception.
+ if not cgroup_api_v1.are_mountpoints_systemd_created():
+ raise InvalidCgroupMountpointException("Expected cgroup controllers to be mounted at '{0}', but at least one is not. v1 mount points: \n{1}".format(CGROUP_FILE_SYSTEM_ROOT, json.dumps(cgroup_api_v1.get_controller_mountpoints())))
log_cgroup_info("Using cgroup v1 for resource enforcement and monitoring")
- return cgroup_api
+ return cgroup_api_v1
raise CGroupsException("{0} has an unexpected file type: {1}".format(CGROUP_FILE_SYSTEM_ROOT, root_hierarchy_mode))
@@ -202,7 +203,6 @@ class _SystemdCgroupApi(object):
Cgroup interface via systemd. Contains common api implementations between cgroup v1 and v2.
"""
def __init__(self):
- self._agent_unit_name = None
self._systemd_run_commands = []
self._systemd_run_commands_lock = threading.RLock()
@@ -213,55 +213,36 @@ def get_systemd_run_commands(self):
with self._systemd_run_commands_lock:
return self._systemd_run_commands[:]
- def get_controller_root_paths(self):
+ def get_unit_cgroup(self, unit_name, cgroup_name):
"""
- Cgroup version specific. Returns a tuple with the root paths for the cpu and memory controllers; the values can
- be None if the corresponding controller is not mounted or enabled at the root cgroup.
+ Cgroup version specific. Returns a representation of the unit cgroup.
+
+ :param unit_name: The unit to return the cgroup of.
+ :param cgroup_name: A name to represent the cgroup. Used for logging/tracking purposes.
"""
raise NotImplementedError()
- def get_unit_cgroup_paths(self, unit_name):
- """
- Returns a tuple with the path of the cpu and memory cgroups for the given unit.
- The values returned can be None if the controller is not mounted or enabled.
+ def get_cgroup_from_relative_path(self, relative_path, cgroup_name):
"""
- # Ex: ControlGroup=/azure.slice/walinuxagent.service
- # controlgroup_path[1:] = azure.slice/walinuxagent.service
- controlgroup_path = systemd.get_unit_property(unit_name, "ControlGroup")
- cpu_root_path, memory_root_path = self.get_controller_root_paths()
-
- cpu_cgroup_path = os.path.join(cpu_root_path, controlgroup_path[1:]) \
- if cpu_root_path is not None else None
-
- memory_cgroup_path = os.path.join(memory_root_path, controlgroup_path[1:]) \
- if memory_root_path is not None else None
+ Cgroup version specific. Returns a representation of the cgroup at the provided relative path.
- return cpu_cgroup_path, memory_cgroup_path
-
- def get_process_cgroup_paths(self, process_id):
- """
- Returns a tuple with the path of the cpu and memory cgroups for the given process.
- The 'process_id' can be a numeric PID or the string "self" for the current process.
- The values returned can be None if the controller is not mounted or enabled.
+ :param relative_path: The relative path to return the cgroup of.
+ :param cgroup_name: A name to represent the cgroup. Used for logging/tracking purposes.
"""
- cpu_cgroup_relative_path, memory_cgroup_relative_path = self.get_process_cgroup_relative_paths(process_id)
-
- cpu_root_path, memory_root_path = self.get_controller_root_paths()
-
- cpu_cgroup_path = os.path.join(cpu_root_path, cpu_cgroup_relative_path) \
- if cpu_root_path is not None and cpu_cgroup_relative_path is not None else None
+ raise NotImplementedError()
- memory_cgroup_path = os.path.join(memory_root_path, memory_cgroup_relative_path) \
- if memory_root_path is not None and memory_cgroup_relative_path is not None else None
+ def get_process_cgroup(self, process_id, cgroup_name):
+ """
+ Cgroup version specific. Returns a representation of the process' cgroup.
- return cpu_cgroup_path, memory_cgroup_path
+ :param process_id: A numeric PID to return the cgroup of, or the string "self" to return the cgroup of the current process.
+ :param cgroup_name: A name to represent the cgroup. Used for logging/tracking purposes.
+ """
+ raise NotImplementedError()
- def get_process_cgroup_relative_paths(self, process_id):
+ def log_root_paths(self):
"""
- Cgroup version specific. Returns a tuple with the path of the cpu and memory cgroups for the given process
- (relative to the root path of the corresponding controller).
- The 'process_id' can be a numeric PID or the string "self" for the current process.
- The values returned can be None if the controller is not mounted or enabled.
+ Cgroup version specific. Logs the root paths of the cgroup filesystem/controllers.
"""
raise NotImplementedError()
@@ -279,11 +260,6 @@ def _is_systemd_failure(scope_name, stderr):
unit_not_found = "Unit {0} not found.".format(scope_name)
return unit_not_found in stderr or scope_name not in stderr
- @staticmethod
- def get_processes_in_cgroup(cgroup_path):
- with open(os.path.join(cgroup_path, "cgroup.procs"), "r") as cgroup_procs:
- return [int(pid) for pid in cgroup_procs.read().split()]
-
class SystemdCgroupApiv1(_SystemdCgroupApi):
"""
@@ -293,7 +269,8 @@ def __init__(self):
super(SystemdCgroupApiv1, self).__init__()
self._cgroup_mountpoints = self._get_controller_mountpoints()
- def _get_controller_mountpoints(self):
+ @staticmethod
+ def _get_controller_mountpoints():
"""
In v1, each controller is mounted at a different path. Use findmnt to get each path.
@@ -304,7 +281,8 @@ def _get_controller_mountpoints(self):
/sys/fs/cgroup/cpu,cpuacct cgroup cgroup rw,nosuid,nodev,noexec,relatime,cpu,cpuacct
etc
- Returns a dictionary of the controller-path mappings.
+ Returns a dictionary of the controller-path mappings. The dictionary only includes the controllers which are
+ supported by the agent.
"""
mount_points = {}
for line in shellutil.run_command(['findmnt', '-t', 'cgroup', '--noheadings']).splitlines():
@@ -315,51 +293,91 @@ def _get_controller_mountpoints(self):
if match is not None:
path = match.group('path')
controller = match.group('controller')
- if controller is not None and path is not None:
+ if controller is not None and path is not None and controller in CgroupV1.get_supported_controller_names():
mount_points[controller] = path
return mount_points
+ def get_controller_mountpoints(self):
+ """
+ Returns a dictionary of controller-mountpoint mappings.
+ """
+ return self._cgroup_mountpoints
+
def are_mountpoints_systemd_created(self):
"""
- Systemd mounts each controller at '/sys/fs/cgroup/'. Returns True if both cpu and memory
- mountpoints match this pattern, False otherwise.
+ Systemd mounts each controller at '/sys/fs/cgroup/'. Returns True if all mounted controllers which
+ are supported by the agent have mountpoints which match this pattern, False otherwise.
The agent does not support cgroup usage if the default root systemd mountpoint (/sys/fs/cgroup) is not used.
This method is used to check if any users are using non-systemd mountpoints. If they are, the agent drop-in
files will be cleaned up in cgroupconfigurator.
"""
- cpu_mountpoint = self._cgroup_mountpoints.get('cpu,cpuacct')
- memory_mountpoint = self._cgroup_mountpoints.get('memory')
- if cpu_mountpoint is not None and cpu_mountpoint != os.path.join(CGROUP_FILE_SYSTEM_ROOT, 'cpu,cpuacct'):
- return False
- if memory_mountpoint is not None and memory_mountpoint != os.path.join(CGROUP_FILE_SYSTEM_ROOT, 'memory'):
- return False
+ for controller, mount_point in self._cgroup_mountpoints.items():
+ if mount_point != os.path.join(CGROUP_FILE_SYSTEM_ROOT, controller):
+ return False
return True
- def get_controller_root_paths(self):
- # Return a tuple representing the mountpoints for cpu and memory. Either should be None if the corresponding
- # controller is not mounted.
- return self._cgroup_mountpoints.get('cpu,cpuacct'), self._cgroup_mountpoints.get('memory')
-
- def get_process_cgroup_relative_paths(self, process_id):
- # The contents of the file are similar to
- # # cat /proc/1218/cgroup
- # 10:memory:/system.slice/walinuxagent.service
- # 3:cpu,cpuacct:/system.slice/walinuxagent.service
- # etc
- cpu_path = None
- memory_path = None
+ @staticmethod
+ def _get_process_relative_controller_paths(process_id):
+ """
+ Returns the relative paths of the cgroup for the given process as a dict of controller-path mappings. The result
+ only includes controllers which are supported.
+ The contents of the /proc/{process_id}/cgroup file are similar to
+ # cat /proc/1218/cgroup
+ 10:memory:/system.slice/walinuxagent.service
+ 3:cpu,cpuacct:/system.slice/walinuxagent.service
+ etc
+
+ :param process_id: A numeric PID to return the relative paths of, or the string "self" to return the relative paths of the current process.
+ """
+ conroller_relative_paths = {}
for line in fileutil.read_file("/proc/{0}/cgroup".format(process_id)).splitlines():
- match = re.match(r'\d+:(?P(memory|.*cpuacct.*)):(?P.+)', line)
+ match = re.match(r'\d+:(?P.+):(?P.+)', line)
if match is not None:
controller = match.group('controller')
path = match.group('path').lstrip('/') if match.group('path') != '/' else None
- if controller == 'memory':
- memory_path = path
- else:
- cpu_path = path
+ if path is not None and controller in CgroupV1.get_supported_controller_names():
+ conroller_relative_paths[controller] = path
+
+ return conroller_relative_paths
+
+ def get_unit_cgroup(self, unit_name, cgroup_name):
+ unit_cgroup_relative_path = systemd.get_unit_property(unit_name, "ControlGroup")
+ unit_controller_paths = {}
- return cpu_path, memory_path
+ for controller, mountpoint in self._cgroup_mountpoints.items():
+ unit_controller_paths[controller] = os.path.join(mountpoint, unit_cgroup_relative_path[1:])
+
+ return CgroupV1(cgroup_name=cgroup_name, controller_mountpoints=self._cgroup_mountpoints,
+ controller_paths=unit_controller_paths)
+
+ def get_cgroup_from_relative_path(self, relative_path, cgroup_name):
+ controller_paths = {}
+ for controller, mountpoint in self._cgroup_mountpoints.items():
+ controller_paths[controller] = os.path.join(mountpoint, relative_path)
+
+ return CgroupV1(cgroup_name=cgroup_name, controller_mountpoints=self._cgroup_mountpoints,
+ controller_paths=controller_paths)
+
+ def get_process_cgroup(self, process_id, cgroup_name):
+ relative_controller_paths = self._get_process_relative_controller_paths(process_id)
+ process_controller_paths = {}
+
+ for controller, mountpoint in self._cgroup_mountpoints.items():
+ relative_controller_path = relative_controller_paths.get(controller)
+ if relative_controller_path is not None:
+ process_controller_paths[controller] = os.path.join(mountpoint, relative_controller_path)
+
+ return CgroupV1(cgroup_name=cgroup_name, controller_mountpoints=self._cgroup_mountpoints,
+ controller_paths=process_controller_paths)
+
+ def log_root_paths(self):
+ for controller in CgroupV1.get_supported_controller_names():
+ mount_point = self._cgroup_mountpoints.get(controller)
+ if mount_point is None:
+ log_cgroup_info("The {0} controller is not mounted".format(controller), send_event=False)
+ else:
+ log_cgroup_info("The {0} controller is mounted at {1}".format(controller, mount_point), send_event=False)
def start_extension_command(self, extension_name, command, cmd_name, timeout, shell, cwd, env, stdout, stderr,
error_code=ExtensionErrorCodes.PluginUnknownFailure):
@@ -385,25 +403,14 @@ def start_extension_command(self, extension_name, command, cmd_name, timeout, sh
log_cgroup_info("Started extension in unit '{0}'".format(scope_name), send_event=False)
- cpu_cgroup = None
+ cpu_controller = None
try:
cgroup_relative_path = os.path.join('azure.slice/azure-vmextensions.slice', extension_slice_name)
-
- cpu_cgroup_mountpoint, memory_cgroup_mountpoint = self.get_controller_root_paths()
-
- if cpu_cgroup_mountpoint is None:
- log_cgroup_info("The CPU controller is not mounted; will not track resource usage", send_event=False)
- else:
- cpu_cgroup_path = os.path.join(cpu_cgroup_mountpoint, cgroup_relative_path)
- cpu_cgroup = CpuCgroup(extension_name, cpu_cgroup_path)
- CGroupsTelemetry.track_cgroup(cpu_cgroup)
-
- if memory_cgroup_mountpoint is None:
- log_cgroup_info("The Memory controller is not mounted; will not track resource usage", send_event=False)
- else:
- memory_cgroup_path = os.path.join(memory_cgroup_mountpoint, cgroup_relative_path)
- memory_cgroup = MemoryCgroup(extension_name, memory_cgroup_path)
- CGroupsTelemetry.track_cgroup(memory_cgroup)
+ cgroup = self.get_cgroup_from_relative_path(cgroup_relative_path, extension_name)
+ for controller in cgroup.get_controllers():
+ if isinstance(controller, _CpuController):
+ cpu_controller = controller
+ CGroupsTelemetry.track_cgroup_controller(controller)
except IOError as e:
if e.errno == 2: # 'No such file or directory'
@@ -415,7 +422,7 @@ def start_extension_command(self, extension_name, command, cmd_name, timeout, sh
# Wait for process completion or timeout
try:
return handle_process_completion(process=process, command=command, timeout=timeout, stdout=stdout,
- stderr=stderr, error_code=error_code, cpu_cgroup=cpu_cgroup)
+ stderr=stderr, error_code=error_code, cpu_controller=cpu_controller)
except ExtensionError as e:
# The extension didn't terminate successfully. Determine whether it was due to systemd errors or
# extension errors.
@@ -448,7 +455,7 @@ class SystemdCgroupApiv2(_SystemdCgroupApi):
def __init__(self):
super(SystemdCgroupApiv2, self).__init__()
self._root_cgroup_path = self._get_root_cgroup_path()
- self._controllers_enabled_at_root = self._get_controllers_enabled_at_root(self._root_cgroup_path) if self._root_cgroup_path is not None else []
+ self._controllers_enabled_at_root = self._get_controllers_enabled_at_root(self._root_cgroup_path) if self._root_cgroup_path != "" else []
@staticmethod
def _get_root_cgroup_path():
@@ -459,7 +466,7 @@ def _get_root_cgroup_path():
$ findmnt -t cgroup2 --noheadings
/sys/fs/cgroup cgroup2 cgroup2 rw,nosuid,nodev,noexec,relatime,nsdelegate,memory_recursiveprot
- Returns None if the root cgroup cannot be determined from the output above.
+ Returns empty string if the root cgroup cannot be determined from the output above.
"""
#
for line in shellutil.run_command(['findmnt', '-t', 'cgroup2', '--noheadings']).splitlines():
@@ -470,7 +477,13 @@ def _get_root_cgroup_path():
root_cgroup_path = match.group('path')
if root_cgroup_path is not None:
return root_cgroup_path
- return None
+ return ""
+
+ def get_root_cgroup_path(self):
+ """
+ Returns the unified cgroup mountpoint.
+ """
+ return self._root_cgroup_path
@staticmethod
def _get_controllers_enabled_at_root(root_cgroup_path):
@@ -478,47 +491,261 @@ def _get_controllers_enabled_at_root(root_cgroup_path):
Returns a list of the controllers enabled at the root cgroup. The cgroup.subtree_control file at the root shows
a space separated list of the controllers which are enabled to control resource distribution from the root
cgroup to its children. If a controller is listed here, then that controller is available to enable in children
- cgroups.
+ cgroups. Returns only the enabled controllers which are supported by the agent.
$ cat /sys/fs/cgroup/cgroup.subtree_control
cpuset cpu io memory hugetlb pids rdma misc
"""
- controllers_enabled_at_root = []
enabled_controllers_file = os.path.join(root_cgroup_path, 'cgroup.subtree_control')
if os.path.exists(enabled_controllers_file):
controllers_enabled_at_root = fileutil.read_file(enabled_controllers_file).rstrip().split()
- return controllers_enabled_at_root
-
- def get_controller_root_paths(self):
- # Return a tuple representing the root cgroups for cpu and memory. Either should be None if the corresponding
- # controller is not enabled at the root. This check is necessary because all non-root "cgroup.subtree_control"
- # files can only contain controllers which are enabled in the parent's "cgroup.subtree_control" file.
-
- root_cpu_path = None
- root_memory_path = None
- if self._root_cgroup_path is not None:
- if 'cpu' in self._controllers_enabled_at_root:
- root_cpu_path = self._root_cgroup_path
- if 'memory' in self._controllers_enabled_at_root:
- root_memory_path = self._root_cgroup_path
-
- return root_cpu_path, root_memory_path
-
- def get_process_cgroup_relative_paths(self, process_id):
- # The contents of the file are similar to
- # # cat /proc/1218/cgroup
- # 0::/azure.slice/walinuxagent.service
- cpu_path = None
- memory_path = None
+ return list(set(controllers_enabled_at_root) & set(CgroupV2.get_supported_controller_names()))
+ return []
+
+ @staticmethod
+ def _get_process_relative_cgroup_path(process_id):
+ """
+ Returns the relative path of the cgroup for the given process.
+ The contents of the /proc/{process_id}/cgroup file are similar to
+ # cat /proc/1218/cgroup
+ 0::/azure.slice/walinuxagent.service
+
+ :param process_id: A numeric PID to return the relative path of, or the string "self" to return the relative path of the current process.
+ """
+ relative_path = ""
for line in fileutil.read_file("/proc/{0}/cgroup".format(process_id)).splitlines():
match = re.match(r'0::(?P<path>\S+)', line)
if match is not None:
- path = match.group('path').lstrip('/') if match.group('path') != '/' else None
- memory_path = path
- cpu_path = path
+ relative_path = match.group('path').lstrip('/') if match.group('path') != '/' else ""
+
+ return relative_path
+
+ def get_unit_cgroup(self, unit_name, cgroup_name):
+ unit_cgroup_relative_path = systemd.get_unit_property(unit_name, "ControlGroup")
+ unit_cgroup_path = ""
- return cpu_path, memory_path
+ if self._root_cgroup_path != "":
+ unit_cgroup_path = os.path.join(self._root_cgroup_path, unit_cgroup_relative_path[1:])
+
+ return CgroupV2(cgroup_name=cgroup_name, root_cgroup_path=self._root_cgroup_path, cgroup_path=unit_cgroup_path, enabled_controllers=self._controllers_enabled_at_root)
+
+ def get_cgroup_from_relative_path(self, relative_path, cgroup_name):
+ cgroup_path = ""
+ if self._root_cgroup_path != "":
+ cgroup_path = os.path.join(self._root_cgroup_path, relative_path)
+
+ return CgroupV2(cgroup_name=cgroup_name, root_cgroup_path=self._root_cgroup_path, cgroup_path=cgroup_path, enabled_controllers=self._controllers_enabled_at_root)
+
+ def get_process_cgroup(self, process_id, cgroup_name):
+ relative_path = self._get_process_relative_cgroup_path(process_id)
+ cgroup_path = ""
+
+ if self._root_cgroup_path != "":
+ cgroup_path = os.path.join(self._root_cgroup_path, relative_path)
+
+ return CgroupV2(cgroup_name=cgroup_name, root_cgroup_path=self._root_cgroup_path, cgroup_path=cgroup_path, enabled_controllers=self._controllers_enabled_at_root)
+
+ def log_root_paths(self):
+ log_cgroup_info("The root cgroup path is {0}".format(self._root_cgroup_path), send_event=False)
+ for controller in CgroupV2.get_supported_controller_names():
+ if controller in self._controllers_enabled_at_root:
+ log_cgroup_info("The {0} controller is enabled at the root cgroup".format(controller), send_event=False)
+ else:
+ log_cgroup_info("The {0} controller is not enabled at the root cgroup".format(controller), send_event=False)
def start_extension_command(self, extension_name, command, cmd_name, timeout, shell, cwd, env, stdout, stderr,
error_code=ExtensionErrorCodes.PluginUnknownFailure):
raise NotImplementedError()
+
+
+class Cgroup(object):
+ MEMORY_CONTROLLER = "memory"
+
+ def __init__(self, cgroup_name):
+ self._cgroup_name = cgroup_name
+
+ @staticmethod
+ def get_supported_controller_names():
+ """
+ Cgroup version specific. Returns a list of the controllers which the agent supports as strings.
+ """
+ raise NotImplementedError()
+
+ def check_in_expected_slice(self, expected_slice):
+ """
+ Cgroup version specific. Returns True if the cgroup is in the expected slice, False otherwise.
+
+ :param expected_slice: The slice the cgroup is expected to be in.
+ """
+ raise NotImplementedError()
+
+ def get_controllers(self, expected_relative_path=None):
+ """
+ Cgroup version specific. Returns a list of the agent supported controllers which are mounted/enabled for the cgroup.
+
+ :param expected_relative_path: The expected relative path of the cgroup. If provided, only controllers mounted
+ at this expected path will be returned.
+ """
+ raise NotImplementedError()
+
+ def get_processes(self):
+ """
+ Cgroup version specific. Returns a list of all the process ids in the cgroup.
+ """
+ raise NotImplementedError()
+
+
+class CgroupV1(Cgroup):
+ CPU_CONTROLLER = "cpu,cpuacct"
+
+ def __init__(self, cgroup_name, controller_mountpoints, controller_paths):
+ """
+ :param cgroup_name: The name of the cgroup. Used for logging/tracking purposes.
+ :param controller_mountpoints: A dictionary of controller-mountpoint mappings for each agent supported controller which is mounted.
+ :param controller_paths: A dictionary of controller-path mappings for each agent supported controller which is mounted. The path represents the absolute path of the controller.
+ """
+ super(CgroupV1, self).__init__(cgroup_name=cgroup_name)
+ self._controller_mountpoints = controller_mountpoints
+ self._controller_paths = controller_paths
+
+ @staticmethod
+ def get_supported_controller_names():
+ return [CgroupV1.CPU_CONTROLLER, CgroupV1.MEMORY_CONTROLLER]
+
+ def check_in_expected_slice(self, expected_slice):
+ in_expected_slice = True
+ for controller, path in self._controller_paths.items():
+ if expected_slice not in path:
+ log_cgroup_warning("The {0} controller for the {1} cgroup is not mounted in the expected slice. Expected slice: {2}. Actual controller path: {3}".format(controller, self._cgroup_name, expected_slice, path), send_event=False)
+ in_expected_slice = False
+
+ return in_expected_slice
+
+ def get_controllers(self, expected_relative_path=None):
+ controllers = []
+
+ for supported_controller_name in self.get_supported_controller_names():
+ controller = None
+ controller_path = self._controller_paths.get(supported_controller_name)
+ controller_mountpoint = self._controller_mountpoints.get(supported_controller_name)
+
+ if controller_mountpoint is None:
+ log_cgroup_warning("{0} controller is not mounted; will not track".format(supported_controller_name), send_event=False)
+ continue
+
+ if controller_path is None:
+ log_cgroup_warning("{0} is not mounted for the {1} cgroup; will not track".format(supported_controller_name, self._cgroup_name), send_event=False)
+ continue
+
+ if expected_relative_path is not None:
+ expected_path = os.path.join(controller_mountpoint, expected_relative_path)
+ if controller_path != expected_path:
+ log_cgroup_warning("The {0} controller is not mounted at the expected path for the {1} cgroup; will not track. Actual cgroup path:[{2}] Expected:[{3}]".format(supported_controller_name, self._cgroup_name, controller_path, expected_path), send_event=False)
+ continue
+
+ if supported_controller_name == self.CPU_CONTROLLER:
+ controller = CpuControllerV1(self._cgroup_name, controller_path)
+ elif supported_controller_name == self.MEMORY_CONTROLLER:
+ controller = MemoryControllerV1(self._cgroup_name, controller_path)
+
+ if controller is not None:
+ msg = "{0} controller for cgroup: {1}".format(supported_controller_name, controller)
+ log_cgroup_info(msg, send_event=False)
+ controllers.append(controller)
+
+ return controllers
+
+ def get_controller_procs_path(self, controller):
+ controller_path = self._controller_paths.get(controller)
+ if controller_path is not None and controller_path != "":
+ return os.path.join(controller_path, "cgroup.procs")
+ return ""
+
+ def get_processes(self):
+ pids = set()
+ for controller in self._controller_paths.keys():
+ procs_path = self.get_controller_procs_path(controller)
+ if os.path.exists(procs_path):
+ with open(procs_path, "r") as cgroup_procs:
+ for pid in cgroup_procs.read().split():
+ pids.add(int(pid))
+ return list(pids)
+
+
+class CgroupV2(Cgroup):
+ CPU_CONTROLLER = "cpu"
+
+ def __init__(self, cgroup_name, root_cgroup_path, cgroup_path, enabled_controllers):
+ """
+ :param cgroup_name: The name of the cgroup. Used for logging/tracking purposes.
+ :param root_cgroup_path: A string representing the root cgroup path. String can be empty.
+ :param cgroup_path: A string representing the absolute cgroup path. String can be empty.
+ :param enabled_controllers: A list of strings representing the agent supported controllers enabled at the root cgroup.
+ """
+ super(CgroupV2, self).__init__(cgroup_name)
+ self._root_cgroup_path = root_cgroup_path
+ self._cgroup_path = cgroup_path
+ self._enabled_controllers = enabled_controllers
+
+ @staticmethod
+ def get_supported_controller_names():
+ return [CgroupV2.CPU_CONTROLLER, CgroupV2.MEMORY_CONTROLLER]
+
+ def check_in_expected_slice(self, expected_slice):
+ if expected_slice not in self._cgroup_path:
+ log_cgroup_warning("The {0} cgroup is not in the expected slice. Expected slice: {1}. Actual cgroup path: {2}".format(self._cgroup_name, expected_slice, self._cgroup_path), send_event=False)
+ return False
+
+ return True
+
+ def get_controllers(self, expected_relative_path=None):
+ controllers = []
+
+ for supported_controller_name in self.get_supported_controller_names():
+ controller = None
+
+ if supported_controller_name not in self._enabled_controllers:
+ log_cgroup_warning("{0} controller is not enabled; will not track".format(supported_controller_name),
+ send_event=False)
+ continue
+
+ if self._cgroup_path == "":
+ log_cgroup_warning("Cgroup path for {0} cannot be determined; will not track".format(self._cgroup_name),
+ send_event=False)
+ continue
+
+ if expected_relative_path is not None:
+ expected_path = os.path.join(self._root_cgroup_path, expected_relative_path)
+ if self._cgroup_path != expected_path:
+ log_cgroup_warning(
+ "The {0} cgroup is not mounted at the expected path; will not track. Actual cgroup path:[{1}] Expected:[{2}]".format(
+ self._cgroup_name, self._cgroup_path, expected_path), send_event=False)
+ continue
+
+ if supported_controller_name == self.CPU_CONTROLLER:
+ controller = CpuControllerV2(self._cgroup_name, self._cgroup_path)
+ elif supported_controller_name == self.MEMORY_CONTROLLER:
+ controller = MemoryControllerV2(self._cgroup_name, self._cgroup_path)
+
+ if controller is not None:
+ msg = "{0} controller for cgroup: {1}".format(supported_controller_name, controller)
+ log_cgroup_info(msg, send_event=False)
+ controllers.append(controller)
+
+ return controllers
+
+ def get_procs_path(self):
+ if self._cgroup_path != "":
+ return os.path.join(self._cgroup_path, "cgroup.procs")
+ return ""
+
+ def get_processes(self):
+ pids = set()
+ procs_path = self.get_procs_path()
+ if os.path.exists(procs_path):
+ with open(procs_path, "r") as cgroup_procs:
+ for pid in cgroup_procs.read().split():
+ pids.add(int(pid))
+ return list(pids)
+
+
diff --git a/azurelinuxagent/ga/cgroupconfigurator.py b/azurelinuxagent/ga/cgroupconfigurator.py
index 72d5329f9..22634bb64 100644
--- a/azurelinuxagent/ga/cgroupconfigurator.py
+++ b/azurelinuxagent/ga/cgroupconfigurator.py
@@ -23,10 +23,12 @@
from azurelinuxagent.common import conf
from azurelinuxagent.common import logger
-from azurelinuxagent.ga.cgroup import CpuCgroup, AGENT_NAME_TELEMETRY, MetricsCounter, MemoryCgroup
+from azurelinuxagent.ga.cgroupcontroller import AGENT_NAME_TELEMETRY, MetricsCounter
from azurelinuxagent.ga.cgroupapi import SystemdRunError, EXTENSION_SLICE_PREFIX, CGroupUtil, SystemdCgroupApiv2, \
log_cgroup_info, log_cgroup_warning, get_cgroup_api, InvalidCgroupMountpointException
from azurelinuxagent.ga.cgroupstelemetry import CGroupsTelemetry
+from azurelinuxagent.ga.cpucontroller import _CpuController
+from azurelinuxagent.ga.memorycontroller import _MemoryController
from azurelinuxagent.common.exception import ExtensionErrorCodes, CGroupsException, AgentMemoryExceededException
from azurelinuxagent.common.future import ustr
from azurelinuxagent.common.osutil import systemd
@@ -66,18 +68,11 @@
LOGCOLLECTOR_SLICE = "azure-walinuxagent-logcollector.slice"
# More info on resource limits properties in systemd here:
# https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/7/html/resource_management_guide/sec-modifying_control_groups
-_LOGCOLLECTOR_SLICE_CONTENTS_FMT = """
-[Unit]
-Description=Slice for Azure VM Agent Periodic Log Collector
-DefaultDependencies=no
-Before=slices.target
-[Slice]
-CPUAccounting=yes
-CPUQuota={cpu_quota}
-MemoryAccounting=yes
-"""
-_LOGCOLLECTOR_CPU_QUOTA = "5%"
-LOGCOLLECTOR_MEMORY_LIMIT = 30 * 1024 ** 2 # 30Mb
+LOGCOLLECTOR_CPU_QUOTA_FOR_V1_AND_V2 = "5%"
+LOGCOLLECTOR_MEMORY_THROTTLE_LIMIT_FOR_V2 = "170M"
+LOGCOLLECTOR_MAX_THROTTLED_EVENTS_FOR_V2 = 10
+LOGCOLLECTOR_ANON_MEMORY_LIMIT_FOR_V1_AND_V2 = 25 * 1024 ** 2 # 25Mb
+LOGCOLLECTOR_CACHE_MEMORY_LIMIT_FOR_V1_AND_V2 = 155 * 1024 ** 2 # 155Mb
_AGENT_DROP_IN_FILE_SLICE = "10-Slice.conf"
_AGENT_DROP_IN_FILE_SLICE_CONTENTS = """
@@ -130,9 +125,8 @@ def __init__(self):
self._agent_cgroups_enabled = False
self._extensions_cgroups_enabled = False
self._cgroups_api = None
- self._agent_cpu_cgroup_path = None
- self._agent_memory_cgroup_path = None
- self._agent_memory_cgroup = None
+ self._agent_cgroup = None
+ self._agent_memory_metrics = None
self._check_cgroups_lock = threading.RLock() # Protect the check_cgroups which is called from Monitor thread and main loop.
def initialize(self):
@@ -177,40 +171,53 @@ def initialize(self):
log_cgroup_warning("Unable to determine which cgroup version to use: {0}".format(ustr(e)), send_event=True)
return
+ # TODO: Move this and systemd system check to cgroups_supported logic above
if self.using_cgroup_v2():
log_cgroup_info("Agent and extensions resource monitoring is not currently supported on cgroup v2")
return
+ # We check the agent unit 'Slice' property before setting up azure.slice. This check is done first
+ # because the agent's Slice unit property will be 'azure.slice' if the slice drop-in file exists, even
+ # though systemd has not moved the agent to azure.slice yet. Systemd will only move the agent to
+ # azure.slice after a service restart.
agent_unit_name = systemd.get_agent_unit_name()
agent_slice = systemd.get_unit_property(agent_unit_name, "Slice")
if agent_slice not in (AZURE_SLICE, "system.slice"):
log_cgroup_warning("The agent is within an unexpected slice: {0}".format(agent_slice))
return
+ # Notes about slice setup:
+ # On first agent update (for machines where daemon version did not already create azure.slice), the
+ # agent creates azure.slice and the agent unit Slice drop-in file, but systemd does not move the agent
+ # unit to azure.slice until service restart. It is ok to enable cgroup usage in this case if agent is
+ # running in system.slice.
+
self.__setup_azure_slice()
- cpu_controller_root, memory_controller_root = self.__get_cgroup_controller_roots()
- self._agent_cpu_cgroup_path, self._agent_memory_cgroup_path = self.__get_agent_cgroup_paths(agent_slice,
- cpu_controller_root,
- memory_controller_root)
+ # Log mount points/root paths for cgroup controllers
+ self._cgroups_api.log_root_paths()
+
+ # Get agent cgroup
+ self._agent_cgroup = self._cgroups_api.get_process_cgroup(process_id="self", cgroup_name=AGENT_NAME_TELEMETRY)
if conf.get_cgroup_disable_on_process_check_failure() and self._check_fails_if_processes_found_in_agent_cgroup_before_enable(agent_slice):
reason = "Found unexpected processes in the agent cgroup before agent enable cgroups."
self.disable(reason, DisableCgroups.ALL)
return
- if self._agent_cpu_cgroup_path is not None or self._agent_memory_cgroup_path is not None:
+ # Get controllers to track
+ agent_controllers = self._agent_cgroup.get_controllers(expected_relative_path=os.path.join(agent_slice, systemd.get_agent_unit_name()))
+ if len(agent_controllers) > 0:
self.enable()
- if self._agent_cpu_cgroup_path is not None:
- log_cgroup_info("Agent CPU cgroup: {0}".format(self._agent_cpu_cgroup_path))
- self.__set_cpu_quota(conf.get_agent_cpu_quota())
- CGroupsTelemetry.track_cgroup(CpuCgroup(AGENT_NAME_TELEMETRY, self._agent_cpu_cgroup_path))
-
- if self._agent_memory_cgroup_path is not None:
- log_cgroup_info("Agent Memory cgroup: {0}".format(self._agent_memory_cgroup_path))
- self._agent_memory_cgroup = MemoryCgroup(AGENT_NAME_TELEMETRY, self._agent_memory_cgroup_path)
- CGroupsTelemetry.track_cgroup(self._agent_memory_cgroup)
+ for controller in agent_controllers:
+ for prop in controller.get_unit_properties():
+ log_cgroup_info('Agent {0} unit property value: {1}'.format(prop, systemd.get_unit_property(systemd.get_agent_unit_name(), prop)))
+ if isinstance(controller, _CpuController):
+ self.__set_cpu_quota(conf.get_agent_cpu_quota())
+ elif isinstance(controller, _MemoryController):
+ self._agent_memory_metrics = controller
+ CGroupsTelemetry.track_cgroup_controller(controller)
except Exception as exception:
log_cgroup_warning("Error initializing cgroups: {0}".format(ustr(exception)))
@@ -229,21 +236,6 @@ def __check_no_legacy_cgroups(self):
return False
return True
- def __get_cgroup_controller_roots(self):
- cpu_controller_root, memory_controller_root = self._cgroups_api.get_controller_root_paths()
-
- if cpu_controller_root is not None:
- log_cgroup_info("The CPU cgroup controller root path is {0}".format(cpu_controller_root), send_event=False)
- else:
- log_cgroup_warning("The CPU cgroup controller is not mounted or enabled")
-
- if memory_controller_root is not None:
- log_cgroup_info("The memory cgroup controller root path is {0}".format(memory_controller_root), send_event=False)
- else:
- log_cgroup_warning("The memory cgroup controller is not mounted or enabled")
-
- return cpu_controller_root, memory_controller_root
-
@staticmethod
def __setup_azure_slice():
"""
@@ -292,9 +284,8 @@ def __setup_azure_slice():
if not os.path.exists(vmextensions_slice):
files_to_create.append((vmextensions_slice, _VMEXTENSIONS_SLICE_CONTENTS))
- # Update log collector slice contents
- slice_contents = _LOGCOLLECTOR_SLICE_CONTENTS_FMT.format(cpu_quota=_LOGCOLLECTOR_CPU_QUOTA)
- files_to_create.append((logcollector_slice, slice_contents))
+ # New agent will set up limits on the scope instead of the slice, so remove the existing logcollector slice.
+ CGroupConfigurator._Impl.__cleanup_unit_file(logcollector_slice)
if fileutil.findre_in_file(agent_unit_file, r"Slice=") is not None:
CGroupConfigurator._Impl.__cleanup_unit_file(agent_drop_in_file_slice)
@@ -416,47 +407,6 @@ def is_extension_resource_limits_setup_completed(self, extension_name, cpu_quota
return True
return False
- def __get_agent_cgroup_paths(self, agent_slice, cpu_controller_root, memory_controller_root):
- agent_unit_name = systemd.get_agent_unit_name()
-
- expected_relative_path = os.path.join(agent_slice, agent_unit_name)
- cpu_cgroup_relative_path, memory_cgroup_relative_path = self._cgroups_api.get_process_cgroup_relative_paths(
- "self")
-
- if cpu_cgroup_relative_path is None:
- log_cgroup_warning("The agent's process is not within a CPU cgroup")
- else:
- if cpu_cgroup_relative_path == expected_relative_path:
- log_cgroup_info('CPUAccounting: {0}'.format(systemd.get_unit_property(agent_unit_name, "CPUAccounting")))
- log_cgroup_info('CPUQuota: {0}'.format(systemd.get_unit_property(agent_unit_name, "CPUQuotaPerSecUSec")))
- else:
- log_cgroup_warning(
- "The Agent is not in the expected CPU cgroup; will not enable monitoring. Cgroup:[{0}] Expected:[{1}]".format(cpu_cgroup_relative_path, expected_relative_path))
- cpu_cgroup_relative_path = None # Set the path to None to prevent monitoring
-
- if memory_cgroup_relative_path is None:
- log_cgroup_warning("The agent's process is not within a memory cgroup")
- else:
- if memory_cgroup_relative_path == expected_relative_path:
- memory_accounting = systemd.get_unit_property(agent_unit_name, "MemoryAccounting")
- log_cgroup_info('MemoryAccounting: {0}'.format(memory_accounting))
- else:
- log_cgroup_warning(
- "The Agent is not in the expected memory cgroup; will not enable monitoring. CGroup:[{0}] Expected:[{1}]".format(memory_cgroup_relative_path, expected_relative_path))
- memory_cgroup_relative_path = None # Set the path to None to prevent monitoring
-
- if cpu_controller_root is not None and cpu_cgroup_relative_path is not None:
- agent_cpu_cgroup_path = os.path.join(cpu_controller_root, cpu_cgroup_relative_path)
- else:
- agent_cpu_cgroup_path = None
-
- if memory_controller_root is not None and memory_cgroup_relative_path is not None:
- agent_memory_cgroup_path = os.path.join(memory_controller_root, memory_cgroup_relative_path)
- else:
- agent_memory_cgroup_path = None
-
- return agent_cpu_cgroup_path, agent_memory_cgroup_path
-
def supported(self):
return self._cgroups_supported
@@ -496,7 +446,11 @@ def disable(self, reason, disable_cgroups):
elif disable_cgroups == DisableCgroups.AGENT: # disable agent
self._agent_cgroups_enabled = False
self.__reset_agent_cpu_quota()
- CGroupsTelemetry.stop_tracking(CpuCgroup(AGENT_NAME_TELEMETRY, self._agent_cpu_cgroup_path))
+ agent_controllers = self._agent_cgroup.get_controllers()
+ for controller in agent_controllers:
+ if isinstance(controller, _CpuController):
+ CGroupsTelemetry.stop_tracking(controller)
+ break
log_cgroup_warning("Disabling resource usage monitoring. Reason: {0}".format(reason), op=WALAEventOperation.CGroupsDisabled)
@@ -612,11 +566,7 @@ def _check_processes_in_agent_cgroup(self):
"""
unexpected = []
agent_cgroup_proc_names = []
- # Now we call _check_processes_in_agent_cgroup before we enable the cgroups or any one of the controller is not mounted, agent cgroup paths can be None.
- # so we need to check both.
- cgroup_path = self._agent_cpu_cgroup_path if self._agent_cpu_cgroup_path is not None else self._agent_memory_cgroup_path
- if cgroup_path is None:
- return
+
try:
daemon = os.getppid()
extension_handler = os.getpid()
@@ -624,12 +574,12 @@ def _check_processes_in_agent_cgroup(self):
agent_commands.update(shellutil.get_running_commands())
systemd_run_commands = set()
systemd_run_commands.update(self._cgroups_api.get_systemd_run_commands())
- agent_cgroup = self._cgroups_api.get_processes_in_cgroup(cgroup_path)
+ agent_cgroup_processes = self._agent_cgroup.get_processes()
# get the running commands again in case new commands started or completed while we were fetching the processes in the cgroup;
agent_commands.update(shellutil.get_running_commands())
systemd_run_commands.update(self._cgroups_api.get_systemd_run_commands())
- for process in agent_cgroup:
+ for process in agent_cgroup_processes:
agent_cgroup_proc_names.append(self.__format_process(process))
# Note that the agent uses systemd-run to start extensions; systemd-run belongs to the agent cgroup, though the extensions don't.
if process in (daemon, extension_handler) or process in systemd_run_commands:
@@ -658,6 +608,22 @@ def _check_processes_in_agent_cgroup(self):
self._report_agent_cgroups_procs(agent_cgroup_proc_names, unexpected)
raise CGroupsException("The agent's cgroup includes unexpected processes: {0}".format(unexpected))
+ def get_logcollector_unit_properties(self):
+ """
+ Returns the systemd unit properties for the log collector process.
+
+ Each property should be explicitly set (even if already included in the log collector slice) for the log
+ collector process to run in the transient scope directory with the expected accounting and limits.
+ """
+ logcollector_properties = ["--property=CPUAccounting=yes", "--property=MemoryAccounting=yes", "--property=CPUQuota={0}".format(LOGCOLLECTOR_CPU_QUOTA_FOR_V1_AND_V2)]
+ if not self.using_cgroup_v2():
+ return logcollector_properties
+ # Memory throttling limit is used when running log collector on v2 machines using the 'MemoryHigh' property.
+ # We do not use a systemd property to enforce memory on V1 because it invokes the OOM killer if the limit
+ # is exceeded.
+ logcollector_properties.append("--property=MemoryHigh={0}".format(LOGCOLLECTOR_MEMORY_THROTTLE_LIMIT_FOR_V2))
+ return logcollector_properties
+
@staticmethod
def _get_command(pid):
try:
@@ -753,8 +719,8 @@ def _check_agent_throttled_time(cgroup_metrics):
raise CGroupsException("The agent has been throttled for {0} seconds".format(metric.value))
def check_agent_memory_usage(self):
- if self.enabled() and self._agent_memory_cgroup:
- metrics = self._agent_memory_cgroup.get_tracked_metrics()
+ if self.enabled() and self._agent_memory_metrics is not None:
+ metrics = self._agent_memory_metrics.get_tracked_metrics()
current_usage = 0
for metric in metrics:
if metric.counter == MetricsCounter.TOTAL_MEM_USAGE:
@@ -780,59 +746,37 @@ def _get_parent(pid):
return 0
def start_tracking_unit_cgroups(self, unit_name):
- """
- TODO: Start tracking Memory Cgroups
- """
try:
- cpu_cgroup_path, memory_cgroup_path = self._cgroups_api.get_unit_cgroup_paths(unit_name)
-
- if cpu_cgroup_path is None:
- log_cgroup_info("The CPU controller is not mounted or enabled; will not track resource usage", send_event=False)
- else:
- CGroupsTelemetry.track_cgroup(CpuCgroup(unit_name, cpu_cgroup_path))
+ cgroup = self._cgroups_api.get_unit_cgroup(unit_name, unit_name)
+ controllers = cgroup.get_controllers()
- if memory_cgroup_path is None:
- log_cgroup_info("The Memory controller is not mounted or enabled; will not track resource usage", send_event=False)
- else:
- CGroupsTelemetry.track_cgroup(MemoryCgroup(unit_name, memory_cgroup_path))
+ for controller in controllers:
+ CGroupsTelemetry.track_cgroup_controller(controller)
except Exception as exception:
log_cgroup_info("Failed to start tracking resource usage for the extension: {0}".format(ustr(exception)), send_event=False)
def stop_tracking_unit_cgroups(self, unit_name):
- """
- TODO: remove Memory cgroups from tracked list.
- """
try:
- cpu_cgroup_path, memory_cgroup_path = self._cgroups_api.get_unit_cgroup_paths(unit_name)
+ cgroup = self._cgroups_api.get_unit_cgroup(unit_name, unit_name)
+ controllers = cgroup.get_controllers()
- if cpu_cgroup_path is not None:
- CGroupsTelemetry.stop_tracking(CpuCgroup(unit_name, cpu_cgroup_path))
-
- if memory_cgroup_path is not None:
- CGroupsTelemetry.stop_tracking(MemoryCgroup(unit_name, memory_cgroup_path))
+ for controller in controllers:
+ CGroupsTelemetry.stop_tracking(controller)
except Exception as exception:
log_cgroup_info("Failed to stop tracking resource usage for the extension service: {0}".format(ustr(exception)), send_event=False)
def stop_tracking_extension_cgroups(self, extension_name):
- """
- TODO: remove extension Memory cgroups from tracked list
- """
try:
extension_slice_name = CGroupUtil.get_extension_slice_name(extension_name)
- cgroup_relative_path = os.path.join(_AZURE_VMEXTENSIONS_SLICE,
- extension_slice_name)
-
- cpu_root_path, memory_root_path = self._cgroups_api.get_controller_root_paths()
- cpu_cgroup_path = os.path.join(cpu_root_path, cgroup_relative_path)
- memory_cgroup_path = os.path.join(memory_root_path, cgroup_relative_path)
-
- if cpu_cgroup_path is not None:
- CGroupsTelemetry.stop_tracking(CpuCgroup(extension_name, cpu_cgroup_path))
+ cgroup_relative_path = os.path.join(_AZURE_VMEXTENSIONS_SLICE, extension_slice_name)
- if memory_cgroup_path is not None:
- CGroupsTelemetry.stop_tracking(MemoryCgroup(extension_name, memory_cgroup_path))
+ cgroup = self._cgroups_api.get_cgroup_from_relative_path(relative_path=cgroup_relative_path,
+ cgroup_name=extension_name)
+ controllers = cgroup.get_controllers()
+ for controller in controllers:
+ CGroupsTelemetry.stop_tracking(controller)
except Exception as exception:
log_cgroup_info("Failed to stop tracking resource usage for the extension service: {0}".format(ustr(exception)), send_event=False)
diff --git a/azurelinuxagent/ga/cgroupcontroller.py b/azurelinuxagent/ga/cgroupcontroller.py
new file mode 100644
index 000000000..a530553b2
--- /dev/null
+++ b/azurelinuxagent/ga/cgroupcontroller.py
@@ -0,0 +1,175 @@
+# Copyright 2018 Microsoft Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Requires Python 2.6+ and Openssl 1.0+
+
+import errno
+import os
+from datetime import timedelta
+
+from azurelinuxagent.common import logger, conf
+from azurelinuxagent.common.exception import CGroupsException
+from azurelinuxagent.common.future import ustr
+from azurelinuxagent.common.utils import fileutil
+
+_REPORT_EVERY_HOUR = timedelta(hours=1)
+_DEFAULT_REPORT_PERIOD = timedelta(seconds=conf.get_cgroup_check_period())
+
+AGENT_NAME_TELEMETRY = "walinuxagent.service" # Name used for telemetry; it needs to be consistent even if the name of the service changes
+AGENT_LOG_COLLECTOR = "azure-walinuxagent-logcollector"
+
+
+class CounterNotFound(Exception):
+ pass
+
+
+class MetricValue(object):
+ """
+ Class for defining all the required metric fields to send telemetry.
+ """
+
+ def __init__(self, category, counter, instance, value, report_period=_DEFAULT_REPORT_PERIOD):
+ self._category = category
+ self._counter = counter
+ self._instance = instance
+ self._value = value
+ self._report_period = report_period
+
+ @property
+ def category(self):
+ return self._category
+
+ @property
+ def counter(self):
+ return self._counter
+
+ @property
+ def instance(self):
+ return self._instance
+
+ @property
+ def value(self):
+ return self._value
+
+ @property
+ def report_period(self):
+ return self._report_period
+
+
+class MetricsCategory(object):
+ MEMORY_CATEGORY = "Memory"
+ CPU_CATEGORY = "CPU"
+
+
+class MetricsCounter(object):
+ PROCESSOR_PERCENT_TIME = "% Processor Time"
+ THROTTLED_TIME = "Throttled Time (s)"
+ TOTAL_MEM_USAGE = "Total Memory Usage (B)"
+ ANON_MEM_USAGE = "Anon Memory Usage (B)"
+ CACHE_MEM_USAGE = "Cache Memory Usage (B)"
+ MAX_MEM_USAGE = "Max Memory Usage (B)"
+ SWAP_MEM_USAGE = "Swap Memory Usage (B)"
+ MEM_THROTTLED = "Total Memory Throttled Events"
+ AVAILABLE_MEM = "Available Memory (MB)"
+ USED_MEM = "Used Memory (MB)"
+
+
+class _CgroupController(object):
+ def __init__(self, name, cgroup_path):
+ """
+        Initialize data collection for the controller
+        :param name: Name of the CGroup
+        :param cgroup_path: Path of the controller
+ :return:
+ """
+ self.name = name
+ self.path = cgroup_path
+
+ def __str__(self):
+ return "{0} [{1}]".format(self.name, self.path)
+
+ def _get_cgroup_file(self, file_name):
+ return os.path.join(self.path, file_name)
+
+ def _get_file_contents(self, file_name):
+ """
+ Retrieve the contents of file.
+
+ :param str file_name: Name of file within that metric controller
+ :return: Entire contents of the file
+ :rtype: str
+ """
+ parameter_file = self._get_cgroup_file(file_name)
+
+ return fileutil.read_file(parameter_file)
+
+ def _get_parameters(self, parameter_name, first_line_only=False):
+ """
+ Retrieve the values of a parameter from a controller.
+ Returns a list of values in the file.
+
+ :param first_line_only: return only the first line.
+ :param str parameter_name: Name of file within that metric controller
+        :return: The lines in the file, or only the first line (without line terminator) if first_line_only is True
+        :rtype: [str] or str
+ """
+ result = []
+ try:
+ values = self._get_file_contents(parameter_name).splitlines()
+ result = values[0] if first_line_only else values
+ except IndexError:
+ parameter_filename = self._get_cgroup_file(parameter_name)
+ logger.error("File {0} is empty but should not be".format(parameter_filename))
+ raise CGroupsException("File {0} is empty but should not be".format(parameter_filename))
+ except Exception as e:
+ if isinstance(e, (IOError, OSError)) and e.errno == errno.ENOENT: # pylint: disable=E1101
+ raise e
+ parameter_filename = self._get_cgroup_file(parameter_name)
+ raise CGroupsException("Exception while attempting to read {0}".format(parameter_filename), e)
+ return result
+
+ def is_active(self):
+ """
+ Returns True if any processes belong to the cgroup. In v1, cgroup.procs returns a list of the thread group IDs
+        belonging to the cgroup. In v2, cgroup.procs returns a list of the process IDs belonging to the cgroup.
+ """
+ try:
+ procs = self._get_parameters("cgroup.procs")
+ if procs:
+ return len(procs) != 0
+ except (IOError, OSError) as e:
+ if e.errno == errno.ENOENT:
+ # only suppressing file not found exceptions.
+ pass
+ else:
+ logger.periodic_warn(logger.EVERY_HALF_HOUR,
+ 'Could not get list of procs from "cgroup.procs" file in the cgroup: {0}.'
+ ' Internal error: {1}'.format(self.path, ustr(e)))
+ except CGroupsException as e:
+ logger.periodic_warn(logger.EVERY_HALF_HOUR,
+ 'Could not get list of procs from "cgroup.procs" file in the cgroup: {0}.'
+ ' Internal error: {1}'.format(self.path, ustr(e)))
+ return False
+
+ def get_tracked_metrics(self, **_):
+ """
+ Retrieves the current value of the metrics tracked for this controller/cgroup and returns them as an array.
+ """
+ raise NotImplementedError()
+
+ def get_unit_properties(self):
+ """
+ Returns a list of the unit properties to collect for the controller.
+ """
+ raise NotImplementedError()
diff --git a/azurelinuxagent/ga/cgroupstelemetry.py b/azurelinuxagent/ga/cgroupstelemetry.py
index 5943b45ad..412f75f4f 100644
--- a/azurelinuxagent/ga/cgroupstelemetry.py
+++ b/azurelinuxagent/ga/cgroupstelemetry.py
@@ -17,7 +17,7 @@
import threading
from azurelinuxagent.common import logger
-from azurelinuxagent.ga.cgroup import CpuCgroup
+from azurelinuxagent.ga.cpucontroller import _CpuController
from azurelinuxagent.common.future import ustr
@@ -37,18 +37,18 @@ def get_track_throttled_time():
return CGroupsTelemetry._track_throttled_time
@staticmethod
- def track_cgroup(cgroup):
+ def track_cgroup_controller(cgroup_controller):
"""
- Adds the given item to the dictionary of tracked cgroups
+ Adds the given item to the dictionary of tracked cgroup controllers
"""
- if isinstance(cgroup, CpuCgroup):
+ if isinstance(cgroup_controller, _CpuController):
# set the current cpu usage
- cgroup.initialize_cpu_usage()
+ cgroup_controller.initialize_cpu_usage()
with CGroupsTelemetry._rlock:
- if not CGroupsTelemetry.is_tracked(cgroup.path):
- CGroupsTelemetry._tracked[cgroup.path] = cgroup
- logger.info("Started tracking cgroup {0}", cgroup)
+ if not CGroupsTelemetry.is_tracked(cgroup_controller.path):
+ CGroupsTelemetry._tracked[cgroup_controller.path] = cgroup_controller
+ logger.info("Started tracking cgroup {0}", cgroup_controller)
@staticmethod
def is_tracked(path):
@@ -75,11 +75,11 @@ def stop_tracking(cgroup):
@staticmethod
def poll_all_tracked():
metrics = []
- inactive_cgroups = []
+ inactive_controllers = []
with CGroupsTelemetry._rlock:
- for cgroup in CGroupsTelemetry._tracked.values():
+ for controller in CGroupsTelemetry._tracked.values():
try:
- metrics.extend(cgroup.get_tracked_metrics(track_throttled_time=CGroupsTelemetry._track_throttled_time))
+ metrics.extend(controller.get_tracked_metrics(track_throttled_time=CGroupsTelemetry._track_throttled_time))
except Exception as e:
# There can be scenarios when the CGroup has been deleted by the time we are fetching the values
# from it. This would raise IOError with file entry not found (ERRNO: 2). We do not want to log
@@ -87,11 +87,11 @@ def poll_all_tracked():
# exceptions which could occur, which is why we do a periodic log for all the other errors.
if not isinstance(e, (IOError, OSError)) or e.errno != errno.ENOENT: # pylint: disable=E1101
logger.periodic_warn(logger.EVERY_HOUR, '[PERIODIC] Could not collect metrics for cgroup '
- '{0}. Error : {1}'.format(cgroup.name, ustr(e)))
- if not cgroup.is_active():
- inactive_cgroups.append(cgroup)
- for inactive_cgroup in inactive_cgroups:
- CGroupsTelemetry.stop_tracking(inactive_cgroup)
+ '{0}. Error : {1}'.format(controller.name, ustr(e)))
+ if not controller.is_active():
+ inactive_controllers.append(controller)
+ for inactive_controller in inactive_controllers:
+ CGroupsTelemetry.stop_tracking(inactive_controller)
return metrics
diff --git a/azurelinuxagent/ga/collect_logs.py b/azurelinuxagent/ga/collect_logs.py
index d82933e96..488691a5a 100644
--- a/azurelinuxagent/ga/collect_logs.py
+++ b/azurelinuxagent/ga/collect_logs.py
@@ -25,19 +25,17 @@
import azurelinuxagent.common.conf as conf
from azurelinuxagent.common import logger
-from azurelinuxagent.ga.cgroup import MetricsCounter
-from azurelinuxagent.common.event import elapsed_milliseconds, add_event, WALAEventOperation, report_metric
+from azurelinuxagent.ga.cgroupcontroller import MetricsCounter
+from azurelinuxagent.common.event import elapsed_milliseconds, add_event, WALAEventOperation
from azurelinuxagent.common.future import ustr
from azurelinuxagent.ga.interfaces import ThreadHandlerInterface
from azurelinuxagent.ga.logcollector import COMPRESSED_ARCHIVE_PATH, GRACEFUL_KILL_ERRCODE
-from azurelinuxagent.ga.cgroupconfigurator import CGroupConfigurator, LOGCOLLECTOR_MEMORY_LIMIT
+from azurelinuxagent.ga.cgroupconfigurator import CGroupConfigurator, LOGCOLLECTOR_ANON_MEMORY_LIMIT_FOR_V1_AND_V2, LOGCOLLECTOR_CACHE_MEMORY_LIMIT_FOR_V1_AND_V2, LOGCOLLECTOR_MAX_THROTTLED_EVENTS_FOR_V2
from azurelinuxagent.common.protocol.util import get_protocol_util
from azurelinuxagent.common.utils import shellutil
from azurelinuxagent.common.utils.shellutil import CommandError
from azurelinuxagent.common.version import PY_VERSION_MAJOR, PY_VERSION_MINOR, AGENT_NAME, CURRENT_VERSION
-_INITIAL_LOG_COLLECTION_DELAY = 5 * 60 # Five minutes of delay
-
def get_collect_logs_handler():
return CollectLogsHandler()
@@ -46,18 +44,27 @@ def get_collect_logs_handler():
def is_log_collection_allowed():
# There are three conditions that need to be met in order to allow periodic log collection:
# 1) It should be enabled in the configuration.
- # 2) The system must be using cgroups to manage services. Needed for resource limiting of the log collection.
+ # 2) The system must be using cgroups to manage services - needed for resource limiting of the log collection. The
+ # agent currently fully supports resource limiting for v1, but only supports log collector resource limiting for v2
+ # if enabled via configuration.
+ # This condition is True if either:
+ # a. cgroup usage in the agent is enabled; OR
+ # b. the machine is using cgroup v2 and v2 resource limiting is enabled in the configuration.
# 3) The python version must be greater than 2.6 in order to support the ZipFile library used when collecting.
conf_enabled = conf.get_collect_logs()
cgroups_enabled = CGroupConfigurator.get_instance().enabled()
+ cgroup_v2_resource_limiting_enabled = CGroupConfigurator.get_instance().using_cgroup_v2() and conf.get_enable_cgroup_v2_resource_limiting()
supported_python = PY_VERSION_MINOR >= 6 if PY_VERSION_MAJOR == 2 else PY_VERSION_MAJOR == 3
- is_allowed = conf_enabled and cgroups_enabled and supported_python
+ is_allowed = conf_enabled and (cgroups_enabled or cgroup_v2_resource_limiting_enabled) and supported_python
msg = "Checking if log collection is allowed at this time [{0}]. All three conditions must be met: " \
- "configuration enabled [{1}], cgroups enabled [{2}], python supported: [{3}]".format(is_allowed,
- conf_enabled,
- cgroups_enabled,
- supported_python)
+ "1. configuration enabled [{1}], " \
+ "2. cgroups v1 enabled [{2}] OR cgroups v2 is in use and v2 resource limiting configuration enabled [{3}], " \
+ "3. python supported: [{4}]".format(is_allowed,
+ conf_enabled,
+ cgroups_enabled,
+ cgroup_v2_resource_limiting_enabled,
+ supported_python)
logger.info(msg)
add_event(
name=AGENT_NAME,
@@ -144,7 +151,7 @@ def init_protocols(self):
def daemon(self):
# Delay the first collector on start up to give short lived VMs (that might be dead before the second
# collection has a chance to run) an opportunity to do produce meaningful logs to collect.
- time.sleep(_INITIAL_LOG_COLLECTION_DELAY)
+ time.sleep(conf.get_log_collector_initial_delay())
try:
CollectLogsHandler.enable_monitor_cgroups_check()
@@ -171,15 +178,13 @@ def collect_and_send_logs(self):
def _collect_logs(self):
logger.info("Starting log collection...")
- # Invoke the command line tool in the agent to collect logs, with resource limits on CPU.
- # Some distros like ubuntu20 by default cpu and memory accounting enabled. Thus create nested cgroups under the logcollector slice
- # So disabling CPU and Memory accounting prevents from creating nested cgroups, so that all the counters will be present in logcollector Cgroup
-
+ # Invoke the command line tool in the agent to collect logs. The --scope option starts the process as a systemd
+ # transient scope unit. The --property option is used to set systemd memory and cpu properties on the scope.
systemd_cmd = [
- "systemd-run", "--property=CPUAccounting=no", "--property=MemoryAccounting=no",
+ "systemd-run",
"--unit={0}".format(logcollector.CGROUPS_UNIT),
"--slice={0}".format(cgroupconfigurator.LOGCOLLECTOR_SLICE), "--scope"
- ]
+ ] + CGroupConfigurator.get_instance().get_logcollector_unit_properties()
# The log tool is invoked from the current agent's egg with the command line option
collect_logs_cmd = [sys.executable, "-u", sys.argv[0], "-collect-logs"]
@@ -208,8 +213,7 @@ def exec_command():
# pylint has limited (i.e. no) awareness of control flow w.r.t. typing. we disable=no-member
# here because we know e must be a CommandError but pylint still considers the case where
# e is a different type of exception.
- err_msg = ustr("Log Collector exited with code {0}").format(
- e.returncode) # pylint: disable=no-member
+ err_msg = ustr("Log Collector exited with code {0}").format(e.returncode) # pylint: disable=no-member
if e.returncode == logcollector.INVALID_CGROUPS_ERRCODE: # pylint: disable=no-member
logger.info("Disabling periodic log collection until service restart due to process error.")
@@ -262,8 +266,8 @@ def _send_logs(self):
log_event=False)
-def get_log_collector_monitor_handler(cgroups):
- return LogCollectorMonitorHandler(cgroups)
+def get_log_collector_monitor_handler(controllers):
+ return LogCollectorMonitorHandler(controllers)
class LogCollectorMonitorHandler(ThreadHandlerInterface):
@@ -277,12 +281,13 @@ class LogCollectorMonitorHandler(ThreadHandlerInterface):
def get_thread_name():
return LogCollectorMonitorHandler._THREAD_NAME
- def __init__(self, cgroups):
+ def __init__(self, controllers):
self.event_thread = None
self.should_run = True
self.period = 2 # Log collector monitor runs every 2 secs.
- self.cgroups = cgroups
- self.__log_metrics = conf.get_cgroup_log_metrics()
+ self.controllers = controllers
+ self.max_recorded_metrics = {}
+ self.__should_log_metrics = conf.get_cgroup_log_metrics()
def run(self):
self.start()
@@ -312,7 +317,8 @@ def daemon(self):
while not self.stopped():
try:
metrics = self._poll_resource_usage()
- self._send_telemetry(metrics)
+ if self.__should_log_metrics:
+ self._log_metrics(metrics)
self._verify_memory_limit(metrics)
except Exception as e:
logger.error("An error occurred in the log collection monitor thread loop; "
@@ -324,30 +330,54 @@ def daemon(self):
"An error occurred in the MonitorLogCollectorCgroupsHandler thread; will exit the thread.\n{0}",
ustr(e))
+ def get_max_recorded_metrics(self):
+ return self.max_recorded_metrics
+
def _poll_resource_usage(self):
metrics = []
- for cgroup in self.cgroups:
- metrics.extend(cgroup.get_tracked_metrics(track_throttled_time=True))
+ for controller in self.controllers:
+ metrics.extend(controller.get_tracked_metrics(track_throttled_time=True))
+
+ for metric in metrics:
+ current_max = self.max_recorded_metrics.get(metric.counter)
+ self.max_recorded_metrics[metric.counter] = metric.value if current_max is None else max(current_max, metric.value)
+
return metrics
- def _send_telemetry(self, metrics):
+ def _log_metrics(self, metrics):
for metric in metrics:
- report_metric(metric.category, metric.counter, metric.instance, metric.value, log_event=self.__log_metrics)
+ logger.info("Metric {0}/{1} [{2}] = {3}".format(metric.category, metric.counter, metric.instance, metric.value))
def _verify_memory_limit(self, metrics):
- current_usage = 0
+ current_anon_and_swap_usage = 0
+ current_cache_usage = 0
+ memory_throttled_events = 0
for metric in metrics:
- if metric.counter == MetricsCounter.TOTAL_MEM_USAGE:
- current_usage += metric.value
+ if metric.counter == MetricsCounter.ANON_MEM_USAGE:
+ current_anon_and_swap_usage += metric.value
elif metric.counter == MetricsCounter.SWAP_MEM_USAGE:
- current_usage += metric.value
-
- if current_usage > LOGCOLLECTOR_MEMORY_LIMIT:
- msg = "Log collector memory limit {0} bytes exceeded. The max reported usage is {1} bytes.".format(LOGCOLLECTOR_MEMORY_LIMIT, current_usage)
+ current_anon_and_swap_usage += metric.value
+ elif metric.counter == MetricsCounter.CACHE_MEM_USAGE:
+ current_cache_usage = metric.value
+ elif metric.counter == MetricsCounter.MEM_THROTTLED:
+ memory_throttled_events = metric.value
+
+ mem_limit_exceeded = False
+ if current_anon_and_swap_usage > LOGCOLLECTOR_ANON_MEMORY_LIMIT_FOR_V1_AND_V2:
+ mem_limit_exceeded = True
+ msg = "Log collector anon + swap memory limit {0} bytes exceeded. The reported usage is {1} bytes.".format(LOGCOLLECTOR_ANON_MEMORY_LIMIT_FOR_V1_AND_V2, current_anon_and_swap_usage)
logger.info(msg)
- add_event(
- name=AGENT_NAME,
- version=CURRENT_VERSION,
- op=WALAEventOperation.LogCollection,
- message=msg)
+ add_event(name=AGENT_NAME, version=CURRENT_VERSION, op=WALAEventOperation.LogCollection, message=msg)
+ if current_cache_usage > LOGCOLLECTOR_CACHE_MEMORY_LIMIT_FOR_V1_AND_V2:
+ mem_limit_exceeded = True
+ msg = "Log collector cache memory limit {0} bytes exceeded. The reported usage is {1} bytes.".format(LOGCOLLECTOR_CACHE_MEMORY_LIMIT_FOR_V1_AND_V2, current_cache_usage)
+ logger.info(msg)
+ add_event(name=AGENT_NAME, version=CURRENT_VERSION, op=WALAEventOperation.LogCollection, message=msg)
+ if memory_throttled_events > LOGCOLLECTOR_MAX_THROTTLED_EVENTS_FOR_V2:
+ mem_limit_exceeded = True
+ msg = "Log collector memory throttled events limit {0} exceeded. The reported number of throttled events is {1}.".format(LOGCOLLECTOR_MAX_THROTTLED_EVENTS_FOR_V2, memory_throttled_events)
+ logger.info(msg)
+ add_event(name=AGENT_NAME, version=CURRENT_VERSION, op=WALAEventOperation.LogCollection, message=msg)
+
+ if mem_limit_exceeded:
os._exit(GRACEFUL_KILL_ERRCODE)
diff --git a/azurelinuxagent/ga/cpucontroller.py b/azurelinuxagent/ga/cpucontroller.py
new file mode 100644
index 000000000..b4f56dd15
--- /dev/null
+++ b/azurelinuxagent/ga/cpucontroller.py
@@ -0,0 +1,293 @@
+# Copyright 2018 Microsoft Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Requires Python 2.6+ and Openssl 1.0+
+
+import errno
+import os
+import re
+
+from azurelinuxagent.common.exception import CGroupsException
+from azurelinuxagent.common.future import ustr
+from azurelinuxagent.common.osutil import get_osutil
+from azurelinuxagent.common.utils import fileutil
+from azurelinuxagent.ga.cgroupcontroller import _CgroupController, MetricValue, MetricsCategory, MetricsCounter
+
+re_v1_user_system_times = re.compile(r'user (\d+)\nsystem (\d+)\n')
+re_v2_usage_time = re.compile(r'[\s\S]*usage_usec (\d+)[\s\S]*')
+
+
+class _CpuController(_CgroupController):
+ def __init__(self, name, cgroup_path):
+ super(_CpuController, self).__init__(name, cgroup_path)
+
+ self._osutil = get_osutil()
+ self._previous_cgroup_cpu = None
+ self._previous_system_cpu = None
+ self._current_cgroup_cpu = None
+ self._current_system_cpu = None
+ self._previous_throttled_time = None
+ self._current_throttled_time = None
+
+ def _get_cpu_stat_counter(self, counter_name):
+ """
+ Gets the value for the provided counter in cpu.stat
+ """
+ try:
+ with open(os.path.join(self.path, 'cpu.stat')) as cpu_stat:
+ #
+ # Sample file v1:
+ # # cat cpu.stat
+ # nr_periods 51660
+ # nr_throttled 19461
+ # throttled_time 1529590856339
+ #
+ # Sample file v2
+ # # cat cpu.stat
+ # usage_usec 200161503
+ # user_usec 199388368
+ # system_usec 773134
+ # core_sched.force_idle_usec 0
+ # nr_periods 40059
+ # nr_throttled 40022
+ # throttled_usec 3565247992
+ # nr_bursts 0
+ # burst_usec 0
+ #
+ for line in cpu_stat:
+ match = re.match(r'{0}\s+(\d+)'.format(counter_name), line)
+ if match is not None:
+ return int(match.groups()[0])
+ raise Exception("Cannot find {0}".format(counter_name))
+ except (IOError, OSError) as e:
+ if e.errno == errno.ENOENT:
+ return 0
+ raise CGroupsException("Failed to read cpu.stat: {0}".format(ustr(e)))
+ except Exception as e:
+ raise CGroupsException("Failed to read cpu.stat: {0}".format(ustr(e)))
+
+ def _cpu_usage_initialized(self):
+ """
+ Returns True if cpu usage has been initialized, False otherwise.
+ """
+ return self._current_cgroup_cpu is not None and self._current_system_cpu is not None
+
+ def initialize_cpu_usage(self):
+ """
+ Sets the initial values of CPU usage. This function must be invoked before calling get_cpu_usage().
+ """
+ raise NotImplementedError()
+
+ def get_cpu_usage(self):
+ """
+ Computes the CPU used by the cgroup since the last call to this function.
+
+ The usage is measured as a percentage of utilization of 1 core in the system. For example,
+ using 1 core all of the time on a 4-core system would be reported as 100%.
+
+ NOTE: initialize_cpu_usage() must be invoked before calling get_cpu_usage()
+ """
+ raise NotImplementedError()
+
+ def get_cpu_throttled_time(self, read_previous_throttled_time=True):
+ """
+ Computes the throttled time (in seconds) since the last call to this function.
+ NOTE: initialize_cpu_usage() must be invoked before calling this function
+ Compute only current throttled time if read_previous_throttled_time set to False
+ """
+ raise NotImplementedError()
+
+ def get_tracked_metrics(self, **kwargs):
+ # Note: If the current cpu usage is less than the previous usage (metric is negative), then an empty array will
+ # be returned and the agent won't track the metrics.
+ tracked = []
+ cpu_usage = self.get_cpu_usage()
+ if cpu_usage >= float(0):
+ tracked.append(MetricValue(MetricsCategory.CPU_CATEGORY, MetricsCounter.PROCESSOR_PERCENT_TIME, self.name, cpu_usage))
+
+ if 'track_throttled_time' in kwargs and kwargs['track_throttled_time']:
+ throttled_time = self.get_cpu_throttled_time()
+ if cpu_usage >= float(0) and throttled_time >= float(0):
+ tracked.append(MetricValue(MetricsCategory.CPU_CATEGORY, MetricsCounter.THROTTLED_TIME, self.name, throttled_time))
+
+ return tracked
+
+ def get_unit_properties(self):
+ return ["CPUAccounting", "CPUQuotaPerSecUSec"]
+
+
+class CpuControllerV1(_CpuController):
+ def initialize_cpu_usage(self):
+ if self._cpu_usage_initialized():
+ raise CGroupsException("initialize_cpu_usage() should be invoked only once")
+ self._current_cgroup_cpu = self._get_cpu_ticks(allow_no_such_file_or_directory_error=True)
+ self._current_system_cpu = self._osutil.get_total_cpu_ticks_since_boot()
+ self._current_throttled_time = self._get_cpu_stat_counter(counter_name='throttled_time')
+
+ def _get_cpu_ticks(self, allow_no_such_file_or_directory_error=False):
+ """
+ Returns the number of USER_HZ of CPU time (user and system) consumed by this cgroup.
+
+ If allow_no_such_file_or_directory_error is set to True and cpuacct.stat does not exist the function
+ returns 0; this is useful when the function can be called before the cgroup has been created.
+ """
+ try:
+ cpuacct_stat = self._get_file_contents('cpuacct.stat')
+ except Exception as e:
+ if not isinstance(e, (IOError, OSError)) or e.errno != errno.ENOENT: # pylint: disable=E1101
+ raise CGroupsException("Failed to read cpuacct.stat: {0}".format(ustr(e)))
+ if not allow_no_such_file_or_directory_error:
+ raise e
+ cpuacct_stat = None
+
+ cpu_ticks = 0
+
+ if cpuacct_stat is not None:
+ #
+ # Sample file:
+ # # cat /sys/fs/cgroup/cpuacct/azure.slice/walinuxagent.service/cpuacct.stat
+ # user 10190
+ # system 3160
+ #
+ match = re_v1_user_system_times.match(cpuacct_stat)
+ if not match:
+ raise CGroupsException("The contents of {0} are invalid: {1}".format(self._get_cgroup_file('cpuacct.stat'), cpuacct_stat))
+ cpu_ticks = int(match.groups()[0]) + int(match.groups()[1])
+
+ return cpu_ticks
+
+ def get_cpu_usage(self):
+ if not self._cpu_usage_initialized():
+ raise CGroupsException("initialize_cpu_usage() must be invoked before the first call to get_cpu_usage()")
+
+ self._previous_cgroup_cpu = self._current_cgroup_cpu
+ self._previous_system_cpu = self._current_system_cpu
+ self._current_cgroup_cpu = self._get_cpu_ticks()
+ self._current_system_cpu = self._osutil.get_total_cpu_ticks_since_boot()
+
+ cgroup_delta = self._current_cgroup_cpu - self._previous_cgroup_cpu
+ system_delta = max(1, self._current_system_cpu - self._previous_system_cpu)
+
+ return round(100.0 * self._osutil.get_processor_cores() * float(cgroup_delta) / float(system_delta), 3)
+
+ def get_cpu_throttled_time(self, read_previous_throttled_time=True):
+ # Throttled time is reported in nanoseconds in v1
+ if not read_previous_throttled_time:
+ return float(self._get_cpu_stat_counter(counter_name='throttled_time') / 1E9)
+
+ if not self._cpu_usage_initialized():
+ raise CGroupsException("initialize_cpu_usage() must be invoked before the first call to get_cpu_throttled_time()")
+
+ self._previous_throttled_time = self._current_throttled_time
+ self._current_throttled_time = self._get_cpu_stat_counter(counter_name='throttled_time')
+
+ return round(float(self._current_throttled_time - self._previous_throttled_time) / 1E9, 3)
+
+
+class CpuControllerV2(_CpuController):
+ @staticmethod
+ def get_system_uptime():
+ """
+ Get the uptime of the system (including time spent in suspend) in seconds.
+ /proc/uptime contains two numbers (values in seconds): the uptime of the system (including time spent in
+ suspend) and the amount of time spent in the idle process:
+ # cat /proc/uptime
+ 365380.48 722644.81
+
+ :return: System uptime in seconds
+ :rtype: float
+ """
+ uptime_contents = fileutil.read_file('/proc/uptime').split()
+ return float(uptime_contents[0])
+
+ def _get_system_usage(self):
+ try:
+ return self.get_system_uptime()
+ except (OSError, IOError) as e:
+ raise CGroupsException("Couldn't read /proc/uptime: {0}".format(ustr(e)))
+ except Exception as e:
+ raise CGroupsException("Couldn't parse /proc/uptime: {0}".format(ustr(e)))
+
+ def initialize_cpu_usage(self):
+ if self._cpu_usage_initialized():
+ raise CGroupsException("initialize_cpu_usage() should be invoked only once")
+ self._current_cgroup_cpu = self._get_cpu_time(allow_no_such_file_or_directory_error=True)
+ self._current_system_cpu = self._get_system_usage()
+ self._current_throttled_time = self._get_cpu_stat_counter(counter_name='throttled_usec')
+
+ def _get_cpu_time(self, allow_no_such_file_or_directory_error=False):
+ """
+ Returns the CPU time (user and system) consumed by this cgroup in seconds.
+
+ If allow_no_such_file_or_directory_error is set to True and cpu.stat does not exist the function
+ returns 0; this is useful when the function can be called before the cgroup has been created.
+ """
+ try:
+ cpu_stat = self._get_file_contents('cpu.stat')
+ except Exception as e:
+ if not isinstance(e, (IOError, OSError)) or e.errno != errno.ENOENT: # pylint: disable=E1101
+ raise CGroupsException("Failed to read cpu.stat: {0}".format(ustr(e)))
+ if not allow_no_such_file_or_directory_error:
+ raise e
+ cpu_stat = None
+
+ cpu_time = 0
+
+ if cpu_stat is not None:
+ #
+ # Sample file:
+ # # cat /sys/fs/cgroup/azure.slice/azure-walinuxagent.slice/azure-walinuxagent-logcollector.slice/collect-logs.scope/cpu.stat
+ # usage_usec 1990707
+ # user_usec 1939858
+ # system_usec 50848
+ # core_sched.force_idle_usec 0
+ # nr_periods 397
+ # nr_throttled 397
+ # throttled_usec 37994949
+ # nr_bursts 0
+ # burst_usec 0
+ #
+ match = re_v2_usage_time.match(cpu_stat)
+ if not match:
+ raise CGroupsException("The contents of {0} are invalid: {1}".format(self._get_cgroup_file('cpu.stat'), cpu_stat))
+ cpu_time = int(match.groups()[0]) / 1E6
+
+ return cpu_time
+
+ def get_cpu_usage(self):
+ if not self._cpu_usage_initialized():
+ raise CGroupsException("initialize_cpu_usage() must be invoked before the first call to get_cpu_usage()")
+
+ self._previous_cgroup_cpu = self._current_cgroup_cpu
+ self._previous_system_cpu = self._current_system_cpu
+ self._current_cgroup_cpu = self._get_cpu_time()
+ self._current_system_cpu = self._get_system_usage()
+
+ cgroup_delta = self._current_cgroup_cpu - self._previous_cgroup_cpu
+ system_delta = max(1.0, self._current_system_cpu - self._previous_system_cpu)
+
+ return round(100.0 * float(cgroup_delta) / float(system_delta), 3)
+
+ def get_cpu_throttled_time(self, read_previous_throttled_time=True):
+ # Throttled time is reported in microseconds in v2
+ if not read_previous_throttled_time:
+ return float(self._get_cpu_stat_counter(counter_name='throttled_usec') / 1E6)
+
+ if not self._cpu_usage_initialized():
+ raise CGroupsException("initialize_cpu_usage() must be invoked before the first call to get_cpu_throttled_time()")
+
+ self._previous_throttled_time = self._current_throttled_time
+ self._current_throttled_time = self._get_cpu_stat_counter(counter_name='throttled_usec')
+
+ return round(float(self._current_throttled_time - self._previous_throttled_time) / 1E6, 3)
diff --git a/azurelinuxagent/ga/extensionprocessutil.py b/azurelinuxagent/ga/extensionprocessutil.py
index d2b37551b..8eb65d459 100644
--- a/azurelinuxagent/ga/extensionprocessutil.py
+++ b/azurelinuxagent/ga/extensionprocessutil.py
@@ -31,7 +31,7 @@
TELEMETRY_MESSAGE_MAX_LEN = 3200
-def wait_for_process_completion_or_timeout(process, timeout, cpu_cgroup):
+def wait_for_process_completion_or_timeout(process, timeout, cpu_controller):
"""
Utility function that waits for the process to complete within the given time frame. This function will terminate
the process if when the given time frame elapses.
@@ -47,7 +47,7 @@ def wait_for_process_completion_or_timeout(process, timeout, cpu_cgroup):
throttled_time = 0
if timeout == 0:
- throttled_time = get_cpu_throttled_time(cpu_cgroup)
+ throttled_time = get_cpu_throttled_time(cpu_controller)
os.killpg(os.getpgid(process.pid), signal.SIGKILL)
else:
# process completed or forked; sleep 1 sec to give the child process (if any) a chance to start
@@ -57,7 +57,7 @@ def wait_for_process_completion_or_timeout(process, timeout, cpu_cgroup):
return timeout == 0, return_code, throttled_time
-def handle_process_completion(process, command, timeout, stdout, stderr, error_code, cpu_cgroup=None):
+def handle_process_completion(process, command, timeout, stdout, stderr, error_code, cpu_controller=None):
"""
Utility function that waits for process completion and retrieves its output (stdout and stderr) if it completed
before the timeout period. Otherwise, the process will get killed and an ExtensionError will be raised.
@@ -68,15 +68,15 @@ def handle_process_completion(process, command, timeout, stdout, stderr, error_c
:param stdout: Must be a file since we seek on it when parsing the subprocess output
:param stderr: Must be a file since we seek on it when parsing the subprocess outputs
:param error_code: The error code to set if we raise an ExtensionError
- :param cpu_cgroup: Reference the cpu cgroup name and path
+ :param cpu_controller: References the cpu controller for the cgroup
:return:
"""
# Wait for process completion or timeout
- timed_out, return_code, throttled_time = wait_for_process_completion_or_timeout(process, timeout, cpu_cgroup)
+ timed_out, return_code, throttled_time = wait_for_process_completion_or_timeout(process, timeout, cpu_controller)
process_output = read_output(stdout, stderr)
if timed_out:
- if cpu_cgroup is not None: # Report CPUThrottledTime when timeout happens
+ if cpu_controller is not None: # Report CPUThrottledTime when timeout happens
raise ExtensionError("Timeout({0});CPUThrottledTime({1}secs): {2}\n{3}".format(timeout, throttled_time, command, process_output),
code=ExtensionErrorCodes.PluginHandlerScriptTimedout)
@@ -211,14 +211,14 @@ def to_s(captured_stdout, stdout_offset, captured_stderr, stderr_offset):
return to_s(stdout, -1*max_len_each, stderr, -1*max_len_each)
-def get_cpu_throttled_time(cpu_cgroup):
+def get_cpu_throttled_time(cpu_controller):
"""
return the throttled time for the given cgroup.
"""
throttled_time = 0
- if cpu_cgroup is not None:
+ if cpu_controller is not None:
try:
- throttled_time = cpu_cgroup.get_cpu_throttled_time(read_previous_throttled_time=False)
+ throttled_time = cpu_controller.get_cpu_throttled_time(read_previous_throttled_time=False)
except Exception as e:
logger.warn("Failed to get cpu throttled time for the extension: {0}", ustr(e))
diff --git a/azurelinuxagent/ga/exthandlers.py b/azurelinuxagent/ga/exthandlers.py
index 3499b706c..b903ba343 100644
--- a/azurelinuxagent/ga/exthandlers.py
+++ b/azurelinuxagent/ga/exthandlers.py
@@ -476,6 +476,7 @@ def handle_ext_handlers(self, goal_state_id):
depends_on_err_msg = None
extensions_enabled = conf.get_extensions_enabled()
+
for extension, ext_handler in all_extensions:
handler_i = ExtHandlerInstance(ext_handler, self.protocol, extension=extension)
@@ -1295,6 +1296,9 @@ def initialize(self):
fileutil.clean_ioerror(e, paths=[self.get_base_dir(), self.pkg_file])
raise ExtensionDownloadError(u"Failed to save HandlerManifest.json", e)
+ man = self.load_manifest()
+ man.report_invalid_boolean_properties(ext_name=self.get_full_name())
+
self.ensure_consistent_data_for_mc()
# Create status and config dir
@@ -1321,7 +1325,7 @@ def set_extension_resource_limits(self):
extension_name = self.get_full_name()
# setup the resource limits for extension operations and it's services.
man = self.load_manifest()
- resource_limits = man.get_resource_limits(extension_name, self.ext_handler.version)
+ resource_limits = man.get_resource_limits()
if not CGroupConfigurator.get_instance().is_extension_resource_limits_setup_completed(extension_name,
cpu_quota=resource_limits.get_extension_slice_cpu_quota()):
CGroupConfigurator.get_instance().setup_extension_slice(
@@ -1391,7 +1395,7 @@ def _enable_extension(self, extension, uninstall_exit_code):
self.__set_extension_state(extension, ExtensionState.Enabled)
# start tracking the extension services cgroup.
- resource_limits = man.get_resource_limits(self.get_full_name(), self.ext_handler.version)
+ resource_limits = man.get_resource_limits()
CGroupConfigurator.get_instance().start_tracking_extension_services_cgroups(
resource_limits.get_service_list())
@@ -1416,9 +1420,17 @@ def disable(self, extension=None, ignore_error=False):
self.report_event(name=self.get_extension_full_name(extension), message=msg, is_success=False,
log_event=False)
- # Clean extension state For Multi Config extensions on Disable
+ #
+ # In the case of multi-config handlers, we keep the state of each extension individually.
+ # Disable can be called when the extension is deleted (the extension state in the goal state is set to "disabled"),
+ # or as part of the Uninstall and Update sequences. When the extension is deleted, we need to remove its state, along
+ # with its status and settings files. Otherwise, we need to set the state to "disabled".
+ #
if self.should_perform_multi_config_op(extension):
- self.__remove_extension_state_files(extension)
+ if extension.state == ExtensionRequestedState.Disabled:
+ self.__remove_extension_state_files(extension)
+ else:
+ self.__set_extension_state(extension, ExtensionState.Disabled)
# For Single config, dont check enabled_extensions because no extension state is maintained.
# For MultiConfig, Set the handler state to Installed only when all extensions have been disabled
@@ -1450,7 +1462,7 @@ def uninstall(self, extension=None):
man = self.load_manifest()
# stop tracking extension services cgroup.
- resource_limits = man.get_resource_limits(self.get_full_name(), self.ext_handler.version)
+ resource_limits = man.get_resource_limits()
CGroupConfigurator.get_instance().stop_tracking_extension_services_cgroups(
resource_limits.get_service_list())
CGroupConfigurator.get_instance().remove_extension_services_drop_in_files(
@@ -2120,14 +2132,6 @@ def get_env_file(self):
def get_log_dir(self):
return os.path.join(conf.get_ext_log_dir(), self.ext_handler.name)
- @staticmethod
- def is_azuremonitorlinuxagent(extension_name):
- cgroup_monitor_extension_name = conf.get_cgroup_monitor_extension_name()
- if re.match(r"\A" + cgroup_monitor_extension_name, extension_name) is not None\
- and datetime.datetime.utcnow() < datetime.datetime.strptime(conf.get_cgroup_monitor_expiry_time(), "%Y-%m-%d"):
- return True
- return False
-
@staticmethod
def _read_status_file(ext_status_file):
err_count = 0
@@ -2229,7 +2233,8 @@ def get_disable_command(self):
return self.data['handlerManifest']["disableCommand"]
def is_report_heartbeat(self):
- return self.data['handlerManifest'].get('reportHeartbeat', False)
+ value = self.data['handlerManifest'].get('reportHeartbeat', False)
+ return self._parse_boolean_value(value, default_val=False)
def is_update_with_install(self):
update_mode = self.data['handlerManifest'].get('updateMode')
@@ -2238,41 +2243,38 @@ def is_update_with_install(self):
return update_mode.lower() == "updatewithinstall"
def is_continue_on_update_failure(self):
- return self.data['handlerManifest'].get('continueOnUpdateFailure', False)
+ value = self.data['handlerManifest'].get('continueOnUpdateFailure', False)
+ return self._parse_boolean_value(value, default_val=False)
def supports_multiple_extensions(self):
- return self.data['handlerManifest'].get('supportsMultipleExtensions', False)
+ value = self.data['handlerManifest'].get('supportsMultipleExtensions', False)
+ return self._parse_boolean_value(value, default_val=False)
+
+ def get_resource_limits(self):
+ return ResourceLimits(self.data.get('resourceLimits', None))
- def get_resource_limits(self, extension_name, str_version):
+ def report_invalid_boolean_properties(self, ext_name):
"""
- Placeholder values for testing and monitoring the monitor extension resource usage.
- This is not effective after nov 30th.
+    Check that the specified keys in the handler manifest have boolean values.
"""
- if ExtHandlerInstance.is_azuremonitorlinuxagent(extension_name):
- if FlexibleVersion(str_version) < FlexibleVersion("1.12"):
- test_man = {
- "resourceLimits": {
- "services": [
- {
- "name": "mdsd.service"
- }
- ]
- }
- }
- return ResourceLimits(test_man.get('resourceLimits', None))
- else:
- test_man = {
- "resourceLimits": {
- "services": [
- {
- "name": "azuremonitoragent.service"
- }
- ]
- }
- }
- return ResourceLimits(test_man.get('resourceLimits', None))
+ for key in ['reportHeartbeat', 'continueOnUpdateFailure', 'supportsMultipleExtensions']:
+ value = self.data['handlerManifest'].get(key)
+ if value is not None and not isinstance(value, bool):
+ msg = "In the handler manifest: '{0}' has a non-boolean value [{1}] for boolean type. Please change it to a boolean value.".format(key, value)
+ logger.info(msg)
+ add_event(name=ext_name, message=msg, op=WALAEventOperation.ExtensionHandlerManifest, log_event=False)
- return ResourceLimits(self.data.get('resourceLimits', None))
+ @staticmethod
+ def _parse_boolean_value(value, default_val):
+ """
+    Expects a boolean value, but
+ for backward compatibility, 'true' (case-insensitive) is accepted, and other values default to False
+    Note: the json module returns unicode strings on Python 2, and the unicode type was removed in Python 3;
+    ustr is a unicode object on Python 2 and a str object on Python 3, so it covers both cases.
+ """
+ if not isinstance(value, bool):
+ return True if isinstance(value, ustr) and value.lower() == "true" else default_val
+ return value
class ResourceLimits(object):
diff --git a/azurelinuxagent/ga/logcollector.py b/azurelinuxagent/ga/logcollector.py
index 7d725de22..a6bc042b1 100644
--- a/azurelinuxagent/ga/logcollector.py
+++ b/azurelinuxagent/ga/logcollector.py
@@ -314,21 +314,21 @@ def _get_final_list_for_archive(self, priority_file_queue):
if os.path.getsize(file_path) <= _FILE_SIZE_LIMIT:
final_files_to_collect.append(file_path)
+ total_uncompressed_size += file_size
_LOGGER.info("Adding file %s, size %s b", file_path, file_size)
else:
truncated_file_path = self._truncate_large_file(file_path)
if truncated_file_path:
_LOGGER.info("Adding truncated file %s, size %s b", truncated_file_path, file_size)
final_files_to_collect.append(truncated_file_path)
-
- total_uncompressed_size += file_size
+ total_uncompressed_size += file_size
except IOError as e:
if e.errno == 2: # [Errno 2] No such file or directory
_LOGGER.warning("File %s does not exist, skipping collection for this file", file_path)
_LOGGER.info("Uncompressed archive size is %s b", total_uncompressed_size)
- return final_files_to_collect
+ return final_files_to_collect, total_uncompressed_size
def _create_list_of_files_to_collect(self):
# The final list of files to be collected by zip is created in three steps:
@@ -338,8 +338,8 @@ def _create_list_of_files_to_collect(self):
# the size limit.
parsed_file_paths = self._process_manifest_file()
prioritized_file_paths = self._get_priority_files_list(parsed_file_paths)
- files_to_collect = self._get_final_list_for_archive(prioritized_file_paths)
- return files_to_collect
+ files_to_collect, total_uncompressed_size = self._get_final_list_for_archive(prioritized_file_paths)
+ return files_to_collect, total_uncompressed_size
def collect_logs_and_get_archive(self):
"""
@@ -347,6 +347,7 @@ def collect_logs_and_get_archive(self):
:return: Returns the path of the collected compressed archive
"""
files_to_collect = []
+ total_uncompressed_size = 0
try:
# Clear previous run's output and create base directories if they don't exist already.
@@ -356,7 +357,7 @@ def collect_logs_and_get_archive(self):
_LOGGER.info("Starting log collection at %s", start_time.strftime("%Y-%m-%dT%H:%M:%SZ"))
_LOGGER.info("Using log collection mode %s", "full" if self._is_full_mode else "normal")
- files_to_collect = self._create_list_of_files_to_collect()
+ files_to_collect, total_uncompressed_size = self._create_list_of_files_to_collect()
_LOGGER.info("### Creating compressed archive ###")
compressed_archive = None
@@ -402,7 +403,7 @@ def handle_add_file_to_archive_error(error_count, max_errors, file_to_collect, e
if compressed_archive is not None:
compressed_archive.close()
- return COMPRESSED_ARCHIVE_PATH
+ return COMPRESSED_ARCHIVE_PATH, total_uncompressed_size
except Exception as e:
msg = "Failed to collect logs: {0}".format(ustr(e))
_LOGGER.error(msg)
diff --git a/azurelinuxagent/ga/memorycontroller.py b/azurelinuxagent/ga/memorycontroller.py
new file mode 100644
index 000000000..30e7540cf
--- /dev/null
+++ b/azurelinuxagent/ga/memorycontroller.py
@@ -0,0 +1,220 @@
+# Copyright 2018 Microsoft Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Requires Python 2.6+ and Openssl 1.0+
+
+import errno
+import os
+import re
+
+from azurelinuxagent.common import logger
+from azurelinuxagent.common.exception import CGroupsException
+from azurelinuxagent.common.future import ustr
+from azurelinuxagent.ga.cgroupcontroller import _CgroupController, CounterNotFound, MetricValue, MetricsCategory, \
+ MetricsCounter, _REPORT_EVERY_HOUR
+
+
+class _MemoryController(_CgroupController):
+ def __init__(self, name, cgroup_path):
+ super(_MemoryController, self).__init__(name, cgroup_path)
+ self._counter_not_found_error_count = 0
+
+ def _get_memory_stat_counter(self, counter_name):
+ """
+ Gets the value for the provided counter in memory.stat
+ """
+ try:
+ with open(os.path.join(self.path, 'memory.stat')) as memory_stat:
+ #
+ # Sample file v1:
+ # # cat memory.stat
+ # cache 0
+ # rss 0
+ # rss_huge 0
+ # shmem 0
+ # mapped_file 0
+ # dirty 0
+ # writeback 0
+ # swap 0
+ # ...
+ #
+ # Sample file v2
+ # # cat memory.stat
+ # anon 0
+ # file 147140608
+ # kernel 1421312
+ # kernel_stack 0
+ # pagetables 0
+ # sec_pagetables 0
+ # percpu 130752
+ # sock 0
+ # ...
+ #
+ for line in memory_stat:
+ re_memory_counter = r'{0}\s+(\d+)'.format(counter_name)
+ match = re.match(re_memory_counter, line)
+ if match is not None:
+ return int(match.groups()[0])
+ except (IOError, OSError) as e:
+ if e.errno == errno.ENOENT:
+ raise
+ raise CGroupsException("Failed to read memory.stat: {0}".format(ustr(e)))
+ except Exception as e:
+ raise CGroupsException("Failed to read memory.stat: {0}".format(ustr(e)))
+
+ raise CounterNotFound("Cannot find counter: {0}".format(counter_name))
+
+ def get_memory_usage(self):
+ """
+ Collects anon and cache usage for the cgroup and returns as a tuple
+ Returns anon and cache memory usage for the cgroup as a tuple -> (anon, cache)
+
+ :return: Anon and cache memory usage in bytes
+ :rtype: tuple[int, int]
+ """
+ raise NotImplementedError()
+
+ def try_swap_memory_usage(self):
+ """
+ Collects swap usage for the cgroup
+
+ :return: Memory usage in bytes
+ :rtype: int
+ """
+ raise NotImplementedError()
+
+ def get_max_memory_usage(self):
+ """
+ Collect max memory usage for the cgroup.
+
+ :return: Memory usage in bytes
+ :rtype: int
+ """
+ raise NotImplementedError()
+
+ def get_tracked_metrics(self, **_):
+ # The log collector monitor tracks anon and cache memory separately.
+ anon_mem_usage, cache_mem_usage = self.get_memory_usage()
+ total_mem_usage = anon_mem_usage + cache_mem_usage
+ return [
+ MetricValue(MetricsCategory.MEMORY_CATEGORY, MetricsCounter.TOTAL_MEM_USAGE, self.name, total_mem_usage),
+ MetricValue(MetricsCategory.MEMORY_CATEGORY, MetricsCounter.ANON_MEM_USAGE, self.name, anon_mem_usage),
+ MetricValue(MetricsCategory.MEMORY_CATEGORY, MetricsCounter.CACHE_MEM_USAGE, self.name, cache_mem_usage),
+ MetricValue(MetricsCategory.MEMORY_CATEGORY, MetricsCounter.MAX_MEM_USAGE, self.name,
+ self.get_max_memory_usage(), _REPORT_EVERY_HOUR),
+ MetricValue(MetricsCategory.MEMORY_CATEGORY, MetricsCounter.SWAP_MEM_USAGE, self.name,
+ self.try_swap_memory_usage(), _REPORT_EVERY_HOUR)
+ ]
+
+ def get_unit_properties(self):
+ return["MemoryAccounting"]
+
+
+class MemoryControllerV1(_MemoryController):
+ def get_memory_usage(self):
+ # In v1, anon memory is reported in the 'rss' counter
+ return self._get_memory_stat_counter("rss"), self._get_memory_stat_counter("cache")
+
+ def try_swap_memory_usage(self):
+ # In v1, swap memory should be collected from memory.stat, because memory.memsw.usage_in_bytes reports total Memory+SWAP.
+ try:
+ return self._get_memory_stat_counter("swap")
+ except CounterNotFound as e:
+ if self._counter_not_found_error_count < 1:
+ logger.periodic_info(logger.EVERY_HALF_HOUR,
+ '{0} from "memory.stat" file in the cgroup: {1}---[Note: This log for informational purpose only and can be ignored]'.format(ustr(e), self.path))
+ self._counter_not_found_error_count += 1
+ return 0
+
+ def get_max_memory_usage(self):
+ # In v1, max memory usage is reported in memory.max_usage_in_bytes
+ usage = 0
+ try:
+ usage = int(self._get_parameters('memory.max_usage_in_bytes', first_line_only=True))
+ except Exception as e:
+ if isinstance(e, (IOError, OSError)) and e.errno == errno.ENOENT: # pylint: disable=E1101
+ raise
+ raise CGroupsException("Exception while attempting to read {0}".format("memory.max_usage_in_bytes"), e)
+
+ return usage
+
+
+class MemoryControllerV2(_MemoryController):
+ def get_memory_usage(self):
+ # In v2, cache memory is reported in the 'file' counter
+ return self._get_memory_stat_counter("anon"), self._get_memory_stat_counter("file")
+
+ def get_memory_throttled_events(self):
+ """
+        Returns the number of times processes of the cgroup are throttled and routed to perform memory reclaim because
+ the high memory boundary was exceeded.
+
+ :return: Number of memory throttling events for the cgroup
+ :rtype: int
+ """
+ try:
+ with open(os.path.join(self.path, 'memory.events')) as memory_events:
+ #
+ # Sample file:
+ # # cat memory.events
+ # low 0
+ # high 0
+ # max 0
+ # oom 0
+ # oom_kill 0
+ # oom_group_kill 0
+ #
+ for line in memory_events:
+ match = re.match(r'high\s+(\d+)', line)
+ if match is not None:
+ return int(match.groups()[0])
+ except (IOError, OSError) as e:
+ if e.errno == errno.ENOENT:
+ raise
+ raise CGroupsException("Failed to read memory.events: {0}".format(ustr(e)))
+ except Exception as e:
+ raise CGroupsException("Failed to read memory.events: {0}".format(ustr(e)))
+
+ raise CounterNotFound("Cannot find memory.events counter: high")
+
+ def try_swap_memory_usage(self):
+ # In v2, swap memory is reported in memory.swap.current
+ usage = 0
+ try:
+ usage = int(self._get_parameters('memory.swap.current', first_line_only=True))
+ except Exception as e:
+ if isinstance(e, (IOError, OSError)) and e.errno == errno.ENOENT: # pylint: disable=E1101
+ raise
+ raise CGroupsException("Exception while attempting to read {0}".format("memory.swap.current"), e)
+
+ return usage
+
+ def get_max_memory_usage(self):
+ # In v2, max memory usage is reported in memory.peak
+ usage = 0
+ try:
+ usage = int(self._get_parameters('memory.peak', first_line_only=True))
+ except Exception as e:
+ if isinstance(e, (IOError, OSError)) and e.errno == errno.ENOENT: # pylint: disable=E1101
+ raise
+ raise CGroupsException("Exception while attempting to read {0}".format("memory.peak"), e)
+
+ return usage
+
+ def get_tracked_metrics(self, **_):
+ metrics = super(MemoryControllerV2, self).get_tracked_metrics()
+ throttled_value = MetricValue(MetricsCategory.MEMORY_CATEGORY, MetricsCounter.MEM_THROTTLED, self.name,
+ self.get_memory_throttled_events())
+ metrics.append(throttled_value)
+ return metrics
diff --git a/azurelinuxagent/ga/monitor.py b/azurelinuxagent/ga/monitor.py
index a5ff29aa0..c1340ed69 100644
--- a/azurelinuxagent/ga/monitor.py
+++ b/azurelinuxagent/ga/monitor.py
@@ -22,7 +22,7 @@
import azurelinuxagent.common.conf as conf
import azurelinuxagent.common.logger as logger
import azurelinuxagent.common.utils.networkutil as networkutil
-from azurelinuxagent.ga.cgroup import MetricValue, MetricsCategory, MetricsCounter
+from azurelinuxagent.ga.cgroupcontroller import MetricValue, MetricsCategory, MetricsCounter
from azurelinuxagent.ga.cgroupconfigurator import CGroupConfigurator
from azurelinuxagent.ga.cgroupstelemetry import CGroupsTelemetry
from azurelinuxagent.common.errorstate import ErrorState
@@ -216,10 +216,10 @@ class SendImdsHeartbeat(PeriodicOperation):
Periodic operation to report the IDMS's health. The signal is 'Healthy' when we have successfully called and validated
a response in the last _IMDS_HEALTH_PERIOD.
"""
- def __init__(self, protocol_util, health_service):
+ def __init__(self, health_service):
super(SendImdsHeartbeat, self).__init__(SendImdsHeartbeat._IMDS_HEARTBEAT_PERIOD)
self.health_service = health_service
- self.imds_client = get_imds_client(protocol_util.get_wireserver_endpoint())
+ self.imds_client = get_imds_client()
self.imds_error_state = ErrorState(min_timedelta=SendImdsHeartbeat._IMDS_HEALTH_PERIOD)
_IMDS_HEARTBEAT_PERIOD = datetime.timedelta(minutes=1)
@@ -298,7 +298,7 @@ def daemon(self):
PollResourceUsage(),
PollSystemWideResourceUsage(),
SendHostPluginHeartbeat(protocol, health_service),
- SendImdsHeartbeat(protocol_util, health_service)
+ SendImdsHeartbeat(health_service)
]
report_network_configuration_changes = ReportNetworkConfigurationChanges()
diff --git a/azurelinuxagent/ga/persist_firewall_rules.py b/azurelinuxagent/ga/persist_firewall_rules.py
index a20e2874a..e7c8373ec 100644
--- a/azurelinuxagent/ga/persist_firewall_rules.py
+++ b/azurelinuxagent/ga/persist_firewall_rules.py
@@ -199,8 +199,7 @@ def _setup_network_setup_service(self):
# Create unit file with default values
self.__set_service_unit_file()
- # Reload systemd configurations when we setup the service for the first time to avoid systemctl warnings
- self.__reload_systemd_conf()
+ # After modifying the service, systemctl may issue a warning when checking the service, and daemon-reload should not be used to clear the warning, since it can affect other services
logger.info("Successfully added and enabled the {0}".format(self._network_setup_service_name))
def __setup_binary_file(self):
@@ -297,13 +296,6 @@ def __log_network_setup_service_logs(self):
message=msg,
log_event=False)
- def __reload_systemd_conf(self):
- try:
- logger.info("Executing systemctl daemon-reload for setting up {0}".format(self._network_setup_service_name))
- shellutil.run_command(["systemctl", "daemon-reload"])
- except Exception as exception:
- logger.warn("Unable to reload systemctl configurations: {0}".format(ustr(exception)))
-
def __get_unit_file_version(self):
if not os.path.exists(self.get_service_file_path()):
raise OSError("{0} not found".format(self.get_service_file_path()))
diff --git a/azurelinuxagent/ga/policy/__init__.py b/azurelinuxagent/ga/policy/__init__.py
new file mode 100644
index 000000000..d3897c3d3
--- /dev/null
+++ b/azurelinuxagent/ga/policy/__init__.py
@@ -0,0 +1,15 @@
+# Copyright 2018 Microsoft Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Requires Python 2.6+ and Openssl 1.0+
\ No newline at end of file
diff --git a/azurelinuxagent/ga/policy/policy_engine.py b/azurelinuxagent/ga/policy/policy_engine.py
new file mode 100644
index 000000000..20350dac5
--- /dev/null
+++ b/azurelinuxagent/ga/policy/policy_engine.py
@@ -0,0 +1,55 @@
+# Copyright 2018 Microsoft Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Requires Python 2.4+ and Openssl 1.0+
+#
+
+from azurelinuxagent.common import logger
+from azurelinuxagent.common.event import WALAEventOperation, add_event
+from azurelinuxagent.common import conf
+from azurelinuxagent.common.exception import AgentError
+
+
+class PolicyError(AgentError):
+ """
+ Error raised during agent policy enforcement.
+ """
+
+
+class PolicyEngine(object):
+ """
+ Implements base policy engine API.
+ """
+ @classmethod
+ def _log_policy(cls, msg, is_success=True, op=WALAEventOperation.Policy, send_event=True):
+ """
+ Log information to console and telemetry.
+ """
+ if is_success:
+ logger.info(msg)
+ else:
+ logger.error(msg)
+ if send_event:
+ add_event(op=op, message=msg, is_success=is_success)
+
+ @staticmethod
+ def is_policy_enforcement_enabled():
+ """
+ Check whether user has opted into policy enforcement feature.
+ Caller function should check this before performing any operations.
+ """
+ # TODO: Add check for policy file present at /etc/waagent_policy.json.
+ # Policy should only be enabled if conf flag is true AND policy file is present.
+ return conf.get_extension_policy_enabled()
+
diff --git a/azurelinuxagent/ga/update.py b/azurelinuxagent/ga/update.py
index d2608dc98..6dea1b608 100644
--- a/azurelinuxagent/ga/update.py
+++ b/azurelinuxagent/ga/update.py
@@ -31,7 +31,6 @@
from azurelinuxagent.common import conf
from azurelinuxagent.common import logger
-from azurelinuxagent.common.protocol.imds import get_imds_client
from azurelinuxagent.common.utils import fileutil, textutil
from azurelinuxagent.common.agent_supported_feature import get_supported_feature_by_name, SupportedFeatureNames, \
get_agent_supported_features_list_for_crp
@@ -395,7 +394,7 @@ def run(self, debug=False):
self._check_daemon_running(debug)
self._check_threads_running(all_thread_handlers)
self._process_goal_state(exthandlers_handler, remote_access_handler, agent_update_handler)
- self._send_heartbeat_telemetry(protocol)
+ self._send_heartbeat_telemetry(protocol, agent_update_handler)
self._check_agent_memory_usage()
time.sleep(self._goal_state_period)
@@ -475,25 +474,6 @@ def _wait_for_cloud_init(self):
add_event(op=WALAEventOperation.CloudInit, message=message, is_success=False, log_event=False)
self._cloud_init_completed = True # Mark as completed even on error since we will proceed to execute extensions
- def _get_vm_size(self, protocol):
- """
- Including VMSize is meant to capture the architecture of the VM (i.e. arm64 VMs will
- have arm64 included in their vmsize field and amd64 will have no architecture indicated).
- """
- if self._vm_size is None:
-
- imds_client = get_imds_client(protocol.get_endpoint())
-
- try:
- imds_info = imds_client.get_compute()
- self._vm_size = imds_info.vmSize
- except Exception as e:
- err_msg = "Attempts to retrieve VM size information from IMDS are failing: {0}".format(textutil.format_exception(e))
- logger.periodic_warn(logger.EVERY_SIX_HOURS, "[PERIODIC] {0}".format(err_msg))
- return "unknown"
-
- return self._vm_size
-
def _get_vm_arch(self):
return platform.machine()
@@ -1036,27 +1016,27 @@ def _write_pid_file(self):
return pid_files, pid_file
- def _send_heartbeat_telemetry(self, protocol):
+ def _send_heartbeat_telemetry(self, protocol, agent_update_handler):
if self._last_telemetry_heartbeat is None:
self._last_telemetry_heartbeat = datetime.utcnow() - UpdateHandler.TELEMETRY_HEARTBEAT_PERIOD
if datetime.utcnow() >= (self._last_telemetry_heartbeat + UpdateHandler.TELEMETRY_HEARTBEAT_PERIOD):
dropped_packets = self.osutil.get_firewall_dropped_packets(protocol.get_endpoint())
- auto_update_enabled = 1 if conf.get_autoupdate_enabled() else 0
+ auto_update_enabled = 1 if conf.get_auto_update_to_latest_version() else 0
+ update_mode = agent_update_handler.get_current_update_mode()
- telemetry_msg = "{0};{1};{2};{3};{4}".format(self._heartbeat_counter, self._heartbeat_id, dropped_packets,
- self._heartbeat_update_goal_state_error_count,
- auto_update_enabled)
- debug_log_msg = "[DEBUG HeartbeatCounter: {0};HeartbeatId: {1};DroppedPackets: {2};" \
- "UpdateGSErrors: {3};AutoUpdate: {4}]".format(self._heartbeat_counter,
+ # Note: When we add new values to the heartbeat message, please add a semicolon at the end of the value.
+ # This helps to parse the message easily in kusto queries with regex
+ heartbeat_msg = "HeartbeatCounter: {0};HeartbeatId: {1};DroppedPackets: {2};" \
+ "UpdateGSErrors: {3};AutoUpdate: {4};UpdateMode: {5};".format(self._heartbeat_counter,
self._heartbeat_id, dropped_packets,
self._heartbeat_update_goal_state_error_count,
- auto_update_enabled)
+ auto_update_enabled, update_mode)
# Write Heartbeat events/logs
add_event(name=AGENT_NAME, version=CURRENT_VERSION, op=WALAEventOperation.HeartBeat, is_success=True,
- message=telemetry_msg, log_event=False)
- logger.info(u"[HEARTBEAT] Agent {0} is running as the goal state agent {1}", CURRENT_AGENT, debug_log_msg)
+ message=heartbeat_msg, log_event=False)
+ logger.info(u"[HEARTBEAT] Agent {0} is running as the goal state agent [DEBUG {1}]", CURRENT_AGENT, heartbeat_msg)
# Update/Reset the counters
self._heartbeat_counter += 1
diff --git a/azurelinuxagent/pa/deprovision/default.py b/azurelinuxagent/pa/deprovision/default.py
index 35b4ae82e..d96adbfec 100644
--- a/azurelinuxagent/pa/deprovision/default.py
+++ b/azurelinuxagent/pa/deprovision/default.py
@@ -162,7 +162,8 @@ def del_lib_dir_files(self, warnings, actions): # pylint: disable=W0613
'published_hostname',
'fast_track.json',
'initial_goal_state',
- 'rsm_update.json'
+ 'waagent_rsm_update',
+ 'waagent_initial_update'
]
known_files_glob = [
'Extensions.*.xml',
diff --git a/azurelinuxagent/pa/rdma/rdma.py b/azurelinuxagent/pa/rdma/rdma.py
index edd6f2b55..a6e7c3fe6 100644
--- a/azurelinuxagent/pa/rdma/rdma.py
+++ b/azurelinuxagent/pa/rdma/rdma.py
@@ -368,10 +368,6 @@ def update_iboip_interfaces(self, mac_ip_array):
count = 0
for nic in nics:
- # look for IBoIP interface of format ibXXX
- if not re.match(r"ib\w+", nic):
- continue
-
mac_addr = None
with open(os.path.join(net_dir, nic, "address")) as address_file:
mac_addr = address_file.read()
@@ -382,7 +378,11 @@ def update_iboip_interfaces(self, mac_ip_array):
mac_addr = mac_addr.upper()
- match = re.match(r".+(\w\w):(\w\w):(\w\w):\w\w:\w\w:(\w\w):(\w\w):(\w\w)\n", mac_addr)
+ # if this is an IB interface, match IB-specific regex
+ if re.match(r"ib\w+", nic):
+ match = re.match(r".+(\w\w):(\w\w):(\w\w):\w\w:\w\w:(\w\w):(\w\w):(\w\w)\n", mac_addr)
+ else:
+ match = re.match(r"^(\w\w):(\w\w):(\w\w):(\w\w):(\w\w):(\w\w)$", mac_addr)
if not match:
logger.error("RDMA: failed to parse address for device {0} address {1}".format(nic, mac_addr))
continue
diff --git a/setup.py b/setup.py
index 2d51fae8c..0bb053d4c 100755
--- a/setup.py
+++ b/setup.py
@@ -147,7 +147,7 @@ def get_data_files(name, version, fullname): # pylint: disable=R0912
src=["config/clearlinux/waagent.conf"])
set_systemd_files(data_files, dest=systemd_dir_path,
src=["init/clearlinux/waagent.service"])
- elif name == 'mariner':
+ elif name in ["mariner", "azurelinux"]:
set_bin_files(data_files, dest=agent_bin_path)
set_conf_files(data_files, dest="/etc",
src=["config/mariner/waagent.conf"])
diff --git a/tests/common/osutil/test_factory.py b/tests/common/osutil/test_factory.py
index 46bf6a875..5bfb867d4 100644
--- a/tests/common/osutil/test_factory.py
+++ b/tests/common/osutil/test_factory.py
@@ -99,7 +99,7 @@ def test_get_osutil_it_should_return_ubuntu(self):
self.assertEqual(ret.get_service_name(), "walinuxagent")
ret = _get_osutil(distro_name="ubuntu",
- distro_code_name="focal",
+ distro_code_name="noble",
distro_version="24.04",
distro_full_name="")
self.assertTrue(isinstance(ret, Ubuntu18OSUtil))
diff --git a/tests/common/protocol/test_extensions_goal_state_from_extensions_config.py b/tests/common/protocol/test_extensions_goal_state_from_extensions_config.py
index 2a9acff65..eb07be16a 100644
--- a/tests/common/protocol/test_extensions_goal_state_from_extensions_config.py
+++ b/tests/common/protocol/test_extensions_goal_state_from_extensions_config.py
@@ -100,3 +100,16 @@ def test_it_should_parse_is_vm_enabled_for_rsm_upgrades(self):
agent_families = protocol.get_goal_state().extensions_goal_state.agent_families
for family in agent_families:
self.assertFalse(family.is_vm_enabled_for_rsm_upgrades, "is_vm_enabled_for_rsm_upgrades should be False")
+
+ def test_it_should_parse_encoded_signature_plugin_property(self):
+ data_file = wire_protocol_data.DATA_FILE.copy()
+ expected_signature = "MIInEAYJKoZIhvcNAQcCoIInATCCJv0CAQMxDTALBglghkgBZQMEAgIwCQYHgUuDSAcICaCCDXYwggX0MIID3KADAgECAhMzAAADrzBADkyjTQVBAAAAAAOvMA0GCSqGSIb3DQEBCwUAMH4xCzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpXYXNoaW5ndG9uMRAwDgYDVQQHEwdSZWRtb25kMR4wHAYDVQQKExVNaWNyb3NvZnQgQ29ycG9yYXRpb24xKDAmBgNVBAMTH01pY3Jvc29mdCBDb2RlIFNpZ25pbmcgUENBIDIwMTEwHhcNMjMxMTE2MTkwOTAwWhcNMjQxMTE0MTkwOTAwWjB0MQswCQYDVQQGEwJVUzETMBEGA1UECBMKV2FzaGluZ3RvbjEQMA4GA1UEBxMHUmVkbW9uZDEeMBwGA1UEChMVTWljcm9zb2Z0IENvcnBvcmF0aW9uMR4wHAYDVQQDExVNaWNyb3NvZnQgQ29ycG9yYXRpb24wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDOS8s1ra6f0YGtg0OhEaQa/t3Q+q1MEHhWJhqQVuO5amYXQpy8MDPNoJYk+FWAhePP5LxwcSge5aen+f5Q6WNPd6EDxGzotvVpNi5ve0H97S3F7C/axDfKxyNh21MG0W8Sb0vxi/vorcLHOL9i+t2D6yvvDzLlEefUCbQV/zGCBjXGlYJcUj6RAzXyeNANxSpKXAGd7Fh+ocGHPPphcD9LQTOJgG7Y7aYztHqBLJiQQ4eAgZNU4ac6+8LnEGALgo1ydC5BJEuJQjYKbNTy959HrKSu7LO3Ws0w8jw6pYdC1IMpdTkk2puTgY2PDNzBtLM4evG7FYer3WX+8t1UMYNTAgMBAAGjggFzMIIBbzAfBgNVHSUEGDAWBgorBgEEAYI3TAgBBggrBgEFBQcDAzAdBgNVHQ4EFgQURxxxNPIEPGSO8kqz+bgCAQWGXsEwRQYDVR0RBD4wPKQ6MDgxHjAcBgNVBAsTFU1pY3Jvc29mdCBDb3Jwb3JhdGlvbjEWMBQGA1UEBRMNMjMwMDEyKzUwMTgyNjAfBgNVHSMEGDAWgBRIbmTlUAXTgqoXNzcitW2oynUClTBUBgNVHR8ETTBLMEmgR6BFhkNodHRwOi8vd3d3Lm1pY3Jvc29mdC5jb20vcGtpb3BzL2NybC9NaWNDb2RTaWdQQ0EyMDExXzIwMTEtMDctMDguY3JsMGEGCCsGAQUFBwEBBFUwUzBRBggrBgEFBQcwAoZFaHR0cDovL3d3dy5taWNyb3NvZnQuY29tL3BraW9wcy9jZXJ0cy9NaWNDb2RTaWdQQ0EyMDExXzIwMTEtMDctMDguY3J0MAwGA1UdEwEB/wQCMAAwDQYJKoZIhvcNAQELBQADggIBAISxFt/zR2frTFPB45YdmhZpB2nNJoOoi+qlgcTlnO4QwlYN1w/vYwbDy/oFJolD5r6FMJd0RGcgEM8q9TgQ2OC7gQEmhweVJ7yuKJlQBH7P7Pg5RiqgV3cSonJ+OM4kFHbP3gPLiyzssSQdRuPY1mIWoGg9i7Y4ZC8ST7WhpSyc0pns2XsUe1XsIjaUcGu7zd7gg97eCUiLRdVklPmpXobH9CEAWakRUGNICYN2AgjhRTC4j3KJfqMkU04R6Toyh4/Toswm1uoDcGr5laYnTfcX3u5WnJqJLhuPe8Uj9kGAOcyo0O1mNwDa+LhFEzB6CB32+wfJMumfr6degvLTe8x55urQLeTjimBQgS49BSUkhFN7ois3cZyNpnrMca5AZaC7pLI72vuqSsSlLalGOcZmPHZGYJqZ0BacN274OZ80Q8B11iNokns9Od348bMb5Z4fihxaBWebl8kWEi2OPvQImOAeq3nt7UWJBzJYLAGEpfasaA3ZQgIcEXdD+uwo6ymMzDY6UamFOfYqYWXkntxDGu7ngD2ugKUu
ccYKJJRiiz+LAUcj90BVcSHRLQop9N8zoALr/1sJuwPrVAtxHNEgSW+AKBqIxYWM4Ev32l6agSUAezLMbq5f3d8x9qzT031jMDT+sUAoCw0M5wVtCUQcqINPuYjbS1WgJyZIiEkBMIIHejCCBWKgAwIBAgIKYQ6Q0gAAAAAAAzANBgkqhkiG9w0BAQsFADCBiDELMAkGA1UEBhMCVVMxEzARBgNVBAgTCldhc2hpbmd0b24xEDAOBgNVBAcTB1JlZG1vbmQxHjAcBgNVBAoTFU1pY3Jvc29mdCBDb3Jwb3JhdGlvbjEyMDAGA1UEAxMpTWljcm9zb2Z0IFJvb3QgQ2VydGlmaWNhdGUgQXV0aG9yaXR5IDIwMTEwHhcNMTEwNzA4MjA1OTA5WhcNMjYwNzA4MjEwOTA5WjB+MQswCQYDVQQGEwJVUzETMBEGA1UECBMKV2FzaGluZ3RvbjEQMA4GA1UEBxMHUmVkbW9uZDEeMBwGA1UEChMVTWljcm9zb2Z0IENvcnBvcmF0aW9uMSgwJgYDVQQDEx9NaWNyb3NvZnQgQ29kZSBTaWduaW5nIFBDQSAyMDExMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAq/D6chAcLq3YbqqCEE00uvK2WCGfQhsqa+laUKq4BjgaBEm6f8MMHt03a8YS2AvwOMKZBrDIOdUBFDFC04kNeWSHfpRgJGyvnkmc6Whe0t+bU7IKLMOv2akrrnoJr9eWWcpgGgXpZnboMlImEi/nqwhQz7NEt13YxC4Ddato88tt8zpcoRb0RrrgOGSsbmQ1eKagYw8t00CT+OPeBw3VXHmlSSnnDb6gE3e+lD3v++MrWhAfTVYoonpy4BI6t0le2O3tQ5GD2Xuye4Yb2T6xjF3oiU+EGvKhL1nkkDstrjNYxbc+/jLTswM9sbKvkjh+0p2ALPVOVpEhNSXDOW5kf1O6nA+tGSOEy/S6A4aN91/w0FK/jJSHvMAhdCVfGCi2zCcoOCWYOUo2z3yxkq4cI6epZuxhH2rhKEmdX4jiJV3TIUs+UsS1Vz8kA/DRelsv1SPjcF0PUUZ3s/gA4bysAoJf28AVs70b1FVL5zmhD+kjSbwYuER8ReTBw3J64HLnJN+/RpnF78IcV9uDjexNSTCnq47f7Fufr/zdsGbiwZeBe+3W7UvnSSmnEyimp31ngOaKYnhfsi+E11ecXL93KCjx7W3DKI8sj0A3T8HhhUSJxAlMxdSlQy90lfdu+HggWCwTXWCVmj5PM4TasIgX3p5O9JawvEagbJjS4NaIjAsCAwEAAaOCAe0wggHpMBAGCSsGAQQBgjcVAQQDAgEAMB0GA1UdDgQWBBRIbmTlUAXTgqoXNzcitW2oynUClTAZBgkrBgEEAYI3FAIEDB4KAFMAdQBiAEMAQTALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAfBgNVHSMEGDAWgBRyLToCMZBDuRQFTuHqp8cx0SOJNDBaBgNVHR8EUzBRME+gTaBLhklodHRwOi8vY3JsLm1pY3Jvc29mdC5jb20vcGtpL2NybC9wcm9kdWN0cy9NaWNSb29DZXJBdXQyMDExXzIwMTFfMDNfMjIuY3JsMF4GCCsGAQUFBwEBBFIwUDBOBggrBgEFBQcwAoZCaHR0cDovL3d3dy5taWNyb3NvZnQuY29tL3BraS9jZXJ0cy9NaWNSb29DZXJBdXQyMDExXzIwMTFfMDNfMjIuY3J0MIGfBgNVHSAEgZcwgZQwgZEGCSsGAQQBgjcuAzCBgzA/BggrBgEFBQcCARYzaHR0cDovL3d3dy5taWNyb3NvZnQuY29tL3BraW9wcy9kb2NzL3ByaW1hcnljcHMuaHRtMEAGCCsGAQUFBwICMDQeMiAdAEwAZQBnAGEAbABfAHAAbwBsAGkAYwB5AF8AcwB0AGEAdABlAG0AZQBuAHQALiAdMA0GCSqGSIb3DQEB
CwUAA4ICAQBn8oalmOBUeRou09h0ZyKbC5YR4WOSmUKWfdJ5DJDBZV8uLD74w3LRbYP+vj/oCso7v0epo/Np22O/IjWll11lhJB9i0ZQVdgMknzSGksc8zxCi1LQsP1r4z4HLimb5j0bpdS1HXeUOeLpZMlEPXh6I/MTfaaQdION9MsmAkYqwooQu6SpBQyb7Wj6aC6VoCo/KmtYSWMfCWluWpiW5IP0wI/zRive/DvQvTXvbiWu5a8n7dDd8w6vmSiXmE0OPQvyCInWH8MyGOLwxS3OW560STkKxgrCxq2u5bLZ2xWIUUVYODJxJxp/sfQn+N4sOiBpmLJZiWhub6e3dMNABQamASooPoI/E01mC8CzTfXhj38cbxV9Rad25UAqZaPDXVJihsMdYzaXht/a8/jyFqGaJ+HNpZfQ7l1jQeNbB5yHPgZ3BtEGsXUfFL5hYbXw3MYbBL7fQccOKO7eZS/sl/ahXJbYANahRr1Z85elCUtIEJmAH9AAKcWxm6U/RXceNcbSoqKfenoi+kiVH6v7RyOA9Z74v2u3S5fi63V4GuzqN5l5GEv/1rMjaHXmr/r8i+sLgOppO6/8MO0ETI7f33VtY5E90Z1WTk+/gFcioXgRMiF670EKsT/7qMykXcGhiJtXcVZOSEXAQsmbdlsKgEhr/Xmfwb1tbWrJUnMTDXpQzTGCGWIwghleAgEBMIGVMH4xCzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpXYXNoaW5ndG9uMRAwDgYDVQQHEwdSZWRtb25kMR4wHAYDVQQKExVNaWNyb3NvZnQgQ29ycG9yYXRpb24xKDAmBgNVBAMTH01pY3Jvc29mdCBDb2RlIFNpZ25pbmcgUENBIDIwMTECEzMAAAOvMEAOTKNNBUEAAAAAA68wCwYJYIZIAWUDBAICoFkwFgYJKoZIhvcNAQkDMQkGB4FLg0gHCAkwPwYJKoZIhvcNAQkEMTIEMDBbd8WC98w2hp0LRsyGXkhY0ZY+y0Pl20deVXonOXR+vDsyK96L9uBzpNRlolZD0DANBgkqhkiG9w0BAQEFAASCAQAIaK9t6Unz6YcKR2q8D2Vjvq9j+YK0U1+tb8s2ZslmmL19Yeb+NRy4tkS7lVEmMYRiFTy+jyis6UGL81ziXEXqAfqjkJt/zjN/8Qek91fzKYJMuCfEm6xVv+gfNHCp0fuGn4b9QNoD7UUMe4oBskSSLSiW0ri9FblSdjeoLZKvoRzHFBF94wI2Kw0iCBUQgNKHKT3lyG9D4NQySAaS0BnYG/s/HPgGMPT6peWRWAXkuTQ8zxb98pOzdf3HZ4Zz2n8qEh1BM6nHba2CKnDP0yjEz7OERVWcLUVPcTHC/xG94cp1gdlKQ09t3H7lBwccxmztUt9sIGUAdeJFAChTvvnSoYIXRDCCF0AGCyqGSIb3DQEJEAIOMYIXLzCCFysGCSqGSIb3DQEHAqCCFxwwghcYAgEDMQ8wDQYJYIZIAWUDBAIBBQAwggFzBgsqhkiG9w0BCRABBKCCAWIEggFeMIIBWgIBAQYKKwYBBAGEWQoDATAxMA0GCWCGSAFlAwQCAQUABCALbe+1JlANO/4xRH8dJHYO8uMX6ee/KhxzL1ZHE4fguAIGZnLzb33XGBMyMDI0MDYyMDIzMzgyOS4yMzNaMASAAgH0AhgsprYE/OXhkFp093+I2SkmqEFqhU3g+VWggdikgdUwgdIxCzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpXYXNoaW5ndG9uMRAwDgYDVQQHEwdSZWRtb25kMR4wHAYDVQQKExVNaWNyb3NvZnQgQ29ycG9yYXRpb24xLTArBgNVBAsTJE1pY3Jvc29mdCBJcmVsYW5kIE9wZXJhdGlvbnMgTGltaXRlZDEmMCQGA1UECxMdVGhhbGVzIFRTUyBFU046ODZERi00QkJDLTkzMzUxJTAjBgNVBAMTHE1pY3Jvc29mdCBUaW1lLVN0YW1wIFNl
cnZpY2WgghF4MIIHJzCCBQ+gAwIBAgITMwAAAd1dVx2V1K2qGwABAAAB3TANBgkqhkiG9w0BAQsFADB8MQswCQYDVQQGEwJVUzETMBEGA1UECBMKV2FzaGluZ3RvbjEQMA4GA1UEBxMHUmVkbW9uZDEeMBwGA1UEChMVTWljcm9zb2Z0IENvcnBvcmF0aW9uMSYwJAYDVQQDEx1NaWNyb3NvZnQgVGltZS1TdGFtcCBQQ0EgMjAxMDAeFw0yMzEwMTIxOTA3MDlaFw0yNTAxMTAxOTA3MDlaMIHSMQswCQYDVQQGEwJVUzETMBEGA1UECBMKV2FzaGluZ3RvbjEQMA4GA1UEBxMHUmVkbW9uZDEeMBwGA1UEChMVTWljcm9zb2Z0IENvcnBvcmF0aW9uMS0wKwYDVQQLEyRNaWNyb3NvZnQgSXJlbGFuZCBPcGVyYXRpb25zIExpbWl0ZWQxJjAkBgNVBAsTHVRoYWxlcyBUU1MgRVNOOjg2REYtNEJCQy05MzM1MSUwIwYDVQQDExxNaWNyb3NvZnQgVGltZS1TdGFtcCBTZXJ2aWNlMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAqE4DlETqLnecdREfiWd8oun70m+Km5O1y1qKsLExRKs9LLkJYrYO2uJA/5PnYdds3aDsCS1DWlBltMMYXMrp3Te9hg2sI+4kr49Gw/YU9UOMFfLmastEXMgcctqIBqhsTm8Um6jFnRlZ0owKzxpyOEdSZ9pj7v38JHu434Hj7GMmrC92lT+anSYCrd5qvIf4Aqa/qWStA3zOCtxsKAfCyq++pPqUQWpimLu4qfswBhtJ4t7Skx1q1XkRbo1Wdcxg5NEq4Y9/J8Ep1KG5qUujzyQbupraZsDmXvv5fTokB6wySjJivj/0KAMWMdSlwdI4O6OUUEoyLXrzNF0t6t2lbRsFf0QO7HbMEwxoQrw3LFrAIS4Crv77uS0UBuXeFQq27NgLUVRm5SXYGrpTXtLgIqypHeK0tP2o1xvakAniOsgN2WXlOCip5/mCm/5hy8EzzfhtcU3DK13e6MMPbg/0N3zF9Um+6aOwFBCQrlP+rLcetAny53WcdK+0VWLlJr+5sa5gSlLyAXoYNY3n8pu94WR2yhNUg+jymRaGM+zRDucDn64HFAHjOWMSMrPlZbsEDjCmYWbbh+EGZGNXg1un6fvxyACO8NJ9OUDoNgFy/aTHUkfZ0iFpGdJ45d49PqEwXQiXn3wsy7SvDflWJRZwBCRQ1RPFGeoYXHPnD5m6wwMCAwEAAaOCAUkwggFFMB0GA1UdDgQWBBRuovW2jI9R2kXLIdIMpaPQjiXD8TAfBgNVHSMEGDAWgBSfpxVdAF5iXYP05dJlpxtTNRnpcjBfBgNVHR8EWDBWMFSgUqBQhk5odHRwOi8vd3d3Lm1pY3Jvc29mdC5jb20vcGtpb3BzL2NybC9NaWNyb3NvZnQlMjBUaW1lLVN0YW1wJTIwUENBJTIwMjAxMCgxKS5jcmwwbAYIKwYBBQUHAQEEYDBeMFwGCCsGAQUFBzAChlBodHRwOi8vd3d3Lm1pY3Jvc29mdC5jb20vcGtpb3BzL2NlcnRzL01pY3Jvc29mdCUyMFRpbWUtU3RhbXAlMjBQQ0ElMjAyMDEwKDEpLmNydDAMBgNVHRMBAf8EAjAAMBYGA1UdJQEB/wQMMAoGCCsGAQUFBwMIMA4GA1UdDwEB/wQEAwIHgDANBgkqhkiG9w0BAQsFAAOCAgEALlTZsg0uBcgdZsxypW5/2ORRP8rzPIsG+7mHwmuphHbP95o7bKjU6hz1KHK/Ft70ZkO7uSRTPFLInUhmSxlnDoUOrrJk1Pc8SMASdESlEEvxL6ZteD47hUtLQtKZvxchmIuxqpnR8MRy/cd4D7/L+oqcJBaReCGloQzAYxDNGSEbBwZ1evXMalDsdPG9+7nvEXFlfUyQqdYUQ0nq6t37i15SBePSeAg7
H/+Xdcwrce3xPb7O8Yk0AX7n/moGTuevTv3MgJsVe/G2J003l6hd1b72sAiRL5QYPX0Bl0Gu23p1n450Cq4GIORhDmRV9QwpLfXIdA4aCYXG4I7NOlYdqWuql0iWWzLwo2yPlT2w42JYB3082XIQcdtBkOaL38E2U5jJO3Rh6EtsOi+ZlQ1rOTv0538D3XuaoJ1OqsTHAEZQ9sw/7+91hSpomym6kGdS2M5//voMCFXLx797rNH3w+SmWaWI7ZusvdDesPr5kJV2sYz1GbqFQMEGS9iH5iOYZ1xDkcHpZP1F5zz6oMeZuEuFfhl1pqt3n85d4tuDHZ/svhBBCPcqCqOoM5YidWE0TWBi1NYsd7jzzZ3+Tsu6LQrWDwRmsoPuZo6uwkso8qV6Bx4n0UKpjWwNQpSFFrQQdRb5mQouWiEqtLsXCN2sg1aQ8GBtDOcKN0TabjtCNNswggdxMIIFWaADAgECAhMzAAAAFcXna54Cm0mZAAAAAAAVMA0GCSqGSIb3DQEBCwUAMIGIMQswCQYDVQQGEwJVUzETMBEGA1UECBMKV2FzaGluZ3RvbjEQMA4GA1UEBxMHUmVkbW9uZDEeMBwGA1UEChMVTWljcm9zb2Z0IENvcnBvcmF0aW9uMTIwMAYDVQQDEylNaWNyb3NvZnQgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgMjAxMDAeFw0yMTA5MzAxODIyMjVaFw0zMDA5MzAxODMyMjVaMHwxCzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpXYXNoaW5ndG9uMRAwDgYDVQQHEwdSZWRtb25kMR4wHAYDVQQKExVNaWNyb3NvZnQgQ29ycG9yYXRpb24xJjAkBgNVBAMTHU1pY3Jvc29mdCBUaW1lLVN0YW1wIFBDQSAyMDEwMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA5OGmTOe0ciELeaLL1yR5vQ7VgtP97pwHB9KpbE51yMo1V/YBf2xK4OK9uT4XYDP/XE/HZveVU3Fa4n5KWv64NmeFRiMMtY0Tz3cywBAY6GB9alKDRLemjkZrBxTzxXb1hlDcwUTIcVxRMTegCjhuje3XD9gmU3w5YQJ6xKr9cmmvHaus9ja+NSZk2pg7uhp7M62AW36MEBydUv626GIl3GoPz130/o5Tz9bshVZN7928jaTjkY+yOSxRnOlwaQ3KNi1wjjHINSi947SHJMPgyY9+tVSP3PoFVZhtaDuaRr3tpK56KTesy+uDRedGbsoy1cCGMFxPLOJiss254o2I5JasAUq7vnGpF1tnYN74kpEeHT39IM9zfUGaRnXNxF803RKJ1v2lIH1+/NmeRd+2ci/bfV+AutuqfjbsNkz2K26oElHovwUDo9Fzpk03dJQcNIIP8BDyt0cY7afomXw/TNuvXsLz1dhzPUNOwTM5TI4CvEJoLhDqhFFG4tG9ahhaYQFzymeiXtcodgLiMxhy16cg8ML6EgrXY28MyTZki1ugpoMhXV8wdJGUlNi5UPkLiWHzNgY1GIRH29wb0f2y1BzFa/ZcUlFdEtsluq9QBXpsxREdcu+N+VLEhReTwDwV2xo3xwgVGD94q0W29R6HXtqPnhZyacaue7e3PmriLq0CAwEAAaOCAd0wggHZMBIGCSsGAQQBgjcVAQQFAgMBAAEwIwYJKwYBBAGCNxUCBBYEFCqnUv5kxJq+gpE8RjUpzxD/LwTuMB0GA1UdDgQWBBSfpxVdAF5iXYP05dJlpxtTNRnpcjBcBgNVHSAEVTBTMFEGDCsGAQQBgjdMg30BATBBMD8GCCsGAQUFBwIBFjNodHRwOi8vd3d3Lm1pY3Jvc29mdC5jb20vcGtpb3BzL0RvY3MvUmVwb3NpdG9yeS5odG0wEwYDVR0lBAwwCgYIKwYBBQUHAwgwGQYJKwYBBAGCNxQCBAweCgBTAHUAYgBDAEEwCwYDVR0PBAQDAgGGMA8GA1UdEwEB
/wQFMAMBAf8wHwYDVR0jBBgwFoAU1fZWy4/oolxiaNE9lJBb186aGMQwVgYDVR0fBE8wTTBLoEmgR4ZFaHR0cDovL2NybC5taWNyb3NvZnQuY29tL3BraS9jcmwvcHJvZHVjdHMvTWljUm9vQ2VyQXV0XzIwMTAtMDYtMjMuY3JsMFoGCCsGAQUFBwEBBE4wTDBKBggrBgEFBQcwAoY+aHR0cDovL3d3dy5taWNyb3NvZnQuY29tL3BraS9jZXJ0cy9NaWNSb29DZXJBdXRfMjAxMC0wNi0yMy5jcnQwDQYJKoZIhvcNAQELBQADggIBAJ1VffwqreEsH2cBMSRb4Z5yS/ypb+pcFLY+TkdkeLEGk5c9MTO1OdfCcTY/2mRsfNB1OW27DzHkwo/7bNGhlBgi7ulmZzpTTd2YurYeeNg2LpypglYAA7AFvonoaeC6Ce5732pvvinLbtg/SHUB2RjebYIM9W0jVOR4U3UkV7ndn/OOPcbzaN9l9qRWqveVtihVJ9AkvUCgvxm2EhIRXT0n4ECWOKz3+SmJw7wXsFSFQrP8DJ6LGYnn8AtqgcKBGUIZUnWKNsIdw2FzLixre24/LAl4FOmRsqlb30mjdAy87JGA0j3mSj5mO0+7hvoyGtmW9I/2kQH2zsZ0/fZMcm8Qq3UwxTSwethQ/gpY3UA8x1RtnWN0SCyxTkctwRQEcb9k+SS+c23Kjgm9swFXSVRk2XPXfx5bRAGOWhmRaw2fpCjcZxkoJLo4S5pu+yFUa2pFEUep8beuyOiJXk+d0tBMdrVXVAmxaQFEfnyhYWxz/gq77EFmPWn9y8FBSX5+k77L+DvktxW/tM4+pTFRhLy/AsGConsXHRWJjXD+57XQKBqJC4822rpM+Zv/Cuk0+CQ1ZyvgDbjmjJnW4SLq8CdCPSWU5nR0W2rRnj7tfqAxM328y+l7vzhwRNGQ8cirOoo6CGJ/2XBjU02N7oJtpQUQwXEGahC0HVUzWLOhcGbyoYIC1DCCAj0CAQEwggEAoYHYpIHVMIHSMQswCQYDVQQGEwJVUzETMBEGA1UECBMKV2FzaGluZ3RvbjEQMA4GA1UEBxMHUmVkbW9uZDEeMBwGA1UEChMVTWljcm9zb2Z0IENvcnBvcmF0aW9uMS0wKwYDVQQLEyRNaWNyb3NvZnQgSXJlbGFuZCBPcGVyYXRpb25zIExpbWl0ZWQxJjAkBgNVBAsTHVRoYWxlcyBUU1MgRVNOOjg2REYtNEJCQy05MzM1MSUwIwYDVQQDExxNaWNyb3NvZnQgVGltZS1TdGFtcCBTZXJ2aWNloiMKAQEwBwYFKw4DAhoDFQA2I0cZZds1oM/GfKINsQ5yJKMWEKCBgzCBgKR+MHwxCzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpXYXNoaW5ndG9uMRAwDgYDVQQHEwdSZWRtb25kMR4wHAYDVQQKExVNaWNyb3NvZnQgQ29ycG9yYXRpb24xJjAkBgNVBAMTHU1pY3Jvc29mdCBUaW1lLVN0YW1wIFBDQSAyMDEwMA0GCSqGSIb3DQEBBQUAAgUA6h4aiTAiGA8yMDI0MDYyMDExMDMzN1oYDzIwMjQwNjIxMTEwMzM3WjB0MDoGCisGAQQBhFkKBAExLDAqMAoCBQDqHhqJAgEAMAcCAQACAgX7MAcCAQACAhH8MAoCBQDqH2wJAgEAMDYGCisGAQQBhFkKBAIxKDAmMAwGCisGAQQBhFkKAwKgCjAIAgEAAgMHoSChCjAIAgEAAgMBhqAwDQYJKoZIhvcNAQEFBQADgYEAGfu+JpdwJYpU+xUOu693Nef9bUv1la7pxXUtY+P82b5q8/FFZp5WUobGx6JrVuJTDuvqbEZYjwTzWIVUHog1kTXjji1NCFLCVnrlJqPwtH9uRQhnFDSmiP0tG1rNwht6ZViFrRexp+7cebOHSPfk+ZzrUyp9DptMAJmagfLClxAxggQNMIIECQIBATCBkzB8MQsw
CQYDVQQGEwJVUzETMBEGA1UECBMKV2FzaGluZ3RvbjEQMA4GA1UEBxMHUmVkbW9uZDEeMBwGA1UEChMVTWljcm9zb2Z0IENvcnBvcmF0aW9uMSYwJAYDVQQDEx1NaWNyb3NvZnQgVGltZS1TdGFtcCBQQ0EgMjAxMAITMwAAAd1dVx2V1K2qGwABAAAB3TANBglghkgBZQMEAgEFAKCCAUowGgYJKoZIhvcNAQkDMQ0GCyqGSIb3DQEJEAEEMC8GCSqGSIb3DQEJBDEiBCCZX/UOu+vfJ4kbHbQYoi1Ztz4aZycnWIB1vBYNNo/atDCB+gYLKoZIhvcNAQkQAi8xgeowgecwgeQwgb0EIGH/Di2aZaxPeJmce0fRWTftQI3TaVHFj5GI43rAMWNmMIGYMIGApH4wfDELMAkGA1UEBhMCVVMxEzARBgNVBAgTCldhc2hpbmd0b24xEDAOBgNVBAcTB1JlZG1vbmQxHjAcBgNVBAoTFU1pY3Jvc29mdCBDb3Jwb3JhdGlvbjEmMCQGA1UEAxMdTWljcm9zb2Z0IFRpbWUtU3RhbXAgUENBIDIwMTACEzMAAAHdXVcdldStqhsAAQAAAd0wIgQg5Fd0dBTHG2u3SYEF2YcmJ7rHH4kHcV0GlSr/y6AQOYEwDQYJKoZIhvcNAQELBQAEggIAGcOQBnVMUPnu4d2wmccNjUncMe5i0C5VkJ7/VjqN4W6vSuKz7BFVIaUMoufkY94epjipx+Ip3BTj2heew7xB+f6zBKTlkXfakH7TEWeju3WzUYNt3kjJyS3SJeJGFJEiln1S6apObwPtbSq9EqwwFOt8pJy9bAvoxuRM6Olib/eiHr3uiKkk6FCccUgG0PYN/PRUU7htzv6uyRXzCpuNpld3eorXt6nqt6bP7k1NFcwcYSv7V3WcoQzObk5Y9G5n/1rc5Hy9eRHwnz1l7MWOZGsJ9swOBFmoVUK8tB1vPy3bjooJBm7jRT9AcdGTaRS/t5nYe5sECI51sIyq3UBPCH8rNse1BIX9WCtcar1Bg6L64lzdPC7FVSh03vVlDZhNNf7tWRZqlYID2zTaY4p4LIW47O0/Rw2Swe4+hvl49e0v0m0FnmmwXN5097waF3Xv7FIDxbcrK+0DTv2p810Igwj6tErwxhP/367Q9EBzxODSJ8uD35DGMmHsTnViavQUBzj8LeTiA6sUZhF54AbI5dQkZLPydlR3GCmo1RKKO1VhDZnpFanj/N856MOlQqe/6x8sguPM+OpF6MWGvQH5SxsSzSf6dxhzS2pEHbirwJ4k1+tuF0LKOxNLwVVQQ9qPABNiWqml4bJk9oZ1dOTDd9EFjepHqynKk4olY3kq5sA="
+ with mock_wire_protocol(data_file) as protocol:
+ extensions = protocol.get_goal_state().extensions_goal_state.extensions
+ self.assertEqual(expected_signature, extensions[0].encoded_signature)
+
+ data_file["ext_conf"] = "wire/ext_conf-no_encoded_signature.xml"
+ with mock_wire_protocol(data_file) as protocol:
+ extensions = protocol.get_goal_state().extensions_goal_state.extensions
+ # extension.encoded_signature should be None if the property is not in the EGS for the extension
+ self.assertIsNone(extensions[0].encoded_signature)
diff --git a/tests/common/protocol/test_extensions_goal_state_from_vm_settings.py b/tests/common/protocol/test_extensions_goal_state_from_vm_settings.py
index 771fa2206..3124f42a1 100644
--- a/tests/common/protocol/test_extensions_goal_state_from_vm_settings.py
+++ b/tests/common/protocol/test_extensions_goal_state_from_vm_settings.py
@@ -161,6 +161,19 @@ def test_its_source_channel_should_be_host_ga_plugin(self):
self.assertEqual(GoalStateChannel.HostGAPlugin, extensions_goal_state.channel, "The channel is incorrect")
+ def test_it_should_parse_encoded_signature_plugin_property(self):
+ data_file = wire_protocol_data.DATA_FILE_VM_SETTINGS.copy()
+ # This vm settings extensions goal state has 1 extension with encodedSignature (AzureMonitorLinuxAgent). The
+ # remaining extensions do not have encodedSignature
+ expected_signature = "MIInEAYJKoZIhvcNAQcCoIInATCCJv0CAQMxDTALBglghkgBZQMEAgIwCQYHgUuDSAcICaCCDXYwggX0MIID3KADAgECAhMzAAADrzBADkyjTQVBAAAAAAOvMA0GCSqGSIb3DQEBCwUAMH4xCzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpXYXNoaW5ndG9uMRAwDgYDVQQHEwdSZWRtb25kMR4wHAYDVQQKExVNaWNyb3NvZnQgQ29ycG9yYXRpb24xKDAmBgNVBAMTH01pY3Jvc29mdCBDb2RlIFNpZ25pbmcgUENBIDIwMTEwHhcNMjMxMTE2MTkwOTAwWhcNMjQxMTE0MTkwOTAwWjB0MQswCQYDVQQGEwJVUzETMBEGA1UECBMKV2FzaGluZ3RvbjEQMA4GA1UEBxMHUmVkbW9uZDEeMBwGA1UEChMVTWljcm9zb2Z0IENvcnBvcmF0aW9uMR4wHAYDVQQDExVNaWNyb3NvZnQgQ29ycG9yYXRpb24wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDOS8s1ra6f0YGtg0OhEaQa/t3Q+q1MEHhWJhqQVuO5amYXQpy8MDPNoJYk+FWAhePP5LxwcSge5aen+f5Q6WNPd6EDxGzotvVpNi5ve0H97S3F7C/axDfKxyNh21MG0W8Sb0vxi/vorcLHOL9i+t2D6yvvDzLlEefUCbQV/zGCBjXGlYJcUj6RAzXyeNANxSpKXAGd7Fh+ocGHPPphcD9LQTOJgG7Y7aYztHqBLJiQQ4eAgZNU4ac6+8LnEGALgo1ydC5BJEuJQjYKbNTy959HrKSu7LO3Ws0w8jw6pYdC1IMpdTkk2puTgY2PDNzBtLM4evG7FYer3WX+8t1UMYNTAgMBAAGjggFzMIIBbzAfBgNVHSUEGDAWBgorBgEEAYI3TAgBBggrBgEFBQcDAzAdBgNVHQ4EFgQURxxxNPIEPGSO8kqz+bgCAQWGXsEwRQYDVR0RBD4wPKQ6MDgxHjAcBgNVBAsTFU1pY3Jvc29mdCBDb3Jwb3JhdGlvbjEWMBQGA1UEBRMNMjMwMDEyKzUwMTgyNjAfBgNVHSMEGDAWgBRIbmTlUAXTgqoXNzcitW2oynUClTBUBgNVHR8ETTBLMEmgR6BFhkNodHRwOi8vd3d3Lm1pY3Jvc29mdC5jb20vcGtpb3BzL2NybC9NaWNDb2RTaWdQQ0EyMDExXzIwMTEtMDctMDguY3JsMGEGCCsGAQUFBwEBBFUwUzBRBggrBgEFBQcwAoZFaHR0cDovL3d3dy5taWNyb3NvZnQuY29tL3BraW9wcy9jZXJ0cy9NaWNDb2RTaWdQQ0EyMDExXzIwMTEtMDctMDguY3J0MAwGA1UdEwEB/wQCMAAwDQYJKoZIhvcNAQELBQADggIBAISxFt/zR2frTFPB45YdmhZpB2nNJoOoi+qlgcTlnO4QwlYN1w/vYwbDy/oFJolD5r6FMJd0RGcgEM8q9TgQ2OC7gQEmhweVJ7yuKJlQBH7P7Pg5RiqgV3cSonJ+OM4kFHbP3gPLiyzssSQdRuPY1mIWoGg9i7Y4ZC8ST7WhpSyc0pns2XsUe1XsIjaUcGu7zd7gg97eCUiLRdVklPmpXobH9CEAWakRUGNICYN2AgjhRTC4j3KJfqMkU04R6Toyh4/Toswm1uoDcGr5laYnTfcX3u5WnJqJLhuPe8Uj9kGAOcyo0O1mNwDa+LhFEzB6CB32+wfJMumfr6degvLTe8x55urQLeTjimBQgS49BSUkhFN7ois3cZyNpnrMca5AZaC7pLI72vuqSsSlLalGOcZmPHZGYJqZ0BacN274OZ80Q8B11iNokns9Od348bMb5Z4fihxaBWebl8kWEi2OPvQImOAeq3nt7UWJBzJYLAGEpfasaA3ZQgIcEXdD+uwo6ymMzDY6UamFOfYqYWXkntxDGu7ngD2ugKUu
ccYKJJRiiz+LAUcj90BVcSHRLQop9N8zoALr/1sJuwPrVAtxHNEgSW+AKBqIxYWM4Ev32l6agSUAezLMbq5f3d8x9qzT031jMDT+sUAoCw0M5wVtCUQcqINPuYjbS1WgJyZIiEkBMIIHejCCBWKgAwIBAgIKYQ6Q0gAAAAAAAzANBgkqhkiG9w0BAQsFADCBiDELMAkGA1UEBhMCVVMxEzARBgNVBAgTCldhc2hpbmd0b24xEDAOBgNVBAcTB1JlZG1vbmQxHjAcBgNVBAoTFU1pY3Jvc29mdCBDb3Jwb3JhdGlvbjEyMDAGA1UEAxMpTWljcm9zb2Z0IFJvb3QgQ2VydGlmaWNhdGUgQXV0aG9yaXR5IDIwMTEwHhcNMTEwNzA4MjA1OTA5WhcNMjYwNzA4MjEwOTA5WjB+MQswCQYDVQQGEwJVUzETMBEGA1UECBMKV2FzaGluZ3RvbjEQMA4GA1UEBxMHUmVkbW9uZDEeMBwGA1UEChMVTWljcm9zb2Z0IENvcnBvcmF0aW9uMSgwJgYDVQQDEx9NaWNyb3NvZnQgQ29kZSBTaWduaW5nIFBDQSAyMDExMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAq/D6chAcLq3YbqqCEE00uvK2WCGfQhsqa+laUKq4BjgaBEm6f8MMHt03a8YS2AvwOMKZBrDIOdUBFDFC04kNeWSHfpRgJGyvnkmc6Whe0t+bU7IKLMOv2akrrnoJr9eWWcpgGgXpZnboMlImEi/nqwhQz7NEt13YxC4Ddato88tt8zpcoRb0RrrgOGSsbmQ1eKagYw8t00CT+OPeBw3VXHmlSSnnDb6gE3e+lD3v++MrWhAfTVYoonpy4BI6t0le2O3tQ5GD2Xuye4Yb2T6xjF3oiU+EGvKhL1nkkDstrjNYxbc+/jLTswM9sbKvkjh+0p2ALPVOVpEhNSXDOW5kf1O6nA+tGSOEy/S6A4aN91/w0FK/jJSHvMAhdCVfGCi2zCcoOCWYOUo2z3yxkq4cI6epZuxhH2rhKEmdX4jiJV3TIUs+UsS1Vz8kA/DRelsv1SPjcF0PUUZ3s/gA4bysAoJf28AVs70b1FVL5zmhD+kjSbwYuER8ReTBw3J64HLnJN+/RpnF78IcV9uDjexNSTCnq47f7Fufr/zdsGbiwZeBe+3W7UvnSSmnEyimp31ngOaKYnhfsi+E11ecXL93KCjx7W3DKI8sj0A3T8HhhUSJxAlMxdSlQy90lfdu+HggWCwTXWCVmj5PM4TasIgX3p5O9JawvEagbJjS4NaIjAsCAwEAAaOCAe0wggHpMBAGCSsGAQQBgjcVAQQDAgEAMB0GA1UdDgQWBBRIbmTlUAXTgqoXNzcitW2oynUClTAZBgkrBgEEAYI3FAIEDB4KAFMAdQBiAEMAQTALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAfBgNVHSMEGDAWgBRyLToCMZBDuRQFTuHqp8cx0SOJNDBaBgNVHR8EUzBRME+gTaBLhklodHRwOi8vY3JsLm1pY3Jvc29mdC5jb20vcGtpL2NybC9wcm9kdWN0cy9NaWNSb29DZXJBdXQyMDExXzIwMTFfMDNfMjIuY3JsMF4GCCsGAQUFBwEBBFIwUDBOBggrBgEFBQcwAoZCaHR0cDovL3d3dy5taWNyb3NvZnQuY29tL3BraS9jZXJ0cy9NaWNSb29DZXJBdXQyMDExXzIwMTFfMDNfMjIuY3J0MIGfBgNVHSAEgZcwgZQwgZEGCSsGAQQBgjcuAzCBgzA/BggrBgEFBQcCARYzaHR0cDovL3d3dy5taWNyb3NvZnQuY29tL3BraW9wcy9kb2NzL3ByaW1hcnljcHMuaHRtMEAGCCsGAQUFBwICMDQeMiAdAEwAZQBnAGEAbABfAHAAbwBsAGkAYwB5AF8AcwB0AGEAdABlAG0AZQBuAHQALiAdMA0GCSqGSIb3DQEB
CwUAA4ICAQBn8oalmOBUeRou09h0ZyKbC5YR4WOSmUKWfdJ5DJDBZV8uLD74w3LRbYP+vj/oCso7v0epo/Np22O/IjWll11lhJB9i0ZQVdgMknzSGksc8zxCi1LQsP1r4z4HLimb5j0bpdS1HXeUOeLpZMlEPXh6I/MTfaaQdION9MsmAkYqwooQu6SpBQyb7Wj6aC6VoCo/KmtYSWMfCWluWpiW5IP0wI/zRive/DvQvTXvbiWu5a8n7dDd8w6vmSiXmE0OPQvyCInWH8MyGOLwxS3OW560STkKxgrCxq2u5bLZ2xWIUUVYODJxJxp/sfQn+N4sOiBpmLJZiWhub6e3dMNABQamASooPoI/E01mC8CzTfXhj38cbxV9Rad25UAqZaPDXVJihsMdYzaXht/a8/jyFqGaJ+HNpZfQ7l1jQeNbB5yHPgZ3BtEGsXUfFL5hYbXw3MYbBL7fQccOKO7eZS/sl/ahXJbYANahRr1Z85elCUtIEJmAH9AAKcWxm6U/RXceNcbSoqKfenoi+kiVH6v7RyOA9Z74v2u3S5fi63V4GuzqN5l5GEv/1rMjaHXmr/r8i+sLgOppO6/8MO0ETI7f33VtY5E90Z1WTk+/gFcioXgRMiF670EKsT/7qMykXcGhiJtXcVZOSEXAQsmbdlsKgEhr/Xmfwb1tbWrJUnMTDXpQzTGCGWIwghleAgEBMIGVMH4xCzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpXYXNoaW5ndG9uMRAwDgYDVQQHEwdSZWRtb25kMR4wHAYDVQQKExVNaWNyb3NvZnQgQ29ycG9yYXRpb24xKDAmBgNVBAMTH01pY3Jvc29mdCBDb2RlIFNpZ25pbmcgUENBIDIwMTECEzMAAAOvMEAOTKNNBUEAAAAAA68wCwYJYIZIAWUDBAICoFkwFgYJKoZIhvcNAQkDMQkGB4FLg0gHCAkwPwYJKoZIhvcNAQkEMTIEMDBbd8WC98w2hp0LRsyGXkhY0ZY+y0Pl20deVXonOXR+vDsyK96L9uBzpNRlolZD0DANBgkqhkiG9w0BAQEFAASCAQAIaK9t6Unz6YcKR2q8D2Vjvq9j+YK0U1+tb8s2ZslmmL19Yeb+NRy4tkS7lVEmMYRiFTy+jyis6UGL81ziXEXqAfqjkJt/zjN/8Qek91fzKYJMuCfEm6xVv+gfNHCp0fuGn4b9QNoD7UUMe4oBskSSLSiW0ri9FblSdjeoLZKvoRzHFBF94wI2Kw0iCBUQgNKHKT3lyG9D4NQySAaS0BnYG/s/HPgGMPT6peWRWAXkuTQ8zxb98pOzdf3HZ4Zz2n8qEh1BM6nHba2CKnDP0yjEz7OERVWcLUVPcTHC/xG94cp1gdlKQ09t3H7lBwccxmztUt9sIGUAdeJFAChTvvnSoYIXRDCCF0AGCyqGSIb3DQEJEAIOMYIXLzCCFysGCSqGSIb3DQEHAqCCFxwwghcYAgEDMQ8wDQYJYIZIAWUDBAIBBQAwggFzBgsqhkiG9w0BCRABBKCCAWIEggFeMIIBWgIBAQYKKwYBBAGEWQoDATAxMA0GCWCGSAFlAwQCAQUABCALbe+1JlANO/4xRH8dJHYO8uMX6ee/KhxzL1ZHE4fguAIGZnLzb33XGBMyMDI0MDYyMDIzMzgyOS4yMzNaMASAAgH0AhgsprYE/OXhkFp093+I2SkmqEFqhU3g+VWggdikgdUwgdIxCzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpXYXNoaW5ndG9uMRAwDgYDVQQHEwdSZWRtb25kMR4wHAYDVQQKExVNaWNyb3NvZnQgQ29ycG9yYXRpb24xLTArBgNVBAsTJE1pY3Jvc29mdCBJcmVsYW5kIE9wZXJhdGlvbnMgTGltaXRlZDEmMCQGA1UECxMdVGhhbGVzIFRTUyBFU046ODZERi00QkJDLTkzMzUxJTAjBgNVBAMTHE1pY3Jvc29mdCBUaW1lLVN0YW1wIFNl
cnZpY2WgghF4MIIHJzCCBQ+gAwIBAgITMwAAAd1dVx2V1K2qGwABAAAB3TANBgkqhkiG9w0BAQsFADB8MQswCQYDVQQGEwJVUzETMBEGA1UECBMKV2FzaGluZ3RvbjEQMA4GA1UEBxMHUmVkbW9uZDEeMBwGA1UEChMVTWljcm9zb2Z0IENvcnBvcmF0aW9uMSYwJAYDVQQDEx1NaWNyb3NvZnQgVGltZS1TdGFtcCBQQ0EgMjAxMDAeFw0yMzEwMTIxOTA3MDlaFw0yNTAxMTAxOTA3MDlaMIHSMQswCQYDVQQGEwJVUzETMBEGA1UECBMKV2FzaGluZ3RvbjEQMA4GA1UEBxMHUmVkbW9uZDEeMBwGA1UEChMVTWljcm9zb2Z0IENvcnBvcmF0aW9uMS0wKwYDVQQLEyRNaWNyb3NvZnQgSXJlbGFuZCBPcGVyYXRpb25zIExpbWl0ZWQxJjAkBgNVBAsTHVRoYWxlcyBUU1MgRVNOOjg2REYtNEJCQy05MzM1MSUwIwYDVQQDExxNaWNyb3NvZnQgVGltZS1TdGFtcCBTZXJ2aWNlMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAqE4DlETqLnecdREfiWd8oun70m+Km5O1y1qKsLExRKs9LLkJYrYO2uJA/5PnYdds3aDsCS1DWlBltMMYXMrp3Te9hg2sI+4kr49Gw/YU9UOMFfLmastEXMgcctqIBqhsTm8Um6jFnRlZ0owKzxpyOEdSZ9pj7v38JHu434Hj7GMmrC92lT+anSYCrd5qvIf4Aqa/qWStA3zOCtxsKAfCyq++pPqUQWpimLu4qfswBhtJ4t7Skx1q1XkRbo1Wdcxg5NEq4Y9/J8Ep1KG5qUujzyQbupraZsDmXvv5fTokB6wySjJivj/0KAMWMdSlwdI4O6OUUEoyLXrzNF0t6t2lbRsFf0QO7HbMEwxoQrw3LFrAIS4Crv77uS0UBuXeFQq27NgLUVRm5SXYGrpTXtLgIqypHeK0tP2o1xvakAniOsgN2WXlOCip5/mCm/5hy8EzzfhtcU3DK13e6MMPbg/0N3zF9Um+6aOwFBCQrlP+rLcetAny53WcdK+0VWLlJr+5sa5gSlLyAXoYNY3n8pu94WR2yhNUg+jymRaGM+zRDucDn64HFAHjOWMSMrPlZbsEDjCmYWbbh+EGZGNXg1un6fvxyACO8NJ9OUDoNgFy/aTHUkfZ0iFpGdJ45d49PqEwXQiXn3wsy7SvDflWJRZwBCRQ1RPFGeoYXHPnD5m6wwMCAwEAAaOCAUkwggFFMB0GA1UdDgQWBBRuovW2jI9R2kXLIdIMpaPQjiXD8TAfBgNVHSMEGDAWgBSfpxVdAF5iXYP05dJlpxtTNRnpcjBfBgNVHR8EWDBWMFSgUqBQhk5odHRwOi8vd3d3Lm1pY3Jvc29mdC5jb20vcGtpb3BzL2NybC9NaWNyb3NvZnQlMjBUaW1lLVN0YW1wJTIwUENBJTIwMjAxMCgxKS5jcmwwbAYIKwYBBQUHAQEEYDBeMFwGCCsGAQUFBzAChlBodHRwOi8vd3d3Lm1pY3Jvc29mdC5jb20vcGtpb3BzL2NlcnRzL01pY3Jvc29mdCUyMFRpbWUtU3RhbXAlMjBQQ0ElMjAyMDEwKDEpLmNydDAMBgNVHRMBAf8EAjAAMBYGA1UdJQEB/wQMMAoGCCsGAQUFBwMIMA4GA1UdDwEB/wQEAwIHgDANBgkqhkiG9w0BAQsFAAOCAgEALlTZsg0uBcgdZsxypW5/2ORRP8rzPIsG+7mHwmuphHbP95o7bKjU6hz1KHK/Ft70ZkO7uSRTPFLInUhmSxlnDoUOrrJk1Pc8SMASdESlEEvxL6ZteD47hUtLQtKZvxchmIuxqpnR8MRy/cd4D7/L+oqcJBaReCGloQzAYxDNGSEbBwZ1evXMalDsdPG9+7nvEXFlfUyQqdYUQ0nq6t37i15SBePSeAg7
H/+Xdcwrce3xPb7O8Yk0AX7n/moGTuevTv3MgJsVe/G2J003l6hd1b72sAiRL5QYPX0Bl0Gu23p1n450Cq4GIORhDmRV9QwpLfXIdA4aCYXG4I7NOlYdqWuql0iWWzLwo2yPlT2w42JYB3082XIQcdtBkOaL38E2U5jJO3Rh6EtsOi+ZlQ1rOTv0538D3XuaoJ1OqsTHAEZQ9sw/7+91hSpomym6kGdS2M5//voMCFXLx797rNH3w+SmWaWI7ZusvdDesPr5kJV2sYz1GbqFQMEGS9iH5iOYZ1xDkcHpZP1F5zz6oMeZuEuFfhl1pqt3n85d4tuDHZ/svhBBCPcqCqOoM5YidWE0TWBi1NYsd7jzzZ3+Tsu6LQrWDwRmsoPuZo6uwkso8qV6Bx4n0UKpjWwNQpSFFrQQdRb5mQouWiEqtLsXCN2sg1aQ8GBtDOcKN0TabjtCNNswggdxMIIFWaADAgECAhMzAAAAFcXna54Cm0mZAAAAAAAVMA0GCSqGSIb3DQEBCwUAMIGIMQswCQYDVQQGEwJVUzETMBEGA1UECBMKV2FzaGluZ3RvbjEQMA4GA1UEBxMHUmVkbW9uZDEeMBwGA1UEChMVTWljcm9zb2Z0IENvcnBvcmF0aW9uMTIwMAYDVQQDEylNaWNyb3NvZnQgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgMjAxMDAeFw0yMTA5MzAxODIyMjVaFw0zMDA5MzAxODMyMjVaMHwxCzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpXYXNoaW5ndG9uMRAwDgYDVQQHEwdSZWRtb25kMR4wHAYDVQQKExVNaWNyb3NvZnQgQ29ycG9yYXRpb24xJjAkBgNVBAMTHU1pY3Jvc29mdCBUaW1lLVN0YW1wIFBDQSAyMDEwMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA5OGmTOe0ciELeaLL1yR5vQ7VgtP97pwHB9KpbE51yMo1V/YBf2xK4OK9uT4XYDP/XE/HZveVU3Fa4n5KWv64NmeFRiMMtY0Tz3cywBAY6GB9alKDRLemjkZrBxTzxXb1hlDcwUTIcVxRMTegCjhuje3XD9gmU3w5YQJ6xKr9cmmvHaus9ja+NSZk2pg7uhp7M62AW36MEBydUv626GIl3GoPz130/o5Tz9bshVZN7928jaTjkY+yOSxRnOlwaQ3KNi1wjjHINSi947SHJMPgyY9+tVSP3PoFVZhtaDuaRr3tpK56KTesy+uDRedGbsoy1cCGMFxPLOJiss254o2I5JasAUq7vnGpF1tnYN74kpEeHT39IM9zfUGaRnXNxF803RKJ1v2lIH1+/NmeRd+2ci/bfV+AutuqfjbsNkz2K26oElHovwUDo9Fzpk03dJQcNIIP8BDyt0cY7afomXw/TNuvXsLz1dhzPUNOwTM5TI4CvEJoLhDqhFFG4tG9ahhaYQFzymeiXtcodgLiMxhy16cg8ML6EgrXY28MyTZki1ugpoMhXV8wdJGUlNi5UPkLiWHzNgY1GIRH29wb0f2y1BzFa/ZcUlFdEtsluq9QBXpsxREdcu+N+VLEhReTwDwV2xo3xwgVGD94q0W29R6HXtqPnhZyacaue7e3PmriLq0CAwEAAaOCAd0wggHZMBIGCSsGAQQBgjcVAQQFAgMBAAEwIwYJKwYBBAGCNxUCBBYEFCqnUv5kxJq+gpE8RjUpzxD/LwTuMB0GA1UdDgQWBBSfpxVdAF5iXYP05dJlpxtTNRnpcjBcBgNVHSAEVTBTMFEGDCsGAQQBgjdMg30BATBBMD8GCCsGAQUFBwIBFjNodHRwOi8vd3d3Lm1pY3Jvc29mdC5jb20vcGtpb3BzL0RvY3MvUmVwb3NpdG9yeS5odG0wEwYDVR0lBAwwCgYIKwYBBQUHAwgwGQYJKwYBBAGCNxQCBAweCgBTAHUAYgBDAEEwCwYDVR0PBAQDAgGGMA8GA1UdEwEB
/wQFMAMBAf8wHwYDVR0jBBgwFoAU1fZWy4/oolxiaNE9lJBb186aGMQwVgYDVR0fBE8wTTBLoEmgR4ZFaHR0cDovL2NybC5taWNyb3NvZnQuY29tL3BraS9jcmwvcHJvZHVjdHMvTWljUm9vQ2VyQXV0XzIwMTAtMDYtMjMuY3JsMFoGCCsGAQUFBwEBBE4wTDBKBggrBgEFBQcwAoY+aHR0cDovL3d3dy5taWNyb3NvZnQuY29tL3BraS9jZXJ0cy9NaWNSb29DZXJBdXRfMjAxMC0wNi0yMy5jcnQwDQYJKoZIhvcNAQELBQADggIBAJ1VffwqreEsH2cBMSRb4Z5yS/ypb+pcFLY+TkdkeLEGk5c9MTO1OdfCcTY/2mRsfNB1OW27DzHkwo/7bNGhlBgi7ulmZzpTTd2YurYeeNg2LpypglYAA7AFvonoaeC6Ce5732pvvinLbtg/SHUB2RjebYIM9W0jVOR4U3UkV7ndn/OOPcbzaN9l9qRWqveVtihVJ9AkvUCgvxm2EhIRXT0n4ECWOKz3+SmJw7wXsFSFQrP8DJ6LGYnn8AtqgcKBGUIZUnWKNsIdw2FzLixre24/LAl4FOmRsqlb30mjdAy87JGA0j3mSj5mO0+7hvoyGtmW9I/2kQH2zsZ0/fZMcm8Qq3UwxTSwethQ/gpY3UA8x1RtnWN0SCyxTkctwRQEcb9k+SS+c23Kjgm9swFXSVRk2XPXfx5bRAGOWhmRaw2fpCjcZxkoJLo4S5pu+yFUa2pFEUep8beuyOiJXk+d0tBMdrVXVAmxaQFEfnyhYWxz/gq77EFmPWn9y8FBSX5+k77L+DvktxW/tM4+pTFRhLy/AsGConsXHRWJjXD+57XQKBqJC4822rpM+Zv/Cuk0+CQ1ZyvgDbjmjJnW4SLq8CdCPSWU5nR0W2rRnj7tfqAxM328y+l7vzhwRNGQ8cirOoo6CGJ/2XBjU02N7oJtpQUQwXEGahC0HVUzWLOhcGbyoYIC1DCCAj0CAQEwggEAoYHYpIHVMIHSMQswCQYDVQQGEwJVUzETMBEGA1UECBMKV2FzaGluZ3RvbjEQMA4GA1UEBxMHUmVkbW9uZDEeMBwGA1UEChMVTWljcm9zb2Z0IENvcnBvcmF0aW9uMS0wKwYDVQQLEyRNaWNyb3NvZnQgSXJlbGFuZCBPcGVyYXRpb25zIExpbWl0ZWQxJjAkBgNVBAsTHVRoYWxlcyBUU1MgRVNOOjg2REYtNEJCQy05MzM1MSUwIwYDVQQDExxNaWNyb3NvZnQgVGltZS1TdGFtcCBTZXJ2aWNloiMKAQEwBwYFKw4DAhoDFQA2I0cZZds1oM/GfKINsQ5yJKMWEKCBgzCBgKR+MHwxCzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpXYXNoaW5ndG9uMRAwDgYDVQQHEwdSZWRtb25kMR4wHAYDVQQKExVNaWNyb3NvZnQgQ29ycG9yYXRpb24xJjAkBgNVBAMTHU1pY3Jvc29mdCBUaW1lLVN0YW1wIFBDQSAyMDEwMA0GCSqGSIb3DQEBBQUAAgUA6h4aiTAiGA8yMDI0MDYyMDExMDMzN1oYDzIwMjQwNjIxMTEwMzM3WjB0MDoGCisGAQQBhFkKBAExLDAqMAoCBQDqHhqJAgEAMAcCAQACAgX7MAcCAQACAhH8MAoCBQDqH2wJAgEAMDYGCisGAQQBhFkKBAIxKDAmMAwGCisGAQQBhFkKAwKgCjAIAgEAAgMHoSChCjAIAgEAAgMBhqAwDQYJKoZIhvcNAQEFBQADgYEAGfu+JpdwJYpU+xUOu693Nef9bUv1la7pxXUtY+P82b5q8/FFZp5WUobGx6JrVuJTDuvqbEZYjwTzWIVUHog1kTXjji1NCFLCVnrlJqPwtH9uRQhnFDSmiP0tG1rNwht6ZViFrRexp+7cebOHSPfk+ZzrUyp9DptMAJmagfLClxAxggQNMIIECQIBATCBkzB8MQsw
CQYDVQQGEwJVUzETMBEGA1UECBMKV2FzaGluZ3RvbjEQMA4GA1UEBxMHUmVkbW9uZDEeMBwGA1UEChMVTWljcm9zb2Z0IENvcnBvcmF0aW9uMSYwJAYDVQQDEx1NaWNyb3NvZnQgVGltZS1TdGFtcCBQQ0EgMjAxMAITMwAAAd1dVx2V1K2qGwABAAAB3TANBglghkgBZQMEAgEFAKCCAUowGgYJKoZIhvcNAQkDMQ0GCyqGSIb3DQEJEAEEMC8GCSqGSIb3DQEJBDEiBCCZX/UOu+vfJ4kbHbQYoi1Ztz4aZycnWIB1vBYNNo/atDCB+gYLKoZIhvcNAQkQAi8xgeowgecwgeQwgb0EIGH/Di2aZaxPeJmce0fRWTftQI3TaVHFj5GI43rAMWNmMIGYMIGApH4wfDELMAkGA1UEBhMCVVMxEzARBgNVBAgTCldhc2hpbmd0b24xEDAOBgNVBAcTB1JlZG1vbmQxHjAcBgNVBAoTFU1pY3Jvc29mdCBDb3Jwb3JhdGlvbjEmMCQGA1UEAxMdTWljcm9zb2Z0IFRpbWUtU3RhbXAgUENBIDIwMTACEzMAAAHdXVcdldStqhsAAQAAAd0wIgQg5Fd0dBTHG2u3SYEF2YcmJ7rHH4kHcV0GlSr/y6AQOYEwDQYJKoZIhvcNAQELBQAEggIAGcOQBnVMUPnu4d2wmccNjUncMe5i0C5VkJ7/VjqN4W6vSuKz7BFVIaUMoufkY94epjipx+Ip3BTj2heew7xB+f6zBKTlkXfakH7TEWeju3WzUYNt3kjJyS3SJeJGFJEiln1S6apObwPtbSq9EqwwFOt8pJy9bAvoxuRM6Olib/eiHr3uiKkk6FCccUgG0PYN/PRUU7htzv6uyRXzCpuNpld3eorXt6nqt6bP7k1NFcwcYSv7V3WcoQzObk5Y9G5n/1rc5Hy9eRHwnz1l7MWOZGsJ9swOBFmoVUK8tB1vPy3bjooJBm7jRT9AcdGTaRS/t5nYe5sECI51sIyq3UBPCH8rNse1BIX9WCtcar1Bg6L64lzdPC7FVSh03vVlDZhNNf7tWRZqlYID2zTaY4p4LIW47O0/Rw2Swe4+hvl49e0v0m0FnmmwXN5097waF3Xv7FIDxbcrK+0DTv2p810Igwj6tErwxhP/367Q9EBzxODSJ8uD35DGMmHsTnViavQUBzj8LeTiA6sUZhF54AbI5dQkZLPydlR3GCmo1RKKO1VhDZnpFanj/N856MOlQqe/6x8sguPM+OpF6MWGvQH5SxsSzSf6dxhzS2pEHbirwJ4k1+tuF0LKOxNLwVVQQ9qPABNiWqml4bJk9oZ1dOTDd9EFjepHqynKk4olY3kq5sA="
+ with mock_wire_protocol(data_file) as protocol:
+ extensions = protocol.get_goal_state().extensions_goal_state.extensions
+ self.assertEqual(expected_signature, extensions[0].encoded_signature)
+
+ # extension.encoded_signature should be None if the property does not exist for the extension
+ for i in range(1, 5):
+ self.assertIsNone(extensions[i].encoded_signature)
+
class CaseFoldedDictionaryTestCase(AgentTestCase):
def test_it_should_retrieve_items_ignoring_case(self):
diff --git a/tests/common/protocol/test_goal_state.py b/tests/common/protocol/test_goal_state.py
index 5b4a2948a..9755b72c4 100644
--- a/tests/common/protocol/test_goal_state.py
+++ b/tests/common/protocol/test_goal_state.py
@@ -401,7 +401,7 @@ def test_it_should_download_certs_on_a_new_fast_track_goal_state(self):
with mock_wire_protocol(data_file) as protocol:
goal_state = GoalState(protocol.client)
- cert = "BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F"
+ cert = "F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9"
crt_path = os.path.join(self.tmp_dir, cert + ".crt")
prv_path = os.path.join(self.tmp_dir, cert + ".prv")
@@ -426,7 +426,7 @@ def test_it_should_download_certs_on_a_new_fabric_goal_state(self):
protocol.mock_wire_data.set_vm_settings_source(GoalStateSource.Fabric)
goal_state = GoalState(protocol.client)
- cert = "BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F"
+ cert = "F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9"
crt_path = os.path.join(self.tmp_dir, cert + ".crt")
prv_path = os.path.join(self.tmp_dir, cert + ".prv")
@@ -483,6 +483,63 @@ def http_get_handler(url, *_, **__):
if settings.protectedSettings is not None:
self.assertIn(settings.certificateThumbprint, thumbprints, "Certificate is missing from the goal state.")
+ def test_goal_state_should_contain_empty_certs_when_it_is_fails_to_decrypt_certs(self):
+ # This test simulates that scenario by mocking the goal state request as Fabric, with certs that fail to decrypt (incorrect-certs.xml)
+
+ data_file = "wire/incorrect-certs.xml"
+
+ def http_get_handler(url, *_, **__):
+ if HttpRequestPredicates.is_certificates_request(url):
+ http_get_handler.certificate_requests += 1
+ data = load_data(data_file)
+ return MockHttpResponse(status=200, body=data.encode('utf-8'))
+ return None
+
+ http_get_handler.certificate_requests = 0
+
+ with mock_wire_protocol(wire_protocol_data.DATA_FILE) as protocol:
+ protocol.set_http_handlers(http_get_handler=http_get_handler)
+ protocol.mock_wire_data.reset_call_counts()
+
+ goal_state = GoalState(protocol.client)
+
+ self.assertEqual(0, len(goal_state.certs.summary), "Cert list should be empty")
+ self.assertEqual(1, http_get_handler.certificate_requests, "There should have been exactly 1 requests for the goal state certificates")
+
+ def test_it_should_refresh_the_goal_state_when_it_is_fails_to_decrypt_cert(self):
+ # This test simulates that scenario by mocking the certificates request: it first returns a set of certificates (incorrect-certs.xml) that will fail to decrypt, and then a
+ # set (certs.xml) that matches what the extensions need. The test then ensures that the goal state was refreshed and the correct certificates were fetched.
+
+ data_files = [
+ "wire/incorrect-certs.xml",
+ "wire/certs.xml"
+ ]
+
+ def http_get_handler(url, *_, **__):
+ if HttpRequestPredicates.is_certificates_request(url):
+ http_get_handler.certificate_requests += 1
+ if http_get_handler.certificate_requests < len(data_files):
+ data = load_data(data_files[http_get_handler.certificate_requests - 1])
+ return MockHttpResponse(status=200, body=data.encode('utf-8'))
+ return None
+ http_get_handler.certificate_requests = 0
+
+ with mock_wire_protocol(wire_protocol_data.DATA_FILE_VM_SETTINGS) as protocol:
+ protocol.set_http_handlers(http_get_handler=http_get_handler)
+ protocol.mock_wire_data.reset_call_counts()
+
+ goal_state = GoalState(protocol.client)
+
+ self.assertEqual(2, protocol.mock_wire_data.call_counts['goalstate'], "There should have been exactly 2 requests for the goal state (original + refresh)")
+ self.assertEqual(2, http_get_handler.certificate_requests, "There should have been exactly 2 requests for the goal state certificates (original + refresh)")
+
+ thumbprints = [c.thumbprint for c in goal_state.certs.cert_list.certificates]
+
+ for extension in goal_state.extensions_goal_state.extensions:
+ for settings in extension.settings:
+ if settings.protectedSettings is not None:
+ self.assertIn(settings.certificateThumbprint, thumbprints, "Certificate is missing from the goal state.")
+
def test_it_should_raise_when_goal_state_properties_not_initialized(self):
with GoalStateTestCase._create_protocol_ws_and_hgap_in_sync() as protocol:
goal_state = GoalState(
diff --git a/tests/common/protocol/test_imds.py b/tests/common/protocol/test_imds.py
index efc705ffa..9333a5f9a 100644
--- a/tests/common/protocol/test_imds.py
+++ b/tests/common/protocol/test_imds.py
@@ -56,7 +56,7 @@ class TestImds(AgentTestCase):
def test_get(self, mock_http_get):
mock_http_get.return_value = get_mock_compute_response()
- test_subject = imds.ImdsClient(restutil.KNOWN_WIRESERVER_IP)
+ test_subject = imds.ImdsClient()
test_subject.get_compute()
self.assertEqual(1, mock_http_get.call_count)
@@ -71,21 +71,21 @@ def test_get(self, mock_http_get):
def test_get_bad_request(self, mock_http_get):
mock_http_get.return_value = MockHttpResponse(status=restutil.httpclient.BAD_REQUEST)
- test_subject = imds.ImdsClient(restutil.KNOWN_WIRESERVER_IP)
+ test_subject = imds.ImdsClient()
self.assertRaises(HttpError, test_subject.get_compute)
@patch("azurelinuxagent.common.protocol.imds.restutil.http_get")
def test_get_internal_service_error(self, mock_http_get):
mock_http_get.return_value = MockHttpResponse(status=restutil.httpclient.INTERNAL_SERVER_ERROR)
- test_subject = imds.ImdsClient(restutil.KNOWN_WIRESERVER_IP)
+ test_subject = imds.ImdsClient()
self.assertRaises(HttpError, test_subject.get_compute)
@patch("azurelinuxagent.common.protocol.imds.restutil.http_get")
def test_get_empty_response(self, mock_http_get):
mock_http_get.return_value = MockHttpResponse(status=httpclient.OK, body=''.encode('utf-8'))
- test_subject = imds.ImdsClient(restutil.KNOWN_WIRESERVER_IP)
+ test_subject = imds.ImdsClient()
self.assertRaises(ValueError, test_subject.get_compute)
def test_deserialize_ComputeInfo(self):
@@ -359,7 +359,7 @@ def _imds_response(f):
return fh.read()
def _assert_validation(self, http_status_code, http_response, expected_valid, expected_response):
- test_subject = imds.ImdsClient(restutil.KNOWN_WIRESERVER_IP)
+ test_subject = imds.ImdsClient()
with patch("azurelinuxagent.common.utils.restutil.http_get") as mock_http_get:
mock_http_get.return_value = MockHttpResponse(status=http_status_code,
reason='reason',
@@ -386,99 +386,86 @@ def test_endpoint_fallback(self):
# http GET calls and enforces a single GET call (fallback would cause 2) and
# checks the url called.
- test_subject = imds.ImdsClient("foo.bar")
+ test_subject = imds.ImdsClient()
# ensure user-agent gets set correctly
for is_health, expected_useragent in [(False, restutil.HTTP_USER_AGENT), (True, restutil.HTTP_USER_AGENT_HEALTH)]:
# set a different resource path for health query to make debugging unit test easier
resource_path = 'something/health' if is_health else 'something'
- for has_primary_ioerror in (False, True):
- # secondary endpoint unreachable
- test_subject._http_get = Mock(side_effect=self._mock_http_get)
- self._mock_imds_setup(primary_ioerror=has_primary_ioerror, secondary_ioerror=True)
- result = test_subject.get_metadata(resource_path=resource_path, is_health=is_health)
- self.assertFalse(result.success) if has_primary_ioerror else self.assertTrue(result.success) # pylint: disable=expression-not-assigned
- self.assertFalse(result.service_error)
- if has_primary_ioerror:
- self.assertEqual('IMDS error in /metadata/{0}: Unable to connect to endpoint'.format(resource_path), result.response)
- else:
- self.assertEqual('Mock success response', result.response)
- for _, kwargs in test_subject._http_get.call_args_list:
- self.assertTrue('User-Agent' in kwargs['headers'])
- self.assertEqual(expected_useragent, kwargs['headers']['User-Agent'])
- self.assertEqual(2 if has_primary_ioerror else 1, test_subject._http_get.call_count)
-
- # IMDS success
- test_subject._http_get = Mock(side_effect=self._mock_http_get)
- self._mock_imds_setup(primary_ioerror=has_primary_ioerror)
- result = test_subject.get_metadata(resource_path=resource_path, is_health=is_health)
- self.assertTrue(result.success)
- self.assertFalse(result.service_error)
- self.assertEqual('Mock success response', result.response)
- for _, kwargs in test_subject._http_get.call_args_list:
- self.assertTrue('User-Agent' in kwargs['headers'])
- self.assertEqual(expected_useragent, kwargs['headers']['User-Agent'])
- self.assertEqual(2 if has_primary_ioerror else 1, test_subject._http_get.call_count)
-
- # IMDS throttled
- test_subject._http_get = Mock(side_effect=self._mock_http_get)
- self._mock_imds_setup(primary_ioerror=has_primary_ioerror, throttled=True)
- result = test_subject.get_metadata(resource_path=resource_path, is_health=is_health)
- self.assertFalse(result.success)
- self.assertFalse(result.service_error)
- self.assertEqual('IMDS error in /metadata/{0}: Throttled'.format(resource_path), result.response)
- for _, kwargs in test_subject._http_get.call_args_list:
- self.assertTrue('User-Agent' in kwargs['headers'])
- self.assertEqual(expected_useragent, kwargs['headers']['User-Agent'])
- self.assertEqual(2 if has_primary_ioerror else 1, test_subject._http_get.call_count)
-
- # IMDS gone error
- test_subject._http_get = Mock(side_effect=self._mock_http_get)
- self._mock_imds_setup(primary_ioerror=has_primary_ioerror, gone_error=True)
- result = test_subject.get_metadata(resource_path=resource_path, is_health=is_health)
- self.assertFalse(result.success)
- self.assertTrue(result.service_error)
- self.assertEqual('IMDS error in /metadata/{0}: HTTP Failed with Status Code 410: Gone'.format(resource_path), result.response)
- for _, kwargs in test_subject._http_get.call_args_list:
- self.assertTrue('User-Agent' in kwargs['headers'])
- self.assertEqual(expected_useragent, kwargs['headers']['User-Agent'])
- self.assertEqual(2 if has_primary_ioerror else 1, test_subject._http_get.call_count)
-
- # IMDS bad request
- test_subject._http_get = Mock(side_effect=self._mock_http_get)
- self._mock_imds_setup(primary_ioerror=has_primary_ioerror, bad_request=True)
- result = test_subject.get_metadata(resource_path=resource_path, is_health=is_health)
- self.assertFalse(result.success)
- self.assertFalse(result.service_error)
- self.assertEqual('IMDS error in /metadata/{0}: [HTTP Failed] [404: reason] Mock not found'.format(resource_path), result.response)
- for _, kwargs in test_subject._http_get.call_args_list:
- self.assertTrue('User-Agent' in kwargs['headers'])
- self.assertEqual(expected_useragent, kwargs['headers']['User-Agent'])
- self.assertEqual(2 if has_primary_ioerror else 1, test_subject._http_get.call_count)
-
- def _mock_imds_setup(self, primary_ioerror=False, secondary_ioerror=False, gone_error=False, throttled=False, bad_request=False):
- self._mock_imds_expect_fallback = primary_ioerror # pylint: disable=attribute-defined-outside-init
- self._mock_imds_primary_ioerror = primary_ioerror # pylint: disable=attribute-defined-outside-init
- self._mock_imds_secondary_ioerror = secondary_ioerror # pylint: disable=attribute-defined-outside-init
+ # IMDS success
+ test_subject._http_get = Mock(side_effect=self._mock_http_get)
+ self._mock_imds_setup()
+ result = test_subject.get_metadata(resource_path=resource_path, is_health=is_health)
+ self.assertTrue(result.success)
+ self.assertFalse(result.service_error)
+ self.assertEqual('Mock success response', result.response)
+ for _, kwargs in test_subject._http_get.call_args_list:
+ self.assertTrue('User-Agent' in kwargs['headers'])
+ self.assertEqual(expected_useragent, kwargs['headers']['User-Agent'])
+ self.assertEqual(1, test_subject._http_get.call_count)
+
+ # Connection error
+ test_subject._http_get = Mock(side_effect=self._mock_http_get)
+ self._mock_imds_setup(ioerror=True)
+ result = test_subject.get_metadata(resource_path=resource_path, is_health=is_health)
+ self.assertFalse(result.success)
+ self.assertFalse(result.service_error)
+ self.assertEqual('IMDS error in /metadata/{0}: Unable to connect to endpoint'.format(resource_path), result.response)
+ for _, kwargs in test_subject._http_get.call_args_list:
+ self.assertTrue('User-Agent' in kwargs['headers'])
+ self.assertEqual(expected_useragent, kwargs['headers']['User-Agent'])
+ self.assertEqual(1, test_subject._http_get.call_count)
+
+ # IMDS throttled
+ test_subject._http_get = Mock(side_effect=self._mock_http_get)
+ self._mock_imds_setup(throttled=True)
+ result = test_subject.get_metadata(resource_path=resource_path, is_health=is_health)
+ self.assertFalse(result.success)
+ self.assertFalse(result.service_error)
+ self.assertEqual('IMDS error in /metadata/{0}: Throttled'.format(resource_path), result.response)
+ for _, kwargs in test_subject._http_get.call_args_list:
+ self.assertTrue('User-Agent' in kwargs['headers'])
+ self.assertEqual(expected_useragent, kwargs['headers']['User-Agent'])
+ self.assertEqual(1, test_subject._http_get.call_count)
+
+ # IMDS gone error
+ test_subject._http_get = Mock(side_effect=self._mock_http_get)
+ self._mock_imds_setup(gone_error=True)
+ result = test_subject.get_metadata(resource_path=resource_path, is_health=is_health)
+ self.assertFalse(result.success)
+ self.assertTrue(result.service_error)
+ self.assertEqual('IMDS error in /metadata/{0}: HTTP Failed with Status Code 410: Gone'.format(resource_path), result.response)
+ for _, kwargs in test_subject._http_get.call_args_list:
+ self.assertTrue('User-Agent' in kwargs['headers'])
+ self.assertEqual(expected_useragent, kwargs['headers']['User-Agent'])
+ self.assertEqual(1, test_subject._http_get.call_count)
+
+ # IMDS bad request
+ test_subject._http_get = Mock(side_effect=self._mock_http_get)
+ self._mock_imds_setup(bad_request=True)
+ result = test_subject.get_metadata(resource_path=resource_path, is_health=is_health)
+ self.assertFalse(result.success)
+ self.assertFalse(result.service_error)
+ self.assertEqual('IMDS error in /metadata/{0}: [HTTP Failed] [404: reason] Mock not found'.format(resource_path), result.response)
+ for _, kwargs in test_subject._http_get.call_args_list:
+ self.assertTrue('User-Agent' in kwargs['headers'])
+ self.assertEqual(expected_useragent, kwargs['headers']['User-Agent'])
+ self.assertEqual(1, test_subject._http_get.call_count)
+
+ def _mock_imds_setup(self, ioerror=False, gone_error=False, throttled=False, bad_request=False):
+ self._mock_imds_ioerror = ioerror # pylint: disable=attribute-defined-outside-init
self._mock_imds_gone_error = gone_error # pylint: disable=attribute-defined-outside-init
self._mock_imds_throttled = throttled # pylint: disable=attribute-defined-outside-init
self._mock_imds_bad_request = bad_request # pylint: disable=attribute-defined-outside-init
def _mock_http_get(self, *_, **kwargs):
- if "foo.bar" == kwargs['endpoint'] and not self._mock_imds_expect_fallback:
- raise Exception("Unexpected endpoint called")
- if self._mock_imds_primary_ioerror and "169.254.169.254" == kwargs['endpoint']:
- raise HttpError("[HTTP Failed] GET http://{0}/metadata/{1} -- IOError timed out -- 6 attempts made"
- .format(kwargs['endpoint'], kwargs['resource_path']))
- if self._mock_imds_secondary_ioerror and "foo.bar" == kwargs['endpoint']:
- raise HttpError("[HTTP Failed] GET http://{0}/metadata/{1} -- IOError timed out -- 6 attempts made"
- .format(kwargs['endpoint'], kwargs['resource_path']))
+ if self._mock_imds_ioerror:
+ raise HttpError("[HTTP Failed] GET http://{0}/metadata/{1} -- IOError timed out -- 6 attempts made".format(kwargs['endpoint'], kwargs['resource_path']))
if self._mock_imds_gone_error:
raise ResourceGoneError("Resource is gone")
if self._mock_imds_throttled:
- raise HttpError("[HTTP Retry] GET http://{0}/metadata/{1} -- Status Code 429 -- 25 attempts made"
- .format(kwargs['endpoint'], kwargs['resource_path']))
+ raise HttpError("[HTTP Retry] GET http://{0}/metadata/{1} -- Status Code 429 -- 25 attempts made".format(kwargs['endpoint'], kwargs['resource_path']))
resp = MagicMock()
resp.reason = 'reason'
diff --git a/tests/common/protocol/test_protocol_util.py b/tests/common/protocol/test_protocol_util.py
index b60ca9af9..494d25319 100644
--- a/tests/common/protocol/test_protocol_util.py
+++ b/tests/common/protocol/test_protocol_util.py
@@ -188,8 +188,8 @@ def test_get_protocol_wireserver_to_wireserver_update_removes_metadataserver_art
self.assertFalse(os.path.exists(mds_cert_path))
# Check firewall rules was reset
- protocol_util.osutil.remove_firewall.assert_called_once()
- protocol_util.osutil.enable_firewall.assert_called_once()
+ self.assertEqual(1, protocol_util.osutil.remove_firewall.call_count, "remove_firewall should be called once")
+ self.assertEqual(1, protocol_util.osutil.enable_firewall.call_count, "enable_firewall should be called once")
@patch('azurelinuxagent.common.conf.get_lib_dir')
@patch('azurelinuxagent.common.conf.enable_firewall')
@@ -234,8 +234,8 @@ def test_get_protocol_metadataserver_to_wireserver_update_removes_metadataserver
self.assertTrue(os.path.isfile(ws_cert_path))
# Check firewall rules was reset
- protocol_util.osutil.remove_firewall.assert_called_once()
- protocol_util.osutil.enable_firewall.assert_called_once()
+ self.assertEqual(1, protocol_util.osutil.remove_firewall.call_count, "remove_firewall should be called once")
+ self.assertEqual(1, protocol_util.osutil.enable_firewall.call_count, "enable_firewall should be called once")
# Check Protocol File is updated to WireProtocol
with open(os.path.join(dir, PROTOCOL_FILE_NAME), "r") as f:
diff --git a/tests/common/protocol/test_wire.py b/tests/common/protocol/test_wire.py
index a51a21fdc..8fa9c9603 100644
--- a/tests/common/protocol/test_wire.py
+++ b/tests/common/protocol/test_wire.py
@@ -95,11 +95,11 @@ def _test_getters(self, test_data, certsMustBePresent, __, MockCryptUtil, _):
protocol.get_goal_state().fetch_extension_manifest(ext_handler.name, ext_handler.manifest_uris)
crt1 = os.path.join(self.tmp_dir,
- '38B85D88F03D1A8E1C671EB169274C09BC4D4703.crt')
+ '8979F1AC8C4215827BF3B5A403E6137B504D02A4.crt')
crt2 = os.path.join(self.tmp_dir,
- 'BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F.crt')
+ 'F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9.crt')
prv2 = os.path.join(self.tmp_dir,
- 'BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F.prv')
+ 'F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9.prv')
if certsMustBePresent:
self.assertTrue(os.path.isfile(crt1))
self.assertTrue(os.path.isfile(crt2))
@@ -526,6 +526,28 @@ def test_report_event_should_only_attempt_max_retries_if_fails_to_send(self, moc
client.report_event(self._get_telemetry_events_generator(event_list))
self.assertEqual(mock_http_request.call_count, 5)
+ def test_get_header_for_cert_should_use_triple_des(self, *_):
+ with mock_wire_protocol(wire_protocol_data.DATA_FILE) as protocol:
+ headers = protocol.client.get_header_for_cert()
+ self.assertIn("x-ms-cipher-name", headers)
+ self.assertEqual(headers["x-ms-cipher-name"], "DES_EDE3_CBC", "Unexpected x-ms-cipher-name")
+
+ def test_get_header_for_remote_access_should_use_aes128(self, *_):
+ with mock_wire_protocol(wire_protocol_data.DATA_FILE) as protocol:
+ headers = protocol.client.get_header_for_remote_access()
+ self.assertIn("x-ms-cipher-name", headers)
+ self.assertEqual(headers["x-ms-cipher-name"], "AES128_CBC", "Unexpected x-ms-cipher-name")
+
+ def test_detect_should_handle_inconsistent_goal_state_errors(self, *_):
+ data_file = wire_protocol_data.DATA_FILE_VM_SETTINGS # Certificates are checked only on FastTrack goal states
+ data_file['certs'] = "wire/certs-2.xml" # Change the certificates to force a GoalStateInconsistentError
+ with mock_wire_protocol(data_file, detect_protocol=False) as protocol:
+ with patch("azurelinuxagent.common.logger.warn") as mock_warn:
+ protocol.detect()
+ self.assertTrue(
+ any(len(args) == 2 and args[1].startswith("[GoalStateInconsistentError]") for args, _ in mock_warn.call_args_list),
+ "Did not find any warnings about an GoalStateInconsistentError: {0}".format(mock_warn.call_args_list))
+
class TestWireClient(HttpRequestPredicates, AgentTestCase):
def test_get_ext_conf_without_extensions_should_retrieve_vmagent_manifests_info(self, *args): # pylint: disable=unused-argument
diff --git a/tests/common/utils/test_extension_process_util.py b/tests/common/utils/test_extension_process_util.py
index 316bad6a3..8058292b9 100644
--- a/tests/common/utils/test_extension_process_util.py
+++ b/tests/common/utils/test_extension_process_util.py
@@ -19,9 +19,9 @@
import subprocess
import tempfile
-from azurelinuxagent.ga.cgroup import CpuCgroup
from azurelinuxagent.common.exception import ExtensionError, ExtensionErrorCodes
from azurelinuxagent.common.future import ustr
+from azurelinuxagent.ga.cpucontroller import CpuControllerV1
from azurelinuxagent.ga.extensionprocessutil import format_stdout_stderr, read_output, \
wait_for_process_completion_or_timeout, handle_process_completion
from tests.lib.tools import AgentTestCase, patch, data_dir
@@ -52,7 +52,7 @@ def test_wait_for_process_completion_or_timeout_should_terminate_cleanly(self):
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
- timed_out, ret, _ = wait_for_process_completion_or_timeout(process=process, timeout=5, cpu_cgroup=None)
+ timed_out, ret, _ = wait_for_process_completion_or_timeout(process=process, timeout=5, cpu_controller=None)
self.assertEqual(timed_out, False)
self.assertEqual(ret, 0)
@@ -70,7 +70,8 @@ def test_wait_for_process_completion_or_timeout_should_kill_process_on_timeout(s
# We don't actually mock the kill, just wrap it so we can assert its call count
with patch('azurelinuxagent.ga.extensionprocessutil.os.killpg', wraps=os.killpg) as patch_kill:
with patch('time.sleep') as mock_sleep:
- timed_out, ret, _ = wait_for_process_completion_or_timeout(process=process, timeout=timeout, cpu_cgroup=None)
+ timed_out, ret, _ = wait_for_process_completion_or_timeout(process=process, timeout=timeout,
+ cpu_controller=None)
# We're mocking sleep to avoid prolonging the test execution time, but we still want to make sure
# we're "waiting" the correct amount of time before killing the process
@@ -89,7 +90,7 @@ def test_handle_process_completion_should_return_nonzero_when_process_fails(self
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
- timed_out, ret, _ = wait_for_process_completion_or_timeout(process=process, timeout=5, cpu_cgroup=None)
+ timed_out, ret, _ = wait_for_process_completion_or_timeout(process=process, timeout=5, cpu_controller=None)
self.assertEqual(timed_out, False)
self.assertEqual(ret, 2)
@@ -105,12 +106,8 @@ def test_handle_process_completion_should_return_process_output(self):
stderr=stderr,
preexec_fn=os.setsid)
- process_output = handle_process_completion(process=process,
- command=command,
- timeout=5,
- stdout=stdout,
- stderr=stderr,
- error_code=42)
+ process_output = handle_process_completion(process=process, command=command, timeout=5, stdout=stdout,
+ stderr=stderr, error_code=42)
expected_output = "[stdout]\ndummy stdout\n\n\n[stderr]\ndummy stderr\n"
self.assertEqual(process_output, expected_output)
@@ -130,12 +127,8 @@ def test_handle_process_completion_should_raise_on_timeout(self):
stderr=stderr,
preexec_fn=os.setsid)
- handle_process_completion(process=process,
- command=command,
- timeout=timeout,
- stdout=stdout,
- stderr=stderr,
- error_code=42)
+ handle_process_completion(process=process, command=command, timeout=timeout, stdout=stdout,
+ stderr=stderr, error_code=42)
# We're mocking sleep to avoid prolonging the test execution time, but we still want to make sure
# we're "waiting" the correct amount of time before killing the process and raising an exception
@@ -156,9 +149,9 @@ def test_handle_process_completion_should_log_throttled_time_on_timeout(self):
with patch('time.sleep') as mock_sleep:
with self.assertRaises(ExtensionError) as context_manager:
test_file = os.path.join(self.tmp_dir, "cpu.stat")
- shutil.copyfile(os.path.join(data_dir, "cgroups", "cpu.stat_t0"),
+ shutil.copyfile(os.path.join(data_dir, "cgroups", "v1", "cpu.stat_t0"),
test_file) # throttled_time = 50
- cgroup = CpuCgroup("test", self.tmp_dir)
+ cpu_controller = CpuControllerV1("test", self.tmp_dir)
process = subprocess.Popen(command, # pylint: disable=subprocess-popen-preexec-fn
shell=True,
cwd=self.tmp_dir,
@@ -167,13 +160,8 @@ def test_handle_process_completion_should_log_throttled_time_on_timeout(self):
stderr=stderr,
preexec_fn=os.setsid)
- handle_process_completion(process=process,
- command=command,
- timeout=timeout,
- stdout=stdout,
- stderr=stderr,
- error_code=42,
- cpu_cgroup=cgroup)
+ handle_process_completion(process=process, command=command, timeout=timeout, stdout=stdout,
+ stderr=stderr, error_code=42, cpu_controller=cpu_controller)
# We're mocking sleep to avoid prolonging the test execution time, but we still want to make sure
# we're "waiting" the correct amount of time before killing the process and raising an exception
@@ -200,11 +188,7 @@ def test_handle_process_completion_should_raise_on_nonzero_exit_code(self):
stderr=stderr,
preexec_fn=os.setsid)
- handle_process_completion(process=process,
- command=command,
- timeout=4,
- stdout=stdout,
- stderr=stderr,
+ handle_process_completion(process=process, command=command, timeout=4, stdout=stdout, stderr=stderr,
error_code=error_code)
self.assertEqual(context_manager.exception.code, error_code)
diff --git a/tests/data/cgroups/cgroup.procs b/tests/data/cgroups/cgroup.procs
new file mode 100644
index 000000000..93c25c16d
--- /dev/null
+++ b/tests/data/cgroups/cgroup.procs
@@ -0,0 +1,3 @@
+123
+234
+345
\ No newline at end of file
diff --git a/tests/data/cgroups/cpu_mount/cpuacct.stat b/tests/data/cgroups/cpu_mount/cpuacct.stat
deleted file mode 100644
index dbdaec701..000000000
--- a/tests/data/cgroups/cpu_mount/cpuacct.stat
+++ /dev/null
@@ -1,2 +0,0 @@
-user 50000
-system 100000
diff --git a/tests/data/cgroups/cpu.stat b/tests/data/cgroups/v1/cpu.stat
similarity index 100%
rename from tests/data/cgroups/cpu.stat
rename to tests/data/cgroups/v1/cpu.stat
diff --git a/tests/data/cgroups/cpu.stat_t0 b/tests/data/cgroups/v1/cpu.stat_t0
similarity index 100%
rename from tests/data/cgroups/cpu.stat_t0
rename to tests/data/cgroups/v1/cpu.stat_t0
diff --git a/tests/data/cgroups/cpu.stat_t1 b/tests/data/cgroups/v1/cpu.stat_t1
similarity index 100%
rename from tests/data/cgroups/cpu.stat_t1
rename to tests/data/cgroups/v1/cpu.stat_t1
diff --git a/tests/data/cgroups/cpuacct.stat b/tests/data/cgroups/v1/cpuacct.stat
similarity index 100%
rename from tests/data/cgroups/cpuacct.stat
rename to tests/data/cgroups/v1/cpuacct.stat
diff --git a/tests/data/cgroups/cpuacct.stat_t0 b/tests/data/cgroups/v1/cpuacct.stat_t0
similarity index 100%
rename from tests/data/cgroups/cpuacct.stat_t0
rename to tests/data/cgroups/v1/cpuacct.stat_t0
diff --git a/tests/data/cgroups/cpuacct.stat_t1 b/tests/data/cgroups/v1/cpuacct.stat_t1
similarity index 100%
rename from tests/data/cgroups/cpuacct.stat_t1
rename to tests/data/cgroups/v1/cpuacct.stat_t1
diff --git a/tests/data/cgroups/cpuacct.stat_t2 b/tests/data/cgroups/v1/cpuacct.stat_t2
similarity index 100%
rename from tests/data/cgroups/cpuacct.stat_t2
rename to tests/data/cgroups/v1/cpuacct.stat_t2
diff --git a/tests/data/cgroups/memory_mount/memory.max_usage_in_bytes b/tests/data/cgroups/v1/memory.max_usage_in_bytes
similarity index 100%
rename from tests/data/cgroups/memory_mount/memory.max_usage_in_bytes
rename to tests/data/cgroups/v1/memory.max_usage_in_bytes
diff --git a/tests/data/cgroups/memory_mount/memory.stat b/tests/data/cgroups/v1/memory.stat
similarity index 100%
rename from tests/data/cgroups/memory_mount/memory.stat
rename to tests/data/cgroups/v1/memory.stat
diff --git a/tests/data/cgroups/missing_memory_counters/memory.stat b/tests/data/cgroups/v1/memory.stat_missing
similarity index 100%
rename from tests/data/cgroups/missing_memory_counters/memory.stat
rename to tests/data/cgroups/v1/memory.stat_missing
diff --git a/tests/data/cgroups/proc_stat_t0 b/tests/data/cgroups/v1/proc_stat_t0
similarity index 100%
rename from tests/data/cgroups/proc_stat_t0
rename to tests/data/cgroups/v1/proc_stat_t0
diff --git a/tests/data/cgroups/proc_stat_t1 b/tests/data/cgroups/v1/proc_stat_t1
similarity index 100%
rename from tests/data/cgroups/proc_stat_t1
rename to tests/data/cgroups/v1/proc_stat_t1
diff --git a/tests/data/cgroups/proc_stat_t2 b/tests/data/cgroups/v1/proc_stat_t2
similarity index 100%
rename from tests/data/cgroups/proc_stat_t2
rename to tests/data/cgroups/v1/proc_stat_t2
diff --git a/tests/data/cgroups/v2/cpu.stat b/tests/data/cgroups/v2/cpu.stat
new file mode 100644
index 000000000..6fcb7b86f
--- /dev/null
+++ b/tests/data/cgroups/v2/cpu.stat
@@ -0,0 +1,9 @@
+usage_usec 817045397
+user_usec 742283732
+system_usec 74761665
+core_sched.force_idle_usec 0
+nr_periods 165261
+nr_throttled 162912
+throttled_usec 15735198706
+nr_bursts 0
+burst_usec 0
diff --git a/tests/data/cgroups/v2/cpu.stat_t0 b/tests/data/cgroups/v2/cpu.stat_t0
new file mode 100644
index 000000000..6fcb7b86f
--- /dev/null
+++ b/tests/data/cgroups/v2/cpu.stat_t0
@@ -0,0 +1,9 @@
+usage_usec 817045397
+user_usec 742283732
+system_usec 74761665
+core_sched.force_idle_usec 0
+nr_periods 165261
+nr_throttled 162912
+throttled_usec 15735198706
+nr_bursts 0
+burst_usec 0
diff --git a/tests/data/cgroups/v2/cpu.stat_t1 b/tests/data/cgroups/v2/cpu.stat_t1
new file mode 100644
index 000000000..a2eaecf6e
--- /dev/null
+++ b/tests/data/cgroups/v2/cpu.stat_t1
@@ -0,0 +1,9 @@
+usage_usec 819624087
+user_usec 744545316
+system_usec 75078770
+core_sched.force_idle_usec 0
+nr_periods 165783
+nr_throttled 163430
+throttled_usec 15796563650
+nr_bursts 0
+burst_usec 0
diff --git a/tests/data/cgroups/v2/cpu.stat_t2 b/tests/data/cgroups/v2/cpu.stat_t2
new file mode 100644
index 000000000..cca6a6e42
--- /dev/null
+++ b/tests/data/cgroups/v2/cpu.stat_t2
@@ -0,0 +1,9 @@
+usage_usec 822052295
+user_usec 746640066
+system_usec 75412229
+core_sched.force_idle_usec 0
+nr_periods 166274
+nr_throttled 163917
+throttled_usec 15853013984
+nr_bursts 0
+burst_usec 0
diff --git a/tests/data/cgroups/v2/memory.events b/tests/data/cgroups/v2/memory.events
new file mode 100644
index 000000000..ee154297a
--- /dev/null
+++ b/tests/data/cgroups/v2/memory.events
@@ -0,0 +1,6 @@
+low 0
+high 9
+max 0
+oom 0
+oom_kill 0
+oom_group_kill 0
diff --git a/tests/data/cgroups/v2/memory.events_missing b/tests/data/cgroups/v2/memory.events_missing
new file mode 100644
index 000000000..5a5d05a34
--- /dev/null
+++ b/tests/data/cgroups/v2/memory.events_missing
@@ -0,0 +1,5 @@
+low 0
+max 0
+oom 0
+oom_kill 0
+oom_group_kill 0
diff --git a/tests/data/cgroups/v2/memory.peak b/tests/data/cgroups/v2/memory.peak
new file mode 100644
index 000000000..25140d458
--- /dev/null
+++ b/tests/data/cgroups/v2/memory.peak
@@ -0,0 +1 @@
+194494464
diff --git a/tests/data/cgroups/v2/memory.stat b/tests/data/cgroups/v2/memory.stat
new file mode 100644
index 000000000..0b0d4c52d
--- /dev/null
+++ b/tests/data/cgroups/v2/memory.stat
@@ -0,0 +1,53 @@
+anon 17589300
+file 134553600
+kernel 25653248
+kernel_stack 0
+pagetables 0
+sec_pagetables 0
+percpu 726400
+sock 0
+vmalloc 0
+shmem 0
+zswap 0
+zswapped 0
+file_mapped 0
+file_dirty 12288
+file_writeback 0
+swapcached 0
+anon_thp 0
+file_thp 0
+shmem_thp 0
+inactive_anon 0
+active_anon 0
+inactive_file 127213568
+active_file 7340032
+unevictable 0
+slab_reclaimable 24061424
+slab_unreclaimable 0
+slab 24061424
+workingset_refault_anon 0
+workingset_refault_file 0
+workingset_activate_anon 0
+workingset_activate_file 0
+workingset_restore_anon 0
+workingset_restore_file 0
+workingset_nodereclaim 128
+pgscan 56624
+pgsteal 56622
+pgscan_kswapd 56624
+pgscan_direct 0
+pgscan_khugepaged 0
+pgsteal_kswapd 56622
+pgsteal_direct 0
+pgsteal_khugepaged 0
+pgfault 3673191
+pgmajfault 1
+pgrefill 124195
+pgactivate 2
+pgdeactivate 0
+pglazyfree 0
+pglazyfreed 0
+zswpin 0
+zswpout 0
+thp_fault_alloc 255
+thp_collapse_alloc 111
diff --git a/tests/data/cgroups/v2/memory.stat_missing b/tests/data/cgroups/v2/memory.stat_missing
new file mode 100644
index 000000000..96d43db68
--- /dev/null
+++ b/tests/data/cgroups/v2/memory.stat_missing
@@ -0,0 +1,51 @@
+kernel 25653248
+kernel_stack 0
+pagetables 0
+sec_pagetables 0
+percpu 726400
+sock 0
+vmalloc 0
+shmem 0
+zswap 0
+zswapped 0
+file_mapped 0
+file_dirty 12288
+file_writeback 0
+swapcached 0
+anon_thp 0
+file_thp 0
+shmem_thp 0
+inactive_anon 0
+active_anon 0
+inactive_file 127213568
+active_file 7340032
+unevictable 0
+slab_reclaimable 24061424
+slab_unreclaimable 0
+slab 24061424
+workingset_refault_anon 0
+workingset_refault_file 0
+workingset_activate_anon 0
+workingset_activate_file 0
+workingset_restore_anon 0
+workingset_restore_file 0
+workingset_nodereclaim 128
+pgscan 56624
+pgsteal 56622
+pgscan_kswapd 56624
+pgscan_direct 0
+pgscan_khugepaged 0
+pgsteal_kswapd 56622
+pgsteal_direct 0
+pgsteal_khugepaged 0
+pgfault 3673191
+pgmajfault 1
+pgrefill 124195
+pgactivate 2
+pgdeactivate 0
+pglazyfree 0
+pglazyfreed 0
+zswpin 0
+zswpout 0
+thp_fault_alloc 255
+thp_collapse_alloc 111
diff --git a/tests/data/cgroups/v2/memory.swap.current b/tests/data/cgroups/v2/memory.swap.current
new file mode 100644
index 000000000..b92677edb
--- /dev/null
+++ b/tests/data/cgroups/v2/memory.swap.current
@@ -0,0 +1 @@
+20000
diff --git a/tests/data/cgroups/v2/proc_uptime_t0 b/tests/data/cgroups/v2/proc_uptime_t0
new file mode 100644
index 000000000..d035316d9
--- /dev/null
+++ b/tests/data/cgroups/v2/proc_uptime_t0
@@ -0,0 +1 @@
+776968.02 1495073.30
diff --git a/tests/data/cgroups/v2/proc_uptime_t1 b/tests/data/cgroups/v2/proc_uptime_t1
new file mode 100644
index 000000000..f0660cf12
--- /dev/null
+++ b/tests/data/cgroups/v2/proc_uptime_t1
@@ -0,0 +1 @@
+777350.57 1495797.44
diff --git a/tests/data/cgroups/v2/proc_uptime_t2 b/tests/data/cgroups/v2/proc_uptime_t2
new file mode 100644
index 000000000..ae3e36aad
--- /dev/null
+++ b/tests/data/cgroups/v2/proc_uptime_t2
@@ -0,0 +1 @@
+779218.68 1499425.34
diff --git a/tests/data/ext/handler_manifest/manifest_boolean_fields_false.json b/tests/data/ext/handler_manifest/manifest_boolean_fields_false.json
new file mode 100644
index 000000000..5ad65fe89
--- /dev/null
+++ b/tests/data/ext/handler_manifest/manifest_boolean_fields_false.json
@@ -0,0 +1,15 @@
+[{
+ "name": "ExampleHandlerLinux",
+ "version": 1.0,
+ "handlerManifest": {
+ "installCommand": "install_cmd",
+ "uninstallCommand": "uninstall_cmd",
+ "updateCommand": "update_cmd",
+ "enableCommand": "enable_cmd",
+ "disableCommand": "disable_cmd",
+ "reportHeartbeat": "false",
+ "continueOnUpdateFailure": "false",
+ "supportsMultipleExtensions": "false"
+ }
+}]
+
diff --git a/tests/data/ext/handler_manifest/manifest_boolean_fields_invalid.json b/tests/data/ext/handler_manifest/manifest_boolean_fields_invalid.json
new file mode 100644
index 000000000..39767d747
--- /dev/null
+++ b/tests/data/ext/handler_manifest/manifest_boolean_fields_invalid.json
@@ -0,0 +1,15 @@
+[{
+ "name": "ExampleHandlerLinux",
+ "version": 1.0,
+ "handlerManifest": {
+ "installCommand": "install_cmd",
+ "uninstallCommand": "uninstall_cmd",
+ "updateCommand": "update_cmd",
+ "enableCommand": "enable_cmd",
+ "disableCommand": "disable_cmd",
+ "reportHeartbeat": "invalid",
+ "continueOnUpdateFailure": "invalid",
+ "supportsMultipleExtensions": 1
+ }
+}]
+
diff --git a/tests/data/ext/handler_manifest/manifest_boolean_fields_strings.json b/tests/data/ext/handler_manifest/manifest_boolean_fields_strings.json
new file mode 100644
index 000000000..ef60d7954
--- /dev/null
+++ b/tests/data/ext/handler_manifest/manifest_boolean_fields_strings.json
@@ -0,0 +1,15 @@
+[{
+ "name": "ExampleHandlerLinux",
+ "version": 1.0,
+ "handlerManifest": {
+ "installCommand": "install_cmd",
+ "uninstallCommand": "uninstall_cmd",
+ "updateCommand": "update_cmd",
+ "enableCommand": "enable_cmd",
+ "disableCommand": "disable_cmd",
+ "reportHeartbeat": "true",
+ "continueOnUpdateFailure": "true",
+ "supportsMultipleExtensions": "True"
+ }
+}]
+
diff --git a/tests/data/ext/handler_manifest/manifest_no_optional_fields.json b/tests/data/ext/handler_manifest/manifest_no_optional_fields.json
new file mode 100644
index 000000000..db3a66948
--- /dev/null
+++ b/tests/data/ext/handler_manifest/manifest_no_optional_fields.json
@@ -0,0 +1,12 @@
+[{
+ "name": "ExampleHandlerLinux",
+ "version": 1.0,
+ "handlerManifest": {
+ "installCommand": "install_cmd",
+ "uninstallCommand": "uninstall_cmd",
+ "updateCommand": "update_cmd",
+ "enableCommand": "enable_cmd",
+ "disableCommand": "disable_cmd"
+ }
+}]
+
diff --git a/tests/data/ext/handler_manifest/valid_manifest.json b/tests/data/ext/handler_manifest/valid_manifest.json
new file mode 100644
index 000000000..e6b85091d
--- /dev/null
+++ b/tests/data/ext/handler_manifest/valid_manifest.json
@@ -0,0 +1,15 @@
+[{
+ "name": "ExampleHandlerLinux",
+ "version": 1.0,
+ "handlerManifest": {
+ "installCommand": "install_cmd",
+ "uninstallCommand": "uninstall_cmd",
+ "updateCommand": "update_cmd",
+ "enableCommand": "enable_cmd",
+ "disableCommand": "disable_cmd",
+ "reportHeartbeat": true,
+ "continueOnUpdateFailure": true,
+ "supportsMultipleExtensions": true
+ }
+}]
+
diff --git a/tests/data/hostgaplugin/ext_conf-agent_family_version.xml b/tests/data/hostgaplugin/ext_conf-agent_family_version.xml
index 5c9e0028f..a277db3d7 100644
--- a/tests/data/hostgaplugin/ext_conf-agent_family_version.xml
+++ b/tests/data/hostgaplugin/ext_conf-agent_family_version.xml
@@ -64,7 +64,7 @@
"runtimeSettings": [
{
"handlerSettings": {
- "protectedSettingsCertThumbprint": "BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F",
+ "protectedSettingsCertThumbprint": "F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9",
"protectedSettings": "MIIBsAYJKoZIhvcNAQcDoIIBoTCCAZ0CAQAxggFpMIIBZQIBADBNMDkxNzA1BgoJkiaJk/IsZAEZFidXaW5kb3dzIEF6dXJlIENSUCBDZXJ0aWZpY2F0ZSBHZW5lcmF0b3ICEFpB/HKM/7evRk+DBz754wUwDQYJKoZIhvcNAQEBBQAEggEADPJwniDeIUXzxNrZCloitFdscQ59Bz1dj9DLBREAiM8jmxM0LLicTJDUv272Qm/4ZQgdqpFYBFjGab/9MX+Ih2x47FkVY1woBkckMaC/QOFv84gbboeQCmJYZC/rZJdh8rCMS+CEPq3uH1PVrvtSdZ9uxnaJ+E4exTPPviIiLIPtqWafNlzdbBt8HZjYaVw+SSe+CGzD2pAQeNttq3Rt/6NjCzrjG8ufKwvRoqnrInMs4x6nnN5/xvobKIBSv4/726usfk8Ug+9Q6Benvfpmre2+1M5PnGTfq78cO3o6mI3cPoBUjp5M0iJjAMGeMt81tyHkimZrEZm6pLa4NQMOEjArBgkqhkiG9w0BBwEwFAYIKoZIhvcNAwcECC5nVaiJaWt+gAhgeYvxUOYHXw==",
"publicSettings": {"GCS_AUTO_CONFIG":true}
}
@@ -77,7 +77,7 @@
"runtimeSettings": [
{
"handlerSettings": {
- "protectedSettingsCertThumbprint": "BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F",
+ "protectedSettingsCertThumbprint": "F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9",
"protectedSettings": "MIIBsAYJKoZIhvcNAQcDoIIBoTCCAZ0CAQAxggFpMIIBZQIBADBNMDkxNzA1BgoJkiaJk/IsZAEZFidXaW5kb3dzIEF6dXJlIENSUCBDZXJ0aWZpY2F0ZSBHZW5lcmF0b3ICEFpB/HKM/7evRk+DBz754wUwDQYJKoZIhvcNAQEBBQAEggEADPJwniDeIUXzxNrZCloitFdscQ59Bz1dj9DLBREAiM8jmxM0LLicTJDUv272Qm/4ZQgdqpFYBFjGab/9MX+Ih2x47FkVY1woBkckMaC/QOFv84gbboeQCmJYZC/rZJdh8rCMS+CEPq3uH1PVrvtSdZ9uxnaJ+E4exTPPviIiLIPtqWafNlzdbBt8HZjYaVw+SSe+CGzD2pAQeNttq3Rt/6NjCzrjG8ufKwvRoqnrInMs4x6nnN5/xvobKIBSv4/726usfk8Ug+9Q6Benvfpmre2+1M5PnGTfq78cO3o6mI3cPoBUjp5M0iJjAMGeMt81tyHkimZrEZm6pLa4NQMOEjArBgkqhkiG9w0BBwEwFAYIKoZIhvcNAwcECC5nVaiJaWt+gAhgeYvxUOYHXw==",
"publicSettings": {"enableGenevaUpload":true}
}
diff --git a/tests/data/hostgaplugin/ext_conf-rsm_version_properties_false.xml b/tests/data/hostgaplugin/ext_conf-rsm_version_properties_false.xml
index e1f1d6ba8..6590c562d 100644
--- a/tests/data/hostgaplugin/ext_conf-rsm_version_properties_false.xml
+++ b/tests/data/hostgaplugin/ext_conf-rsm_version_properties_false.xml
@@ -64,7 +64,7 @@
"runtimeSettings": [
{
"handlerSettings": {
- "protectedSettingsCertThumbprint": "BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F",
+ "protectedSettingsCertThumbprint": "F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9",
"protectedSettings": "MIIBsAYJKoZIhvcNAQcDoIIBoTCCAZ0CAQAxggFpMIIBZQIBADBNMDkxNzA1BgoJkiaJk/IsZAEZFidXaW5kb3dzIEF6dXJlIENSUCBDZXJ0aWZpY2F0ZSBHZW5lcmF0b3ICEFpB/HKM/7evRk+DBz754wUwDQYJKoZIhvcNAQEBBQAEggEADPJwniDeIUXzxNrZCloitFdscQ59Bz1dj9DLBREAiM8jmxM0LLicTJDUv272Qm/4ZQgdqpFYBFjGab/9MX+Ih2x47FkVY1woBkckMaC/QOFv84gbboeQCmJYZC/rZJdh8rCMS+CEPq3uH1PVrvtSdZ9uxnaJ+E4exTPPviIiLIPtqWafNlzdbBt8HZjYaVw+SSe+CGzD2pAQeNttq3Rt/6NjCzrjG8ufKwvRoqnrInMs4x6nnN5/xvobKIBSv4/726usfk8Ug+9Q6Benvfpmre2+1M5PnGTfq78cO3o6mI3cPoBUjp5M0iJjAMGeMt81tyHkimZrEZm6pLa4NQMOEjArBgkqhkiG9w0BBwEwFAYIKoZIhvcNAwcECC5nVaiJaWt+gAhgeYvxUOYHXw==",
"publicSettings": {"GCS_AUTO_CONFIG":true}
}
@@ -77,7 +77,7 @@
"runtimeSettings": [
{
"handlerSettings": {
- "protectedSettingsCertThumbprint": "BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F",
+ "protectedSettingsCertThumbprint": "F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9",
"protectedSettings": "MIIBsAYJKoZIhvcNAQcDoIIBoTCCAZ0CAQAxggFpMIIBZQIBADBNMDkxNzA1BgoJkiaJk/IsZAEZFidXaW5kb3dzIEF6dXJlIENSUCBDZXJ0aWZpY2F0ZSBHZW5lcmF0b3ICEFpB/HKM/7evRk+DBz754wUwDQYJKoZIhvcNAQEBBQAEggEADPJwniDeIUXzxNrZCloitFdscQ59Bz1dj9DLBREAiM8jmxM0LLicTJDUv272Qm/4ZQgdqpFYBFjGab/9MX+Ih2x47FkVY1woBkckMaC/QOFv84gbboeQCmJYZC/rZJdh8rCMS+CEPq3uH1PVrvtSdZ9uxnaJ+E4exTPPviIiLIPtqWafNlzdbBt8HZjYaVw+SSe+CGzD2pAQeNttq3Rt/6NjCzrjG8ufKwvRoqnrInMs4x6nnN5/xvobKIBSv4/726usfk8Ug+9Q6Benvfpmre2+1M5PnGTfq78cO3o6mI3cPoBUjp5M0iJjAMGeMt81tyHkimZrEZm6pLa4NQMOEjArBgkqhkiG9w0BBwEwFAYIKoZIhvcNAwcECC5nVaiJaWt+gAhgeYvxUOYHXw==",
"publicSettings": {"enableGenevaUpload":true}
}
diff --git a/tests/data/hostgaplugin/ext_conf.xml b/tests/data/hostgaplugin/ext_conf.xml
index 8ede27f8a..0e3dec4c8 100644
--- a/tests/data/hostgaplugin/ext_conf.xml
+++ b/tests/data/hostgaplugin/ext_conf.xml
@@ -58,7 +58,7 @@
"runtimeSettings": [
{
"handlerSettings": {
- "protectedSettingsCertThumbprint": "BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F",
+ "protectedSettingsCertThumbprint": "F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9",
"protectedSettings": "MIIBsAYJKoZIhvcNAQcDoIIBoTCCAZ0CAQAxggFpMIIBZQIBADBNMDkxNzA1BgoJkiaJk/Microsoft.Azure.Monitor.AzureMonitorLinuxAgent==",
"publicSettings": {"GCS_AUTO_CONFIG":true}
}
@@ -71,7 +71,7 @@
"runtimeSettings": [
{
"handlerSettings": {
- "protectedSettingsCertThumbprint": "BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F",
+ "protectedSettingsCertThumbprint": "F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9",
"protectedSettings": "MIIBsAYJKoZIhvcNAQcDoIIBoTCCAZ0CAQAxggFpMIIBZQIBADBNMDkxNzA1BgoJkiaJk/Microsoft.Azure.Security.Monitoring.AzureSecurityLinuxAgent==",
"publicSettings": {"enableGenevaUpload":true}
}
diff --git a/tests/data/hostgaplugin/vm_settings-agent_family_version.json b/tests/data/hostgaplugin/vm_settings-agent_family_version.json
index 734cc8147..99d435e51 100644
--- a/tests/data/hostgaplugin/vm_settings-agent_family_version.json
+++ b/tests/data/hostgaplugin/vm_settings-agent_family_version.json
@@ -60,7 +60,7 @@
"settingsSeqNo": 0,
"settings": [
{
- "protectedSettingsCertThumbprint": "BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F",
+ "protectedSettingsCertThumbprint": "F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9",
"protectedSettings": "MIIBsAYJKoZIhvcNAQcDoIIBoTCCAZ0CAQAxggFpMIIBZQIBADBNMDkxNzA1BgoJkiaJk/IsZAEZFidXaW5kb3dzIEF6dXJlIENSUCBDZXJ0aWZpY2F0ZSBHZW5lcmF0b3ICEFpB/HKM/7evRk+DBz754wUwDQYJKoZIhvcNAQEBBQAEggEADPJwniDeIUXzxNrZCloitFdscQ59Bz1dj9DLBREAiM8jmxM0LLicTJDUv272Qm/4ZQgdqpFYBFjGab/9MX+Ih2x47FkVY1woBkckMaC/QOFv84gbboeQCmJYZC/rZJdh8rCMS+CEPq3uH1PVrvtSdZ9uxnaJ+E4exTPPviIiLIPtqWafNlzdbBt8HZjYaVw+SSe+CGzD2pAQeNttq3Rt/6NjCzrjG8ufKwvRoqnrInMs4x6nnN5/xvobKIBSv4/726usfk8Ug+9Q6Benvfpmre2+1M5PnGTfq78cO3o6mI3cPoBUjp5M0iJjAMGeMt81tyHkimZrEZm6pLa4NQMOEjArBgkqhkiG9w0BBwEwFAYIKoZIhvcNAwcECC5nVaiJaWt+gAhgeYvxUOYHXw==",
"publicSettings": "{\"GCS_AUTO_CONFIG\":true}"
}
@@ -78,7 +78,7 @@
"settingsSeqNo": 0,
"settings": [
{
- "protectedSettingsCertThumbprint": "BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F",
+ "protectedSettingsCertThumbprint": "F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9",
"protectedSettings": "MIIBsAYJKoZIhvcNAQcDoIIBoTCCAZ0CAQAxggFpMIIBZQIBADBNMDkxNzA1BgoJkiaJk/IsZAEZFidXaW5kb3dzIEF6dXJlIENSUCBDZXJ0aWZpY2F0ZSBHZW5lcmF0b3ICEFpB/HKM/7evRk+DBz754wUwDQYJKoZIhvcNAQEBBQAEggEADPJwniDeIUXzxNrZCloitFdscQ59Bz1dj9DLBREAiM8jmxM0LLicTJDUv272Qm/4ZQgdqpFYBFjGab/9MX+Ih2x47FkVY1woBkckMaC/QOFv84gbboeQCmJYZC/rZJdh8rCMS+CEPq3uH1PVrvtSdZ9uxnaJ+E4exTPPviIiLIPtqWafNlzdbBt8HZjYaVw+SSe+CGzD2pAQeNttq3Rt/6NjCzrjG8ufKwvRoqnrInMs4x6nnN5/xvobKIBSv4/726usfk8Ug+9Q6Benvfpmre2+1M5PnGTfq78cO3o6mI3cPoBUjp5M0iJjAMGeMt81tyHkimZrEZm6pLa4NQMOEjArBgkqhkiG9w0BBwEwFAYIKoZIhvcNAwcECC5nVaiJaWt+gAhgeYvxUOYHXw==",
"publicSettings": "{\"enableGenevaUpload\":true}"
}
diff --git a/tests/data/hostgaplugin/vm_settings-difference_in_required_features.json b/tests/data/hostgaplugin/vm_settings-difference_in_required_features.json
index 71cdbf5c5..f36524e28 100644
--- a/tests/data/hostgaplugin/vm_settings-difference_in_required_features.json
+++ b/tests/data/hostgaplugin/vm_settings-difference_in_required_features.json
@@ -56,7 +56,7 @@
"settingsSeqNo": 0,
"settings": [
{
- "protectedSettingsCertThumbprint": "BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F",
+ "protectedSettingsCertThumbprint": "F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9",
"protectedSettings": "MIIBsAYJKoZIhvcNAQcDoIIBoTCCAZ0CAQAxggFpMIIBZQIBADBNMDkxNzA1BgoJkiaJk/IsZAEZFidXaW5kb3dzIEF6dXJlIENSUCBDZXJ0aWZpY2F0ZSBHZW5lcmF0b3ICEFpB/HKM/7evRk+DBz754wUwDQYJKoZIhvcNAQEBBQAEggEADPJwniDeIUXzxNrZCloitFdscQ59Bz1dj9DLBREAiM8jmxM0LLicTJDUv272Qm/4ZQgdqpFYBFjGab/9MX+Ih2x47FkVY1woBkckMaC/QOFv84gbboeQCmJYZC/rZJdh8rCMS+CEPq3uH1PVrvtSdZ9uxnaJ+E4exTPPviIiLIPtqWafNlzdbBt8HZjYaVw+SSe+CGzD2pAQeNttq3Rt/6NjCzrjG8ufKwvRoqnrInMs4x6nnN5/xvobKIBSv4/726usfk8Ug+9Q6Benvfpmre2+1M5PnGTfq78cO3o6mI3cPoBUjp5M0iJjAMGeMt81tyHkimZrEZm6pLa4NQMOEjArBgkqhkiG9w0BBwEwFAYIKoZIhvcNAwcECC5nVaiJaWt+gAhgeYvxUOYHXw==",
"publicSettings": "{\"GCS_AUTO_CONFIG\":true}"
}
@@ -76,7 +76,7 @@
"settingsSeqNo": 0,
"settings": [
{
- "protectedSettingsCertThumbprint": "BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F",
+ "protectedSettingsCertThumbprint": "F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9",
"protectedSettings": "MIIBsAYJKoZIhvcNAQcDoIIBoTCCAZ0CAQAxggFpMIIBZQIBADBNMDkxNzA1BgoJkiaJk/IsZAEZFidXaW5kb3dzIEF6dXJlIENSUCBDZXJ0aWZpY2F0ZSBHZW5lcmF0b3ICEFpB/HKM/7evRk+DBz754wUwDQYJKoZIhvcNAQEBBQAEggEADPJwniDeIUXzxNrZCloitFdscQ59Bz1dj9DLBREAiM8jmxM0LLicTJDUv272Qm/4ZQgdqpFYBFjGab/9MX+Ih2x47FkVY1woBkckMaC/QOFv84gbboeQCmJYZC/rZJdh8rCMS+CEPq3uH1PVrvtSdZ9uxnaJ+E4exTPPviIiLIPtqWafNlzdbBt8HZjYaVw+SSe+CGzD2pAQeNttq3Rt/6NjCzrjG8ufKwvRoqnrInMs4x6nnN5/xvobKIBSv4/726usfk8Ug+9Q6Benvfpmre2+1M5PnGTfq78cO3o6mI3cPoBUjp5M0iJjAMGeMt81tyHkimZrEZm6pLa4NQMOEjArBgkqhkiG9w0BBwEwFAYIKoZIhvcNAwcECC5nVaiJaWt+gAhgeYvxUOYHXw==",
"publicSettings": "{\"enableGenevaUpload\":true}"
}
diff --git a/tests/data/hostgaplugin/vm_settings-out-of-sync.json b/tests/data/hostgaplugin/vm_settings-out-of-sync.json
index 0d4806af9..d971bcaa8 100644
--- a/tests/data/hostgaplugin/vm_settings-out-of-sync.json
+++ b/tests/data/hostgaplugin/vm_settings-out-of-sync.json
@@ -56,7 +56,7 @@
"settingsSeqNo": 0,
"settings": [
{
- "protectedSettingsCertThumbprint": "BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F",
+ "protectedSettingsCertThumbprint": "F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9",
"protectedSettings": "MIIBsAYJKoZIhvcNAQcDoIIBoTCCAZ0CAQAxggFpMIIBZQIBADBNMDkxNzA1BgoJkiaJk/IsZAEZFidXaW5kb3dzIEF6dXJlIENSUCBDZXJ0aWZpY2F0ZSBHZW5lcmF0b3ICEFpB/HKM/7evRk+DBz754wUwDQYJKoZIhvcNAQEBBQAEggEADPJwniDeIUXzxNrZCloitFdscQ59Bz1dj9DLBREAiM8jmxM0LLicTJDUv272Qm/4ZQgdqpFYBFjGab/9MX+Ih2x47FkVY1woBkckMaC/QOFv84gbboeQCmJYZC/rZJdh8rCMS+CEPq3uH1PVrvtSdZ9uxnaJ+E4exTPPviIiLIPtqWafNlzdbBt8HZjYaVw+SSe+CGzD2pAQeNttq3Rt/6NjCzrjG8ufKwvRoqnrInMs4x6nnN5/xvobKIBSv4/726usfk8Ug+9Q6Benvfpmre2+1M5PnGTfq78cO3o6mI3cPoBUjp5M0iJjAMGeMt81tyHkimZrEZm6pLa4NQMOEjArBgkqhkiG9w0BBwEwFAYIKoZIhvcNAwcECC5nVaiJaWt+gAhgeYvxUOYHXw==",
"publicSettings": "{\"GCS_AUTO_CONFIG\":true}"
}
diff --git a/tests/data/hostgaplugin/vm_settings-requested_version_properties_false.json b/tests/data/hostgaplugin/vm_settings-requested_version_properties_false.json
index 3a6eb8b1a..d902d9471 100644
--- a/tests/data/hostgaplugin/vm_settings-requested_version_properties_false.json
+++ b/tests/data/hostgaplugin/vm_settings-requested_version_properties_false.json
@@ -60,7 +60,7 @@
"settingsSeqNo": 0,
"settings": [
{
- "protectedSettingsCertThumbprint": "BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F",
+ "protectedSettingsCertThumbprint": "F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9",
"protectedSettings": "MIIBsAYJKoZIhvcNAQcDoIIBoTCCAZ0CAQAxggFpMIIBZQIBADBNMDkxNzA1BgoJkiaJk/IsZAEZFidXaW5kb3dzIEF6dXJlIENSUCBDZXJ0aWZpY2F0ZSBHZW5lcmF0b3ICEFpB/HKM/7evRk+DBz754wUwDQYJKoZIhvcNAQEBBQAEggEADPJwniDeIUXzxNrZCloitFdscQ59Bz1dj9DLBREAiM8jmxM0LLicTJDUv272Qm/4ZQgdqpFYBFjGab/9MX+Ih2x47FkVY1woBkckMaC/QOFv84gbboeQCmJYZC/rZJdh8rCMS+CEPq3uH1PVrvtSdZ9uxnaJ+E4exTPPviIiLIPtqWafNlzdbBt8HZjYaVw+SSe+CGzD2pAQeNttq3Rt/6NjCzrjG8ufKwvRoqnrInMs4x6nnN5/xvobKIBSv4/726usfk8Ug+9Q6Benvfpmre2+1M5PnGTfq78cO3o6mI3cPoBUjp5M0iJjAMGeMt81tyHkimZrEZm6pLa4NQMOEjArBgkqhkiG9w0BBwEwFAYIKoZIhvcNAwcECC5nVaiJaWt+gAhgeYvxUOYHXw==",
"publicSettings": "{\"GCS_AUTO_CONFIG\":true}"
}
@@ -78,7 +78,7 @@
"settingsSeqNo": 0,
"settings": [
{
- "protectedSettingsCertThumbprint": "BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F",
+ "protectedSettingsCertThumbprint": "F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9",
"protectedSettings": "MIIBsAYJKoZIhvcNAQcDoIIBoTCCAZ0CAQAxggFpMIIBZQIBADBNMDkxNzA1BgoJkiaJk/IsZAEZFidXaW5kb3dzIEF6dXJlIENSUCBDZXJ0aWZpY2F0ZSBHZW5lcmF0b3ICEFpB/HKM/7evRk+DBz754wUwDQYJKoZIhvcNAQEBBQAEggEADPJwniDeIUXzxNrZCloitFdscQ59Bz1dj9DLBREAiM8jmxM0LLicTJDUv272Qm/4ZQgdqpFYBFjGab/9MX+Ih2x47FkVY1woBkckMaC/QOFv84gbboeQCmJYZC/rZJdh8rCMS+CEPq3uH1PVrvtSdZ9uxnaJ+E4exTPPviIiLIPtqWafNlzdbBt8HZjYaVw+SSe+CGzD2pAQeNttq3Rt/6NjCzrjG8ufKwvRoqnrInMs4x6nnN5/xvobKIBSv4/726usfk8Ug+9Q6Benvfpmre2+1M5PnGTfq78cO3o6mI3cPoBUjp5M0iJjAMGeMt81tyHkimZrEZm6pLa4NQMOEjArBgkqhkiG9w0BBwEwFAYIKoZIhvcNAwcECC5nVaiJaWt+gAhgeYvxUOYHXw==",
"publicSettings": "{\"enableGenevaUpload\":true}"
}
diff --git a/tests/data/hostgaplugin/vm_settings.json b/tests/data/hostgaplugin/vm_settings.json
index 1f6d44deb..1a4d7e9a7 100644
--- a/tests/data/hostgaplugin/vm_settings.json
+++ b/tests/data/hostgaplugin/vm_settings.json
@@ -53,10 +53,11 @@
"runAsStartupTask": false,
"isJson": true,
"useExactVersion": true,
+ "encodedSignature": "MIInEAYJKoZIhvcNAQcCoIInATCCJv0CAQMxDTALBglghkgBZQMEAgIwCQYHgUuDSAcICaCCDXYwggX0MIID3KADAgECAhMzAAADrzBADkyjTQVBAAAAAAOvMA0GCSqGSIb3DQEBCwUAMH4xCzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpXYXNoaW5ndG9uMRAwDgYDVQQHEwdSZWRtb25kMR4wHAYDVQQKExVNaWNyb3NvZnQgQ29ycG9yYXRpb24xKDAmBgNVBAMTH01pY3Jvc29mdCBDb2RlIFNpZ25pbmcgUENBIDIwMTEwHhcNMjMxMTE2MTkwOTAwWhcNMjQxMTE0MTkwOTAwWjB0MQswCQYDVQQGEwJVUzETMBEGA1UECBMKV2FzaGluZ3RvbjEQMA4GA1UEBxMHUmVkbW9uZDEeMBwGA1UEChMVTWljcm9zb2Z0IENvcnBvcmF0aW9uMR4wHAYDVQQDExVNaWNyb3NvZnQgQ29ycG9yYXRpb24wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDOS8s1ra6f0YGtg0OhEaQa/t3Q+q1MEHhWJhqQVuO5amYXQpy8MDPNoJYk+FWAhePP5LxwcSge5aen+f5Q6WNPd6EDxGzotvVpNi5ve0H97S3F7C/axDfKxyNh21MG0W8Sb0vxi/vorcLHOL9i+t2D6yvvDzLlEefUCbQV/zGCBjXGlYJcUj6RAzXyeNANxSpKXAGd7Fh+ocGHPPphcD9LQTOJgG7Y7aYztHqBLJiQQ4eAgZNU4ac6+8LnEGALgo1ydC5BJEuJQjYKbNTy959HrKSu7LO3Ws0w8jw6pYdC1IMpdTkk2puTgY2PDNzBtLM4evG7FYer3WX+8t1UMYNTAgMBAAGjggFzMIIBbzAfBgNVHSUEGDAWBgorBgEEAYI3TAgBBggrBgEFBQcDAzAdBgNVHQ4EFgQURxxxNPIEPGSO8kqz+bgCAQWGXsEwRQYDVR0RBD4wPKQ6MDgxHjAcBgNVBAsTFU1pY3Jvc29mdCBDb3Jwb3JhdGlvbjEWMBQGA1UEBRMNMjMwMDEyKzUwMTgyNjAfBgNVHSMEGDAWgBRIbmTlUAXTgqoXNzcitW2oynUClTBUBgNVHR8ETTBLMEmgR6BFhkNodHRwOi8vd3d3Lm1pY3Jvc29mdC5jb20vcGtpb3BzL2NybC9NaWNDb2RTaWdQQ0EyMDExXzIwMTEtMDctMDguY3JsMGEGCCsGAQUFBwEBBFUwUzBRBggrBgEFBQcwAoZFaHR0cDovL3d3dy5taWNyb3NvZnQuY29tL3BraW9wcy9jZXJ0cy9NaWNDb2RTaWdQQ0EyMDExXzIwMTEtMDctMDguY3J0MAwGA1UdEwEB/wQCMAAwDQYJKoZIhvcNAQELBQADggIBAISxFt/zR2frTFPB45YdmhZpB2nNJoOoi+qlgcTlnO4QwlYN1w/vYwbDy/oFJolD5r6FMJd0RGcgEM8q9TgQ2OC7gQEmhweVJ7yuKJlQBH7P7Pg5RiqgV3cSonJ+OM4kFHbP3gPLiyzssSQdRuPY1mIWoGg9i7Y4ZC8ST7WhpSyc0pns2XsUe1XsIjaUcGu7zd7gg97eCUiLRdVklPmpXobH9CEAWakRUGNICYN2AgjhRTC4j3KJfqMkU04R6Toyh4/Toswm1uoDcGr5laYnTfcX3u5WnJqJLhuPe8Uj9kGAOcyo0O1mNwDa+LhFEzB6CB32+wfJMumfr6degvLTe8x55urQLeTjimBQgS49BSUkhFN7ois3cZyNpnrMca5AZaC7pLI72vuqSsSlLalGOcZmPHZGYJqZ0BacN274OZ80Q8B11iNokns9Od348bMb5Z4fihxaBWebl8kWEi2OPvQImOAeq3nt7UWJBzJYLAGEpfasaA3ZQgIcEXdD+uwo6ymMzDY6UamFOfYqYWXkntxDGu7ngD2ugKUuc
cYKJJRiiz+LAUcj90BVcSHRLQop9N8zoALr/1sJuwPrVAtxHNEgSW+AKBqIxYWM4Ev32l6agSUAezLMbq5f3d8x9qzT031jMDT+sUAoCw0M5wVtCUQcqINPuYjbS1WgJyZIiEkBMIIHejCCBWKgAwIBAgIKYQ6Q0gAAAAAAAzANBgkqhkiG9w0BAQsFADCBiDELMAkGA1UEBhMCVVMxEzARBgNVBAgTCldhc2hpbmd0b24xEDAOBgNVBAcTB1JlZG1vbmQxHjAcBgNVBAoTFU1pY3Jvc29mdCBDb3Jwb3JhdGlvbjEyMDAGA1UEAxMpTWljcm9zb2Z0IFJvb3QgQ2VydGlmaWNhdGUgQXV0aG9yaXR5IDIwMTEwHhcNMTEwNzA4MjA1OTA5WhcNMjYwNzA4MjEwOTA5WjB+MQswCQYDVQQGEwJVUzETMBEGA1UECBMKV2FzaGluZ3RvbjEQMA4GA1UEBxMHUmVkbW9uZDEeMBwGA1UEChMVTWljcm9zb2Z0IENvcnBvcmF0aW9uMSgwJgYDVQQDEx9NaWNyb3NvZnQgQ29kZSBTaWduaW5nIFBDQSAyMDExMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAq/D6chAcLq3YbqqCEE00uvK2WCGfQhsqa+laUKq4BjgaBEm6f8MMHt03a8YS2AvwOMKZBrDIOdUBFDFC04kNeWSHfpRgJGyvnkmc6Whe0t+bU7IKLMOv2akrrnoJr9eWWcpgGgXpZnboMlImEi/nqwhQz7NEt13YxC4Ddato88tt8zpcoRb0RrrgOGSsbmQ1eKagYw8t00CT+OPeBw3VXHmlSSnnDb6gE3e+lD3v++MrWhAfTVYoonpy4BI6t0le2O3tQ5GD2Xuye4Yb2T6xjF3oiU+EGvKhL1nkkDstrjNYxbc+/jLTswM9sbKvkjh+0p2ALPVOVpEhNSXDOW5kf1O6nA+tGSOEy/S6A4aN91/w0FK/jJSHvMAhdCVfGCi2zCcoOCWYOUo2z3yxkq4cI6epZuxhH2rhKEmdX4jiJV3TIUs+UsS1Vz8kA/DRelsv1SPjcF0PUUZ3s/gA4bysAoJf28AVs70b1FVL5zmhD+kjSbwYuER8ReTBw3J64HLnJN+/RpnF78IcV9uDjexNSTCnq47f7Fufr/zdsGbiwZeBe+3W7UvnSSmnEyimp31ngOaKYnhfsi+E11ecXL93KCjx7W3DKI8sj0A3T8HhhUSJxAlMxdSlQy90lfdu+HggWCwTXWCVmj5PM4TasIgX3p5O9JawvEagbJjS4NaIjAsCAwEAAaOCAe0wggHpMBAGCSsGAQQBgjcVAQQDAgEAMB0GA1UdDgQWBBRIbmTlUAXTgqoXNzcitW2oynUClTAZBgkrBgEEAYI3FAIEDB4KAFMAdQBiAEMAQTALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAfBgNVHSMEGDAWgBRyLToCMZBDuRQFTuHqp8cx0SOJNDBaBgNVHR8EUzBRME+gTaBLhklodHRwOi8vY3JsLm1pY3Jvc29mdC5jb20vcGtpL2NybC9wcm9kdWN0cy9NaWNSb29DZXJBdXQyMDExXzIwMTFfMDNfMjIuY3JsMF4GCCsGAQUFBwEBBFIwUDBOBggrBgEFBQcwAoZCaHR0cDovL3d3dy5taWNyb3NvZnQuY29tL3BraS9jZXJ0cy9NaWNSb29DZXJBdXQyMDExXzIwMTFfMDNfMjIuY3J0MIGfBgNVHSAEgZcwgZQwgZEGCSsGAQQBgjcuAzCBgzA/BggrBgEFBQcCARYzaHR0cDovL3d3dy5taWNyb3NvZnQuY29tL3BraW9wcy9kb2NzL3ByaW1hcnljcHMuaHRtMEAGCCsGAQUFBwICMDQeMiAdAEwAZQBnAGEAbABfAHAAbwBsAGkAYwB5AF8AcwB0AGEAdABlAG0AZQBuAHQALiAdMA0GCSqGSIb3DQEBC
wUAA4ICAQBn8oalmOBUeRou09h0ZyKbC5YR4WOSmUKWfdJ5DJDBZV8uLD74w3LRbYP+vj/oCso7v0epo/Np22O/IjWll11lhJB9i0ZQVdgMknzSGksc8zxCi1LQsP1r4z4HLimb5j0bpdS1HXeUOeLpZMlEPXh6I/MTfaaQdION9MsmAkYqwooQu6SpBQyb7Wj6aC6VoCo/KmtYSWMfCWluWpiW5IP0wI/zRive/DvQvTXvbiWu5a8n7dDd8w6vmSiXmE0OPQvyCInWH8MyGOLwxS3OW560STkKxgrCxq2u5bLZ2xWIUUVYODJxJxp/sfQn+N4sOiBpmLJZiWhub6e3dMNABQamASooPoI/E01mC8CzTfXhj38cbxV9Rad25UAqZaPDXVJihsMdYzaXht/a8/jyFqGaJ+HNpZfQ7l1jQeNbB5yHPgZ3BtEGsXUfFL5hYbXw3MYbBL7fQccOKO7eZS/sl/ahXJbYANahRr1Z85elCUtIEJmAH9AAKcWxm6U/RXceNcbSoqKfenoi+kiVH6v7RyOA9Z74v2u3S5fi63V4GuzqN5l5GEv/1rMjaHXmr/r8i+sLgOppO6/8MO0ETI7f33VtY5E90Z1WTk+/gFcioXgRMiF670EKsT/7qMykXcGhiJtXcVZOSEXAQsmbdlsKgEhr/Xmfwb1tbWrJUnMTDXpQzTGCGWIwghleAgEBMIGVMH4xCzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpXYXNoaW5ndG9uMRAwDgYDVQQHEwdSZWRtb25kMR4wHAYDVQQKExVNaWNyb3NvZnQgQ29ycG9yYXRpb24xKDAmBgNVBAMTH01pY3Jvc29mdCBDb2RlIFNpZ25pbmcgUENBIDIwMTECEzMAAAOvMEAOTKNNBUEAAAAAA68wCwYJYIZIAWUDBAICoFkwFgYJKoZIhvcNAQkDMQkGB4FLg0gHCAkwPwYJKoZIhvcNAQkEMTIEMDBbd8WC98w2hp0LRsyGXkhY0ZY+y0Pl20deVXonOXR+vDsyK96L9uBzpNRlolZD0DANBgkqhkiG9w0BAQEFAASCAQAIaK9t6Unz6YcKR2q8D2Vjvq9j+YK0U1+tb8s2ZslmmL19Yeb+NRy4tkS7lVEmMYRiFTy+jyis6UGL81ziXEXqAfqjkJt/zjN/8Qek91fzKYJMuCfEm6xVv+gfNHCp0fuGn4b9QNoD7UUMe4oBskSSLSiW0ri9FblSdjeoLZKvoRzHFBF94wI2Kw0iCBUQgNKHKT3lyG9D4NQySAaS0BnYG/s/HPgGMPT6peWRWAXkuTQ8zxb98pOzdf3HZ4Zz2n8qEh1BM6nHba2CKnDP0yjEz7OERVWcLUVPcTHC/xG94cp1gdlKQ09t3H7lBwccxmztUt9sIGUAdeJFAChTvvnSoYIXRDCCF0AGCyqGSIb3DQEJEAIOMYIXLzCCFysGCSqGSIb3DQEHAqCCFxwwghcYAgEDMQ8wDQYJYIZIAWUDBAIBBQAwggFzBgsqhkiG9w0BCRABBKCCAWIEggFeMIIBWgIBAQYKKwYBBAGEWQoDATAxMA0GCWCGSAFlAwQCAQUABCALbe+1JlANO/4xRH8dJHYO8uMX6ee/KhxzL1ZHE4fguAIGZnLzb33XGBMyMDI0MDYyMDIzMzgyOS4yMzNaMASAAgH0AhgsprYE/OXhkFp093+I2SkmqEFqhU3g+VWggdikgdUwgdIxCzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpXYXNoaW5ndG9uMRAwDgYDVQQHEwdSZWRtb25kMR4wHAYDVQQKExVNaWNyb3NvZnQgQ29ycG9yYXRpb24xLTArBgNVBAsTJE1pY3Jvc29mdCBJcmVsYW5kIE9wZXJhdGlvbnMgTGltaXRlZDEmMCQGA1UECxMdVGhhbGVzIFRTUyBFU046ODZERi00QkJDLTkzMzUxJTAjBgNVBAMTHE1pY3Jvc29mdCBUaW1lLVN0YW1wIFNlc
nZpY2WgghF4MIIHJzCCBQ+gAwIBAgITMwAAAd1dVx2V1K2qGwABAAAB3TANBgkqhkiG9w0BAQsFADB8MQswCQYDVQQGEwJVUzETMBEGA1UECBMKV2FzaGluZ3RvbjEQMA4GA1UEBxMHUmVkbW9uZDEeMBwGA1UEChMVTWljcm9zb2Z0IENvcnBvcmF0aW9uMSYwJAYDVQQDEx1NaWNyb3NvZnQgVGltZS1TdGFtcCBQQ0EgMjAxMDAeFw0yMzEwMTIxOTA3MDlaFw0yNTAxMTAxOTA3MDlaMIHSMQswCQYDVQQGEwJVUzETMBEGA1UECBMKV2FzaGluZ3RvbjEQMA4GA1UEBxMHUmVkbW9uZDEeMBwGA1UEChMVTWljcm9zb2Z0IENvcnBvcmF0aW9uMS0wKwYDVQQLEyRNaWNyb3NvZnQgSXJlbGFuZCBPcGVyYXRpb25zIExpbWl0ZWQxJjAkBgNVBAsTHVRoYWxlcyBUU1MgRVNOOjg2REYtNEJCQy05MzM1MSUwIwYDVQQDExxNaWNyb3NvZnQgVGltZS1TdGFtcCBTZXJ2aWNlMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAqE4DlETqLnecdREfiWd8oun70m+Km5O1y1qKsLExRKs9LLkJYrYO2uJA/5PnYdds3aDsCS1DWlBltMMYXMrp3Te9hg2sI+4kr49Gw/YU9UOMFfLmastEXMgcctqIBqhsTm8Um6jFnRlZ0owKzxpyOEdSZ9pj7v38JHu434Hj7GMmrC92lT+anSYCrd5qvIf4Aqa/qWStA3zOCtxsKAfCyq++pPqUQWpimLu4qfswBhtJ4t7Skx1q1XkRbo1Wdcxg5NEq4Y9/J8Ep1KG5qUujzyQbupraZsDmXvv5fTokB6wySjJivj/0KAMWMdSlwdI4O6OUUEoyLXrzNF0t6t2lbRsFf0QO7HbMEwxoQrw3LFrAIS4Crv77uS0UBuXeFQq27NgLUVRm5SXYGrpTXtLgIqypHeK0tP2o1xvakAniOsgN2WXlOCip5/mCm/5hy8EzzfhtcU3DK13e6MMPbg/0N3zF9Um+6aOwFBCQrlP+rLcetAny53WcdK+0VWLlJr+5sa5gSlLyAXoYNY3n8pu94WR2yhNUg+jymRaGM+zRDucDn64HFAHjOWMSMrPlZbsEDjCmYWbbh+EGZGNXg1un6fvxyACO8NJ9OUDoNgFy/aTHUkfZ0iFpGdJ45d49PqEwXQiXn3wsy7SvDflWJRZwBCRQ1RPFGeoYXHPnD5m6wwMCAwEAAaOCAUkwggFFMB0GA1UdDgQWBBRuovW2jI9R2kXLIdIMpaPQjiXD8TAfBgNVHSMEGDAWgBSfpxVdAF5iXYP05dJlpxtTNRnpcjBfBgNVHR8EWDBWMFSgUqBQhk5odHRwOi8vd3d3Lm1pY3Jvc29mdC5jb20vcGtpb3BzL2NybC9NaWNyb3NvZnQlMjBUaW1lLVN0YW1wJTIwUENBJTIwMjAxMCgxKS5jcmwwbAYIKwYBBQUHAQEEYDBeMFwGCCsGAQUFBzAChlBodHRwOi8vd3d3Lm1pY3Jvc29mdC5jb20vcGtpb3BzL2NlcnRzL01pY3Jvc29mdCUyMFRpbWUtU3RhbXAlMjBQQ0ElMjAyMDEwKDEpLmNydDAMBgNVHRMBAf8EAjAAMBYGA1UdJQEB/wQMMAoGCCsGAQUFBwMIMA4GA1UdDwEB/wQEAwIHgDANBgkqhkiG9w0BAQsFAAOCAgEALlTZsg0uBcgdZsxypW5/2ORRP8rzPIsG+7mHwmuphHbP95o7bKjU6hz1KHK/Ft70ZkO7uSRTPFLInUhmSxlnDoUOrrJk1Pc8SMASdESlEEvxL6ZteD47hUtLQtKZvxchmIuxqpnR8MRy/cd4D7/L+oqcJBaReCGloQzAYxDNGSEbBwZ1evXMalDsdPG9+7nvEXFlfUyQqdYUQ0nq6t37i15SBePSeAg7H
/+Xdcwrce3xPb7O8Yk0AX7n/moGTuevTv3MgJsVe/G2J003l6hd1b72sAiRL5QYPX0Bl0Gu23p1n450Cq4GIORhDmRV9QwpLfXIdA4aCYXG4I7NOlYdqWuql0iWWzLwo2yPlT2w42JYB3082XIQcdtBkOaL38E2U5jJO3Rh6EtsOi+ZlQ1rOTv0538D3XuaoJ1OqsTHAEZQ9sw/7+91hSpomym6kGdS2M5//voMCFXLx797rNH3w+SmWaWI7ZusvdDesPr5kJV2sYz1GbqFQMEGS9iH5iOYZ1xDkcHpZP1F5zz6oMeZuEuFfhl1pqt3n85d4tuDHZ/svhBBCPcqCqOoM5YidWE0TWBi1NYsd7jzzZ3+Tsu6LQrWDwRmsoPuZo6uwkso8qV6Bx4n0UKpjWwNQpSFFrQQdRb5mQouWiEqtLsXCN2sg1aQ8GBtDOcKN0TabjtCNNswggdxMIIFWaADAgECAhMzAAAAFcXna54Cm0mZAAAAAAAVMA0GCSqGSIb3DQEBCwUAMIGIMQswCQYDVQQGEwJVUzETMBEGA1UECBMKV2FzaGluZ3RvbjEQMA4GA1UEBxMHUmVkbW9uZDEeMBwGA1UEChMVTWljcm9zb2Z0IENvcnBvcmF0aW9uMTIwMAYDVQQDEylNaWNyb3NvZnQgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgMjAxMDAeFw0yMTA5MzAxODIyMjVaFw0zMDA5MzAxODMyMjVaMHwxCzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpXYXNoaW5ndG9uMRAwDgYDVQQHEwdSZWRtb25kMR4wHAYDVQQKExVNaWNyb3NvZnQgQ29ycG9yYXRpb24xJjAkBgNVBAMTHU1pY3Jvc29mdCBUaW1lLVN0YW1wIFBDQSAyMDEwMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA5OGmTOe0ciELeaLL1yR5vQ7VgtP97pwHB9KpbE51yMo1V/YBf2xK4OK9uT4XYDP/XE/HZveVU3Fa4n5KWv64NmeFRiMMtY0Tz3cywBAY6GB9alKDRLemjkZrBxTzxXb1hlDcwUTIcVxRMTegCjhuje3XD9gmU3w5YQJ6xKr9cmmvHaus9ja+NSZk2pg7uhp7M62AW36MEBydUv626GIl3GoPz130/o5Tz9bshVZN7928jaTjkY+yOSxRnOlwaQ3KNi1wjjHINSi947SHJMPgyY9+tVSP3PoFVZhtaDuaRr3tpK56KTesy+uDRedGbsoy1cCGMFxPLOJiss254o2I5JasAUq7vnGpF1tnYN74kpEeHT39IM9zfUGaRnXNxF803RKJ1v2lIH1+/NmeRd+2ci/bfV+AutuqfjbsNkz2K26oElHovwUDo9Fzpk03dJQcNIIP8BDyt0cY7afomXw/TNuvXsLz1dhzPUNOwTM5TI4CvEJoLhDqhFFG4tG9ahhaYQFzymeiXtcodgLiMxhy16cg8ML6EgrXY28MyTZki1ugpoMhXV8wdJGUlNi5UPkLiWHzNgY1GIRH29wb0f2y1BzFa/ZcUlFdEtsluq9QBXpsxREdcu+N+VLEhReTwDwV2xo3xwgVGD94q0W29R6HXtqPnhZyacaue7e3PmriLq0CAwEAAaOCAd0wggHZMBIGCSsGAQQBgjcVAQQFAgMBAAEwIwYJKwYBBAGCNxUCBBYEFCqnUv5kxJq+gpE8RjUpzxD/LwTuMB0GA1UdDgQWBBSfpxVdAF5iXYP05dJlpxtTNRnpcjBcBgNVHSAEVTBTMFEGDCsGAQQBgjdMg30BATBBMD8GCCsGAQUFBwIBFjNodHRwOi8vd3d3Lm1pY3Jvc29mdC5jb20vcGtpb3BzL0RvY3MvUmVwb3NpdG9yeS5odG0wEwYDVR0lBAwwCgYIKwYBBQUHAwgwGQYJKwYBBAGCNxQCBAweCgBTAHUAYgBDAEEwCwYDVR0PBAQDAgGGMA8GA1UdEwEB/
wQFMAMBAf8wHwYDVR0jBBgwFoAU1fZWy4/oolxiaNE9lJBb186aGMQwVgYDVR0fBE8wTTBLoEmgR4ZFaHR0cDovL2NybC5taWNyb3NvZnQuY29tL3BraS9jcmwvcHJvZHVjdHMvTWljUm9vQ2VyQXV0XzIwMTAtMDYtMjMuY3JsMFoGCCsGAQUFBwEBBE4wTDBKBggrBgEFBQcwAoY+aHR0cDovL3d3dy5taWNyb3NvZnQuY29tL3BraS9jZXJ0cy9NaWNSb29DZXJBdXRfMjAxMC0wNi0yMy5jcnQwDQYJKoZIhvcNAQELBQADggIBAJ1VffwqreEsH2cBMSRb4Z5yS/ypb+pcFLY+TkdkeLEGk5c9MTO1OdfCcTY/2mRsfNB1OW27DzHkwo/7bNGhlBgi7ulmZzpTTd2YurYeeNg2LpypglYAA7AFvonoaeC6Ce5732pvvinLbtg/SHUB2RjebYIM9W0jVOR4U3UkV7ndn/OOPcbzaN9l9qRWqveVtihVJ9AkvUCgvxm2EhIRXT0n4ECWOKz3+SmJw7wXsFSFQrP8DJ6LGYnn8AtqgcKBGUIZUnWKNsIdw2FzLixre24/LAl4FOmRsqlb30mjdAy87JGA0j3mSj5mO0+7hvoyGtmW9I/2kQH2zsZ0/fZMcm8Qq3UwxTSwethQ/gpY3UA8x1RtnWN0SCyxTkctwRQEcb9k+SS+c23Kjgm9swFXSVRk2XPXfx5bRAGOWhmRaw2fpCjcZxkoJLo4S5pu+yFUa2pFEUep8beuyOiJXk+d0tBMdrVXVAmxaQFEfnyhYWxz/gq77EFmPWn9y8FBSX5+k77L+DvktxW/tM4+pTFRhLy/AsGConsXHRWJjXD+57XQKBqJC4822rpM+Zv/Cuk0+CQ1ZyvgDbjmjJnW4SLq8CdCPSWU5nR0W2rRnj7tfqAxM328y+l7vzhwRNGQ8cirOoo6CGJ/2XBjU02N7oJtpQUQwXEGahC0HVUzWLOhcGbyoYIC1DCCAj0CAQEwggEAoYHYpIHVMIHSMQswCQYDVQQGEwJVUzETMBEGA1UECBMKV2FzaGluZ3RvbjEQMA4GA1UEBxMHUmVkbW9uZDEeMBwGA1UEChMVTWljcm9zb2Z0IENvcnBvcmF0aW9uMS0wKwYDVQQLEyRNaWNyb3NvZnQgSXJlbGFuZCBPcGVyYXRpb25zIExpbWl0ZWQxJjAkBgNVBAsTHVRoYWxlcyBUU1MgRVNOOjg2REYtNEJCQy05MzM1MSUwIwYDVQQDExxNaWNyb3NvZnQgVGltZS1TdGFtcCBTZXJ2aWNloiMKAQEwBwYFKw4DAhoDFQA2I0cZZds1oM/GfKINsQ5yJKMWEKCBgzCBgKR+MHwxCzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpXYXNoaW5ndG9uMRAwDgYDVQQHEwdSZWRtb25kMR4wHAYDVQQKExVNaWNyb3NvZnQgQ29ycG9yYXRpb24xJjAkBgNVBAMTHU1pY3Jvc29mdCBUaW1lLVN0YW1wIFBDQSAyMDEwMA0GCSqGSIb3DQEBBQUAAgUA6h4aiTAiGA8yMDI0MDYyMDExMDMzN1oYDzIwMjQwNjIxMTEwMzM3WjB0MDoGCisGAQQBhFkKBAExLDAqMAoCBQDqHhqJAgEAMAcCAQACAgX7MAcCAQACAhH8MAoCBQDqH2wJAgEAMDYGCisGAQQBhFkKBAIxKDAmMAwGCisGAQQBhFkKAwKgCjAIAgEAAgMHoSChCjAIAgEAAgMBhqAwDQYJKoZIhvcNAQEFBQADgYEAGfu+JpdwJYpU+xUOu693Nef9bUv1la7pxXUtY+P82b5q8/FFZp5WUobGx6JrVuJTDuvqbEZYjwTzWIVUHog1kTXjji1NCFLCVnrlJqPwtH9uRQhnFDSmiP0tG1rNwht6ZViFrRexp+7cebOHSPfk+ZzrUyp9DptMAJmagfLClxAxggQNMIIECQIBATCBkzB8MQswC
QYDVQQGEwJVUzETMBEGA1UECBMKV2FzaGluZ3RvbjEQMA4GA1UEBxMHUmVkbW9uZDEeMBwGA1UEChMVTWljcm9zb2Z0IENvcnBvcmF0aW9uMSYwJAYDVQQDEx1NaWNyb3NvZnQgVGltZS1TdGFtcCBQQ0EgMjAxMAITMwAAAd1dVx2V1K2qGwABAAAB3TANBglghkgBZQMEAgEFAKCCAUowGgYJKoZIhvcNAQkDMQ0GCyqGSIb3DQEJEAEEMC8GCSqGSIb3DQEJBDEiBCCZX/UOu+vfJ4kbHbQYoi1Ztz4aZycnWIB1vBYNNo/atDCB+gYLKoZIhvcNAQkQAi8xgeowgecwgeQwgb0EIGH/Di2aZaxPeJmce0fRWTftQI3TaVHFj5GI43rAMWNmMIGYMIGApH4wfDELMAkGA1UEBhMCVVMxEzARBgNVBAgTCldhc2hpbmd0b24xEDAOBgNVBAcTB1JlZG1vbmQxHjAcBgNVBAoTFU1pY3Jvc29mdCBDb3Jwb3JhdGlvbjEmMCQGA1UEAxMdTWljcm9zb2Z0IFRpbWUtU3RhbXAgUENBIDIwMTACEzMAAAHdXVcdldStqhsAAQAAAd0wIgQg5Fd0dBTHG2u3SYEF2YcmJ7rHH4kHcV0GlSr/y6AQOYEwDQYJKoZIhvcNAQELBQAEggIAGcOQBnVMUPnu4d2wmccNjUncMe5i0C5VkJ7/VjqN4W6vSuKz7BFVIaUMoufkY94epjipx+Ip3BTj2heew7xB+f6zBKTlkXfakH7TEWeju3WzUYNt3kjJyS3SJeJGFJEiln1S6apObwPtbSq9EqwwFOt8pJy9bAvoxuRM6Olib/eiHr3uiKkk6FCccUgG0PYN/PRUU7htzv6uyRXzCpuNpld3eorXt6nqt6bP7k1NFcwcYSv7V3WcoQzObk5Y9G5n/1rc5Hy9eRHwnz1l7MWOZGsJ9swOBFmoVUK8tB1vPy3bjooJBm7jRT9AcdGTaRS/t5nYe5sECI51sIyq3UBPCH8rNse1BIX9WCtcar1Bg6L64lzdPC7FVSh03vVlDZhNNf7tWRZqlYID2zTaY4p4LIW47O0/Rw2Swe4+hvl49e0v0m0FnmmwXN5097waF3Xv7FIDxbcrK+0DTv2p810Igwj6tErwxhP/367Q9EBzxODSJ8uD35DGMmHsTnViavQUBzj8LeTiA6sUZhF54AbI5dQkZLPydlR3GCmo1RKKO1VhDZnpFanj/N856MOlQqe/6x8sguPM+OpF6MWGvQH5SxsSzSf6dxhzS2pEHbirwJ4k1+tuF0LKOxNLwVVQQ9qPABNiWqml4bJk9oZ1dOTDd9EFjepHqynKk4olY3kq5sA=",
"settingsSeqNo": 0,
"settings": [
{
- "protectedSettingsCertThumbprint": "BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F",
+ "protectedSettingsCertThumbprint": "F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9",
"protectedSettings": "MIIBsAYJKoZIhvcNAQcDoIIBoTCCAZ0CAQAxggFpMIIBZQIBADBNMDkxNzA1BgoJkiaJk/Microsoft.Azure.Monitor.AzureMonitorLinuxAgent==",
"publicSettings": "{\"GCS_AUTO_CONFIG\":true}"
}
@@ -76,7 +77,7 @@
"settingsSeqNo": 0,
"settings": [
{
- "protectedSettingsCertThumbprint": "BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F",
+ "protectedSettingsCertThumbprint": "F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9",
"protectedSettings": "MIIBsAYJKoZIhvcNAQcDoIIBoTCCAZ0CAQAxggFpMIIBZQIBADBNMDkxNzA1BgoJkiaJk/Microsoft.Azure.Security.Monitoring.AzureSecurityLinuxAgent==",
"publicSettings": "{\"enableGenevaUpload\":true}"
}
@@ -192,7 +193,7 @@
"isMultiConfig": false,
"settings": [
{
- "protectedSettingsCertThumbprint": "BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F",
+ "protectedSettingsCertThumbprint": "F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9",
"protectedSettings": "MIIBsAYJKoZIhvcNAQcDoIIBoTCCAZ0CAQAxggFpddesZQewdDBgegkxNzA1BgoJkgergres/Microsoft.OSTCExtensions.VMAccessForLinux=="
}
]
diff --git a/tests/data/wire/certs-2.xml b/tests/data/wire/certs-2.xml
index 66a231ee8..e58e0aeac 100644
--- a/tests/data/wire/certs-2.xml
+++ b/tests/data/wire/certs-2.xml
@@ -1,85 +1,85 @@
2012-11-30
- 5
+ 1
Pkcs7BlobWithPfxContents
- MIIOgwYJKoZIhvcNAQcDoIIOdDCCDnACAQIxggEwMIIBLAIBAoAUiF8ZYMs9mMa8
-QOEMxDaIhGza+0IwDQYJKoZIhvcNAQEBBQAEggEAQW7GyeRVEhHSU1/dzV0IndH0
-rDQk+27MvlsWTcpNcgGFtfRYxu5bzmp0+DoimX3pRBlSFOpMJ34jpg4xs78EsSWH
-FRhCf3EGuEUBHo6yR8FhXDTuS7kZ0UmquiCI2/r8j8gbaGBNeP8IRizcAYrPMA5S
-E8l1uCrw7DHuLscbVni/7UglGaTfFS3BqS5jYbiRt2Qh3p+JPUfm51IG3WCIw/WS
-2QHebmHxvMFmAp8AiBWSQJizQBEJ1lIfhhBMN4A7NadMWAe6T2DRclvdrQhJX32k
-amOiogbW4HJsL6Hphn7Frrw3CENOdWMAvgQBvZ3EjAXgsJuhBA1VIrwofzlDljCC
-DTUGCSqGSIb3DQEHATAUBggqhkiG9w0DBwQIxcvw9qx4y0qAgg0QrINXpC23BWT2
-Fb9N8YS3Be9eO3fF8KNdM6qGf0kKR16l/PWyP2L+pZxCcCPk83d070qPdnJK9qpJ
-6S1hI80Y0oQnY9VBFrdfkc8fGZHXqm5jNS9G32v/AxYpJJC/qrAQnWuOdLtOZaGL
-94GEh3XRagvz1wifv8SRI8B1MzxrpCimeMxHkL3zvJFg9FjLGdrak868feqhr6Nb
-pqH9zL7bMq8YP788qTRELUnL72aDzGAM7HEj7V4yu2uD3i3Ryz3bqWaj9IF38Sa0
-6rACBkiNfZBPgExoMUm2GNVyx8hTis2XKRgz4NLh29bBkKrArK9sYDncE9ocwrrX
-AQ99yn03Xv6TH8bRp0cSj4jzBXc5RFsUQG/LxzJVMjvnkDbwNE41DtFiYz5QVcv1
-cMpTH16YfzSL34a479eNq/4+JAs/zcb2wjBskJipMUU4hNx5fhthvfKwDOQbLTqN
-HcP23iPQIhjdUXf6gpu5RGu4JZ0dAMHMHFKvNL6TNejwx/H6KAPp6rCRsYi6QhAb
-42SXdZmhAyQsFpGD9U5ieJApqeCHfj9Xhld61GqLJA9+WLVhDPADjqHoAVvrOkKH
-OtPegId/lWnCB7p551klAjiEA2/DKxFBIAEhqZpiLl+juZfMXovkdmGxMP4gvNNF
-gbS2k5A0IJ8q51gZcH1F56smdAmi5kvhPnFdy/9gqeI/F11F1SkbPVLImP0mmrFi
-zQD5JGfEu1psUYvhpOdaYDkmAK5qU5xHSljqZFz5hXNt4ebvSlurHAhunJb2ln3g
-AJUHwtZnVBrtYMB0w6fdwYqMxXi4vLeqUiHtIQtbOq32zlSryNPQqG9H0iP9l/G1
-t7oUfr9woI/B0kduaY9jd5Qtkqs1DoyfNMSaPNohUK/CWOTD51qOadzSvK0hJ+At
-033PFfv9ilaX6GmzHdEVEanrn9a+BoBCnGnuysHk/8gdswj9OzeCemyIFJD7iObN
-rNex3SCf3ucnAejJOA0awaLx88O1XTteUjcFn26EUji6DRK+8JJiN2lXSyQokNeY
-ox6Z4hFQDmw/Q0k/iJqe9/Dq4zA0l3Krkpra0DZoWh5kzYUA0g5+Yg6GmRNRa8YG
-tuuD6qK1SBEzmCYff6ivjgsXV5+vFBSjEpx2dPEaKdYxtHMOjkttuTi1mr+19dVf
-hSltbzfISbV9HafX76dhwZJ0QwsUx+aOW6OrnK8zoQc5AFOXpe9BrrOuEX01qrM0
-KX5tS8Zx5HqDLievjir194oi3r+nAiG14kYlGmOTHshu7keGCgJmzJ0iVG/i+TnV
-ZSLyd8OqV1F6MET1ijgR3OPL3kt81Zy9lATWk/DgKbGBkkKAnXO2HUw9U34JFyEy
-vEc81qeHci8sT5QKSFHiP3r8EcK8rT5k9CHpnbFmg7VWSMVD0/wRB/C4BiIw357a
-xyJ/q1NNvOZVAyYzIzf9TjwREtyeHEo5kS6hyWSn7fbFf3sNGO2I30veWOvE6kFA
-HMtF3NplOrTYcM7fAK5zJCBK20oU645TxI8GsICMog7IFidFMdRn4MaXpwAjEZO4
-44m2M+4XyeRCAZhp1Fu4mDiHGqgd44mKtwvLACVF4ygWZnACDpI17X88wMnwL4uU
-vgehLZdAE89gvukSCsET1inVBnn/hVenCRbbZ++IGv2XoYvRfeezfOoNUcJXyawQ
-JFqN0CRB5pliuCesTO2urn4HSwGGoeBd507pGWZmOAjbNjGswlJJXF0NFnNW/zWw
-UFYy+BI9axuhWTSnCXbNbngdNQKHznKe1Lwit6AI3U9jS33pM3W+pwUAQegVdtpG
-XT01YgiMCBX+b8B/xcWTww0JbeUwKXudzKsPhQmaA0lubAo04JACMfON8jSZCeRV
-TyIzgacxGU6YbEKH4PhYTGl9srcWIT9iGSYD53V7Kyvjumd0Y3Qc3JLnuWZT6Oe3
-uJ4xz9jJtoaTDvPJQNK3igscjZnWZSP8XMJo1/f7vbvD57pPt1Hqdirp1EBQNshk
-iX9CUh4fuGFFeHf6MtGxPofbXmvA2GYcFsOez4/2eOTEmo6H3P4Hrya97XHS0dmD
-zFSAjzAlacTrn1uuxtxFTikdOwvdmQJJEfyYWCB1lqWOZi97+7nzqyXMLvMgmwug
-ZF/xHFMhFTR8Wn7puuwf36JpPQiM4oQ/Lp66zkS4UlKrVsmSXIXudLMg8SQ5WqK8
-DjevEZwsHHaMtfDsnCAhAdRc2jCpyHKKnmhCDdkcdJJEymWKILUJI5PJ3XtiMHnR
-Sa35OOICS0lTq4VwhUdkGwGjRoY1GsriPHd6LOt1aom14yJros1h7ta604hSCn4k
-zj9p7wY9gfgkXWXNfmarrZ9NNwlHxzgSva+jbJcLmE4GMX5OFHHGlRj/9S1xC2Wf
-MY9orzlooGM74NtmRi4qNkFj3dQCde8XRR4wh2IvPUCsr4j+XaoCoc3R5Rn/yNJK
-zIkccJ2K14u9X/A0BLXHn5Gnd0tBYcVOqP6dQlW9UWdJC/Xooh7+CVU5cZIxuF/s
-Vvg+Xwiv3XqekJRu3cMllJDp5rwe5EWZSmnoAiGKjouKAIszlevaRiD/wT6Zra3c
-Wn/1U/sGop6zRscHR7pgI99NSogzpVGThUs+ez7otDBIdDbLpMjktahgWoi1Vqhc
-fNZXjA6ob4zTWY/16Ys0YWxHO+MtyWTMP1dnsqePDfYXGUHe8yGxylbcjfrsVYta
-4H6eYR86eU3eXB+MpS/iA4jBq4QYWR9QUkd6FDfmRGgWlMXhisPv6Pfnj384NzEV
-Emeg7tW8wzWR64EON9iGeGYYa2BBl2FVaayMEoUhthhFcDM1r3/Mox5xF0qnlys4
-goWkMzqbzA2t97bC0KDGzkcHT4wMeiJBLDZ7S2J2nDAEhcTLY0P2zvOB4879pEWx
-Bd15AyG1DvNssA5ooaDzKi/Li6NgDuMJ8W7+tmsBwDvwuf2N3koqBeXfKhR4rTqu
-Wg1k9fX3+8DzDf0EjtDZJdfWZAynONi1PhZGbNbaMKsQ+6TflkCACInRdOADR5GM
-rL7JtrgF1a9n0HD9vk2WGZqKI71tfS8zODkOZDD8aAusD2DOSmVZl48HX/t4i4Wc
-3dgi/gkCMrfK3wOujb8tL4zjnlVkM7kzKk0MgHuA1w81zFjeMFvigHes4IWhQVcz
-ek3l4bGifI2kzU7bGIi5e/019ppJzGsVcrOE/3z4GS0DJVk6fy7MEMIFx0LhJPlL
-T+9HMH85sSYb97PTiMWpfBvNw3FSC7QQT9FC3L8d/XtMY3NvZoc7Fz7cSGaj7NXG
-1OgVnAzMunPa3QaduoxMF9346s+4a+FrpRxL/3bb4skojjmmLqP4dsbD1uz0fP9y
-xSifnTnrtjumYWMVi+pEb5kR0sTHl0XS7qKRi3SEfv28uh72KdvcufonIA5rnEb5
-+yqAZiqW2OxVsRoVLVODPswP4VIDiun2kCnfkQygPzxlZUeDZur0mmZ3vwC81C1Q
-dZcjlukZcqUaxybUloUilqfNeby+2Uig0krLh2+AM4EqR63LeZ/tk+zCitHeRBW0
-wl3Bd7ShBFg6kN5tCJlHf/G6suIJVr+A9BXfwekO9+//CutKakCwmJTUiNWbQbtN
-q3aNCnomyD3WjvUbitVO0CWYjZrmMLIsPtzyLQydpT7tjXpHgvwm5GYWdUGnNs4y
-NbA262sUl7Ku/GDw1CnFYXbxl+qxbucLtCdSIFR2xUq3rEO1MXlD/txdTxn6ANax
-hi9oBg8tHzuGYJFiCDCvbVVTHgWUSnm/EqfclpJzGmxt8g7vbaohW7NMmMQrLBFP
-G6qBypgvotx1iJWaHVLNNiXvyqQwTtelNPAUweRoNawBp/5KTwwy/tHeF0gsVQ7y
-mFX4umub9YT34Lpe7qUPKNxXzFcUgAf1SA6vyZ20UI7p42S2OT2PrahJ+uO6LQVD
-+REhtN0oyS3G6HzAmKkBgw7LcV3XmAr39iSR7mdmoHSJuI9bjveAPhniK+N6uuln
-xf17Qnw5NWfr9MXcLli7zqwMglU/1bNirkwVqf/ogi/zQ3JYCo6tFGf/rnGQAORJ
-hvOq2SEYXnizPPIH7VrpE16+jUXwgpiQ8TDyeLPmpZVuhXTXiCaJO5lIwmLQqkmg
-JqNiT9V44sksNFTGNKgZo5O9rEqfqX4dLjfv6pGJL+MFXD9if4f1JQiXJfhcRcDh
-Ff9B6HukgbJ1H96eLUUNj8sL1+WPOqawkS4wg7tVaERE8CW7mqk15dCysn9shSut
-I+7JU7+dZsxpj0ownrxuPAFuT8ZlcBPrFzPUwTlW1G0CbuEco8ijfy5IfbyGCn5s
-K/0bOfAuNVGoOpLZ1dMki2bGdBwQOQlkLKhAxYcCVQ0/urr1Ab+VXU9kBsIU8ssN
-GogKngYpuUV0PHmpzmobielOHLjNqA2v9vQSV3Ed48wRy5OCwLX1+vYmYlggMDGt
-wfl+7QbXYf+k5WnELf3IqYvh8ZWexa0=
+ MIIOgwYJKoZIhvcNAQcDoIIOdDCCDnACAQIxggEwMIIBLAIBAoAU08PI+CBUqOd4
+Nbte7MLw2qCYn1UwDQYJKoZIhvcNAQEBBQAEggEAU1y8uuEyQMXa7eGlK/PB5F5+
+ZEYBHRpBpSKlyIpTJhN+emNLtuPRlIJ0L0zlfkvjMmnoApXujUb91tnHVQu2tUV4
+9Ws3goQjqIb6baQmxf8pctsL56vHts763Wl+AwiFLc7twoq/4FmmqwvFzxHE+c2o
+IyxxYY72ZNorN5sux0b+ghEeZHkdds6uR/DHtht+zCy/JP63Phf53dAoUoO4p9Ym
+WJhe2Mccv9t/yrtneVEIw/p1GqUPSY+tiGMNMxNvXlUrtdoaUzyzzXmqVbMXb6PB
+bWFtkkRJBCMYA8Ozh4La6y8Y1jgFj6vCkoxX3s9GVQbpeyon7leanAiHwArgejCC
+DTUGCSqGSIb3DQEHATAUBggqhkiG9w0DBwQIY87YJhlLuuSAgg0QnoSp+Z+aYRAI
+uNaSDIyvQ/1/xYMW6TCqp19yiOGRu5bzDNX0tKN5cCLIvRX5FZmLbLbApziZlMsV
+wrHCmVBnN8XYCdZsK+Wy39ORULAfurkjem6arn/NFnfN9DLiSEYwKbSC4VNegfkT
+lJlgnSVUs7Z6v86YUEuwBnmvyCDbIit3PbfKJzaCr9DSPXKwBFRZqTTsFWovBGaA
+cQvbuqxbbkm4cNYmwmT84TXhjYDuTfP8KEPgdBD1F8cqB+e6OuQSG3N+tBHKi7DH
+Gc+30IimJVcrwbPCNDlteHHTLxaeDM4g3eoyj7B6J+/kAMLdoWuH9kwdr75Dd5OJ
+SGY7utJ+v4A92SKc7G01tQnHZYOxn+JFKWQ4y/CR2lTtYfhh8pd9jSSHsg0jGtKs
+Zte/mpfrOHTpXd3K7F1/UiXTRNWbfy/7pBWPdqgSaOAuVH180VAHCDnaOtvf2w7L
+tJN74gesbcwPgQiAiD9um1eOqOMObu3gqXdeIkMksbhrTSOzLuO8c3t0R+8lL6QE
+2K54t7PMDQ8ScmktNMWG9heBbZmAlkLZ2VK+jfGpbVEGRSWKRkpBMQOqLGh7iRkv
+EPtr44/F5cWwXVN6ofCg25aGwLrAaD9hlprGNByjGezjrFxj4NSDyKYmjhfF4+RA
+CfEN/j19OadJgY8ByH+L190VOOc3Xcf0aiFJPqV+MmTm0QcOmaOIPFfwRHjWiuS1
+K5kzX15uDgIZED2NWvJtwyuJ+p8xcWtmdE0nGxhOHV+3ZZu8WZ9Qv7LU2eSJQ5De
+5uzb5sDzVZI8zfQ6LX2nF7ilntzxzODcv5Eoor8NQAU5xPKvb66aRa5BV5xzCl8A
+/FY61ztGpCD4DfPHFpCldcHKCPk1qzu/7kL3LQ49DV5GcVwzzanHQaINWo5xhUu1
+XaUcWe7LVOPYvqCrSF8v3dB56RHF1MJMxCNdZo1oVup3FjIU3N4ZUl5qX5Ixetp2
+ftUZHsw3r+cotronsrne8R4gl3PejIc6rVmz7cpnPY6l1T70QEEtnxcHgqIFZeCB
+n3IHOBOlaS3DbtOVzclySUF3z1+Gtk8Entc1ksNX2MwknFUM2AjQWuvjVDm/ZKaY
+kPtbr52IDKURYzDecuBeTuZCq7ztaOqdc0D+sLFn4Z8CBzl0OdOrDU25h/wir/r7
+DiCGFAGuPIVtsaO0C/aLCM0IJlDW9Lj9YMXy5jZ4ziRT6CmarmjO+BLBL9yHK7pR
+rCEJoYRZUyw6nAZNW3EkftxMWJNe00SkJyccMPLgQA6ORnuHC3wo4EBH62vBA4vq
+JszIKm8xselXbAQoyeRtXBVvFEV7gz/3US43K2HoHi+Z9N60LRw7V+aihz+nKTnC
+lioA+owDvgsJmVwuERse8ZaUwXigfKyCUnrbEAYFeSIQyvKs0TG6pAGm2ZjqFJw/
+L0HLPQVUf6HLZY7HD/xCz21X3mL28VZ82Fr/luOqIk187M4CnyudmZX64tS/o+TR
+n9lSJhV4H6y5WCCTSnyGnjcLSm5lMg9H+4vwRB95qfKS9B8ZLSesBbk/VUwCw1fw
+IeR2S1S9PUO+J0lUKGWWrBjDNKIkR5vVLXyazO+BFz6HIq3U0Df9Gya3kng4BfZK
+a3X9ALP1PEdfFeRyH7T83NN20686Q1uSzkKIKmKYp5YRuUsZdrGSSIbgO5UlWayF
+YWQPIrpTy+v2lP9la9YLPdSWG0a/pMA5BFzovHgSJ733yowmw7sqn2wsZyiMTTOy
+lbF7im1hbB3bfzow6SA8IE7O5XiAIyIk35HNJswPMkJWQzzuwNGKIla3f+HfPaRO
+7weJPIEeQr7jUdgiQLl9A9/kHdp8jMy2jwrys6LY9rwEMAodpaN/yXYF9oOFvBsC
+75az848gx7OTB/OcBKFNkeKkdWYo3GYP0DwzTcV3sV+bIllKGzGhuN7KOyn7XLSN
+ZG2kEm/+s05DdxpagcGyAWKT6myDjuMo/lAll/A3bnmwrP/I5YO0bLn2cmEq6dGx
+AcWC5eELHoKo9hv6pjU9BszkHIgMq2B6Oe35xnAi36RlarOU8D4+xop3IqN2Jy65
+eec15LopFUrCcVgSddf7h+qS0jQGiEPuUNZAuZBA0ZVmHzDtkHJqdSpSAXTvykVC
+GIPbCWce/0X9UxxrciJ7foXebz7A9b1dkEMI0UCNBkiO5kGVJBBxGcHOtYvzWc9+
+oRhN68tOksmNFiNIuxTRG1iariPQrDocbsEy+yDDmSxJPZ4wNjPofjZ1XXaXkjs4
+Q79ptA8JLwzHv7dRCsV+r3GUllIn5TOb9adbIowmZG+nSWq3vE1AoHgymwYo064p
+ZlcrtsZRfo9SeqMf3aAOgQtYDpCi2QhCipQYe0IFYWdShdQzxqXyCObm7zey6PnI
+4LZ2J56Z8QXPloo8LfsmcqILWEMOxCc66k5+QFb/MKDV/lYtWZzTES/TFhRdNydw
+yCdizmdTWo2wfk9YU/pcwRZUAzhk+/JQJA0tef6kyUv+ozQue4JVw8UBRoWJRrXf
+mO4kGeEpoVu8Hlk3XVeEQTEMP8gre2t1WSQhgRuUPWHvsVMjRfn4K8rk4MxU94Op
+XselOgz+E0n3XpwHh9gcv43t+qd5YBpE3uAI11hUJpZqsjAo8AiAXppzXZQ9Xx66
+duz3UZLobVZL5CwFuCiaE3b3rx5Qlt9SKNQA8aG6e6N1hwHzl69zT1BN2ZIvrSuL
+ihtQ4E7D6KlEWhPV2c12tMgiDs1CTbOyY5uX8Q+dMilp1Y/5iC6LwzAjJ8IvhtqY
+NniVsVocO9uyRe5cYPLM/F/4rcnnmoIeTbPeGiI91vGnLH+wrgZ/HSntN7C5nG6s
+oay685GW620S0Ac71IcRZajNTM7Rfc9JpCNzwb2WnZw4LKyybfXcHSStk4aqw8P+
+oRsOLgRLO4m9CYnsJBcVX7oF+/IUWyPfL/4sAIUIF+7mXP+Z18paTmbZRIrvjwcA
++QhctZXYVSeUQE4RtLu7pKxTYlZZesZqVhEXj733RMwgYuQecqCMTcF6StpEsKPs
+BUZDXZZrCl9kUMMB7m5bsnBGB3o/QbyS/hkNwI8pVmQHNIVKdKOcxH0cCRouKUH1
+MzYxuZfVS1dvgkhVhPeySy1AZ2A/oBFFz2PWxzftKwaZ5KwDx4VI8x3yYaMuXmvK
+cyIWS+2s+Ky/ofOOAJPYiv2aaKtLnOjo+78oLyAm7NVNaQ31JFVPAxCbmEnIu4Ai
+GngAH4hmVp/f2/pfGq/OI/HFFeAwwsxUKWOsLu+Di7QcT81PrkHVFadmLXxA9iyc
+UmT5Oqg0h4V5PWwaGVfgDMFs7VO0dThZ+cjXLGWvC2bTWpvxJVsgq+J/MCIZsiSJ
+eECBhDvvsKCmigM9+qQ7iPjLWP2DL+CvbLXWLVuaj+rjwpoAx+2ALfWP0aRsetBk
+3vbKm4Pm92401TyGmV8HJfpgMrjbScrmsdv+10ljj3eigaUGGzS0UImJIXEerbia
+3m31u8IaYF0fFsONHa0+0RuEhFVhtgx3ojI9wN6OM4sxIgDMY+Iyrny/Dn4qlVJo
+bmW2hahljpIgT0x9KwZgflyM7VVckRIk+SzJDmqqYdEVk6CnxpKcVJgaD3z/Q4ez
+0doYtQeeK7W4EWNJACosqMCFKnFZlOyMELE0gyhdeCgM1xXOU4nxzzUJXFAKukSi
+6RQANERsNoXnkfYd6Pt39k4IaBkJ3/lmBVdONqoPDjwDJT887kyFo9GfxgOZ+ZAS
+KlVD9YiDSXkgq4/KGq8zNb0jZiZjd02uzzYVvLfKx/TGhVy5WEnf2IeC0gLZ3wNI
+jo0894/Ss0uXbbl5HoOhLdOQbYuZ5QB5S6W6TbcM5Mrt9S0rkJY7xYxnlmXTQ3A7
+q+wfi5IIAIYuRd1uwZ/msCF6L2UM6y0+So5P0X8YVY4tT1Oq8AxjJVLVMZVBPq7b
+nQwChfVf5HOEfNehO52UwRA1C6IGH9/2T6lPrJOuZp7oxUE0CtVYNDbqcj9lbb7A
+cEcQjQzgYnH3xmj1ZjBpyQ9zL5o0g7ZTwAq8zA1LhMBjrgSlYd2s3947Ii4xBaof
+CCA8OVDeqHTqVxFQQk5rrHCDPOSHLCXAqqArXb5yl90Vk1wU7BnPe6iwScCcPbWd
+rkw8twZYLNp7sCDTZ5es77Zzs431R1sc8pL/SOwbv9o30cQfbW9FZAhboyI3o/ug
+RdKYlB72y8wN8ijh/UENo3W89MzHtbZ1XYMCauYn9zDUGci4Bnziqfpd/dV+CUeC
+Fs/DP5f2OkiinHRmf060xj7HN7Q3SWziFbMRVO85/e7jjUcNQyBqikHXBl3V2hpM
+hRPsObhPAoLVxz8fBVMYfxR1E7wTpv5KWzvWSPh4QUX+gRpCYL/h/WJ6qUqjeXMP
+1u6vM7uX9+OjNkEAql9L9cPmm1GIam8yBoRsP/Om0VFKDZUvhTo1QC1Q3finiSm4
+89s7tlobx0KafcD+yNKpSFtq/XUIv3Q=
\ No newline at end of file
diff --git a/tests/data/wire/certs.xml b/tests/data/wire/certs.xml
index 5908de793..2db71ca05 100644
--- a/tests/data/wire/certs.xml
+++ b/tests/data/wire/certs.xml
@@ -1,85 +1,85 @@
2012-11-30
- 3
+ 1
Pkcs7BlobWithPfxContents
- MIIOgwYJKoZIhvcNAQcDoIIOdDCCDnACAQIxggEwMIIBLAIBAoAUZcG9X+5aK8VZ
-FY8eJV9j+RImq58wDQYJKoZIhvcNAQEBBQAEggEAn/hOytP/StyRuXHcqFq6x+Za
-7gHfO8prXWdZW4e28NLt/x5ZOBHDDZ6buwwdXEZME0+RoiJvLqP2RNhZkEO8bkna
-pS76xLZE4NXyfxkeEs1vJYis0WJdt/56uCzBuud2SBLuMWoAWgF5alokN0uFpVgm
-CKCos+xv6Pisolc6geM8xQTYe6sLf5Z23LWftWfJqzuo/29glCCre7R80OLeZe5w
-pN6XztbYz06nhVByC35To8Lm0akWAAKU7sfqM1Nty4P0rwUJPKXo42uN1GKYbDbF
-x8piCAd+rs+q4Alu3qK/YaTPpMb2ECRMH6CYB8Klf/CbuWykkfS8zrsnpXT1kzCC
-DTUGCSqGSIb3DQEHATAUBggqhkiG9w0DBwQInjJWFaJcZz2Agg0QX6NlJUH17o20
-90gfjWV01mPmzLKx71JT+hyzKr5vHywDSRI/mdb3RqA59ZrIKeyWr0HXEOuABlul
-nxjc/Rfk1tiLQwh0iqlOjlMtRsxS6yDA0WNwK2Y9gaXcdgDm5Vioai18l4Pd0qzK
-fsof5a/jEJyunW1CZK19QciwfQ2pS8QbRYgeLRZRft2I+kv6cWXlGS6YrMqKQC8t
-QMxnXR4AuzVllPLbbIgtM3l9oS+6jl7jKyKogeroJ9FNLjoMBJLldRLGPRhkCmdJ
-Z1m+s/BAVUH08qgj2kmHzucdULLjlRcmma9m/h91TcQCXHAavf7S+U9QwIyGRh83
-t4Y7EqbQ93mOgjajFzILSL7AT/irgJpDu6CJqMu3EMNDA0mjxn5Cdvj40sufL/g3
-UyBwqosmIwAPzNDmhPtTKvHaHfGY/k8WhoIYfAA5Lhq1z22/RODZOY0Ch2XyxQM4
-s35eppe6IhnwyMv6HfrCrqE/o/16OrvvbaFQTeTlMvU0P7MIR4pVW6tRq4NEa5Wx
-JcvGutuMuzH1VMcqcKdc7wYyOqDOGU43kcV8PiALTIcqrhD8NDrKks1jSkyqQw2h
-sJQckNaQIcCXkUQecQa2UGe0l4HDSJ5gAETSenjyLBKFHf3hxiWBpw446/bODgdk
-0+oyreZqMpRz3vn9LC+Yt7SuVbTzRdx7nlKIiNvo8+btOuVm44evchLFHAq3Ni8D
-c+tP/wss3K4Xdp+t5SvEY/nLIu11Lw44HDVMYTuNz3Ya9psL70ZLaLZM8NromnEl
-CUMRNTPoOC7/KDRh2E9d6c1V4CC43wAsRhksGJnSYoiSVAhaVgLqVFQTsqNHxmcg
-3Y9AEBVzm3fZg6+DxAYu+amb+r8lk0Pp+N1t6rVbKXhkbAAxg0UDO3pY8Xcz0Y3g
-Qdd5rnHh1rJrehku7zTHvQaXEddUWmCoUGIXJ+bt4VOhErL6s5/j8GSG0xmfxgSE
-jnGj4Jwd0Vv19uZjsBDQ54R88GcA9YX8r48gr9JAwplrQ50m9KX6GwQhDRYKN/Dh
-zOt9DCUkqMqdi5T4v2qNTfkL7iXBMhsSkeYUQ/tFLyv4QQyli5uTUZ5FNXohOVAx
-TNyV9+gcV5WiBR0Aje6rwPW3oTkrPnVfZCdBwt/mZjPNMO5Se7D/lWE33yYu7bJ+
-gaxRNynhEOB7RaOePzDjn7LExahFmTFV0sgQxwQ2BYsfI22cdkAf6qOxdK/kqiQm
-lgzRpDjyPIFhaCCHnXyJdSqcHmDrCjcg2P6AVCDJGdFOBvupeJ7Kg7WV5EY7G6AU
-ng16tyumJSMWSzSks9M0Ikop6xhq3cV+Q0OArJoreQ6eonezXjM9Y865xjF80nJL
-V4lcRxdXfoKpXJwzc++pgkY9t55J0+cEyBvIXfKud1/HHOhewhoy5ATyi9LLM91n
-iW1DaQXlvHZgE7GFMSCVLxy6ZopBbm9tF0NQDFi8zUtGulD3Gkoc/Bp+DWb2vsX4
-S8W9vByNvIz/SWOGNbEs2irTRXccMAL7JHJ+74bwZZi5DRrqyQWHCn/3Ls2YPI6z
-lnfl15EE4G7g3+nrvP2lZFBXjsdG/U3HYi+tAyHkRN3oXvgnt9N76PoY8dlsNf6c
-RuNqgk31uO1sX/8du3Jxz87MlzWiG3kbAHMvbcoCgy/dW4JQcM3Sqg5PmF8i9wD1
-ZuqZ7zHpWILIWd13TM3UDolQZzl+GXEX62dPPL1vBtxHhDgQicdaWFXa6DX3dVwt
-DToWaAqrAPIrgxvNk5FHNCTEVTQkmCIL5JoinZSk7BAl8b085CPM6F7OjB5CR4Ts
-V+6UaTUZqk+z+raL+HJNW2ds1r7+t8Po5CydMBS4M/pE7b/laUnbRu7rO8cqKucn
-n+eYimib/0YuqZj9u2RXso4kzdOyIxGSGHkmSzYuoNRx80r+jHtcBBTqXk37t0FY
-X5O7QItCE+uwV1Sa12yg2dgJ6vKRPCEVyMoYUBwNbKEcw1pjG9Em7HwjOZK0UrO1
-yKRz6kxffVKN9Naf7lOnXooVuedY/jcaZ2zCZtASlOe8iiQK5prM4sbMixMp9ovL
-tTxy9E9kgvaI/mkzarloKPQGsk0WzuH+i39M3DOXrMf5HwfE+A55u1gnrHsxQlxp
-z5acwN42+4ln6axs4aweMGAhyEtBW8TdsNomwuPk+tpqZXHI2pqS4/aVOk8R8VE7
-IqtBx2QBMINT79PDPOn3K6v9HEt9fUHJ2TWJvKRKfsu5lECJPJSJA8OQ7zzw6zQt
-NXw8UhZRmNW0+eI5dykg+XsII7+njYa33EJ1Sy1Ni8ZT/izKfrKCwEm44KVAyUG5
-qUjghPPMNQY3D0qOl54DRfGVOxbHztUooblW+DnlLlpOy/+/B+H9Dscxosdx2/Mo
-RftJOMlLqK7AYIYAlw1zvqZo0pf7rCcLSLt+6FrPtNZe6ULFUacZ3RqyTZovsZi5
-Ucda3bLdOHX6tKL21bRfN7L0/BjF6BJETpG3p+rBYOyCwO6HvdenpMm6cT02nrfP
-QJtImjeW1ov6Pw02zNlIZAXFir78Z6AcMhV2iKEJxc1RMFBcXmylNXJmGlKYB3lJ
-jWo6qumLewTz5vzRu0vZCmOf+bKmuyVxckPbrzP+4OHKhpm95Kp6sUn2pvh0S8H3
-w1pjfZ9+sIaVgMspfRPgoWTyZ0flFvAX6DHWYVejMebwfAqZaa+UAJJ6jWQbMNzo
-ZtOhzCjV+2ZBYHvSiY7dtfaLwQJeMWEKIw32kEYv/Ts33n7dD/pAzZu0WCyfoqsQ
-MEXhbZYSCQTJ8/gqvdlurWOJL091z6Uw810YVt+wMqsBo5lnMsS3GqkzgM2PVzuV
-taddovr5CrWfAjQaFG8wcETiKEQFWS9JctKo0F+gwLwkVyc4fBSkjVmIliw1jXGu
-Enf2mBei+n8EaRB2nNa/CBVGQM24WEeMNq+TqaMvnEonvMtCIEpuJAO/NzJ1pxw0
-9S+LKq3lFoIQoON5glsjV82WseAbFXmynBmSbyUY/mZQpjuNSnwLfpz4630x5vuV
-VNglsZ8lW9XtSPh6GkMj+lLOCqJ5aZ4UEXDSYW7IaH4sPuQ4eAAUsKx/XlbmaOad
-hgK+3gHYi98fiGGQjt9OqKzQRxVFnHtoSwbMp/gjAWqjDCFdo7RkCqFjfB1DsSj0
-TrjZU1lVMrmdEhtUNjqfRpWN82f55fxZdrHEPUQIrOywdbRiNbONwm4AfSE8ViPz
-+SltYpQfF6g+tfZMwsoPSevLjdcmb1k3n8/lsEL99wpMT3NbibaXCjeJCZbAYK05
-rUw5bFTVAuv6i3Bax3rx5DqyQANS3S8TBVYrdXf9x7RpQ8oeb4oo+qn293bP4n5m
-nW/D/yvsAJYcm3lD7oW7D369nV/mwKPpNC4B9q6N1FiUndvdFSbyzfNfSF9LV0RU
-A/4Qm05HtE3PAUFYfwwP8MDg0HdltMn83VfqrEi/d76xlcxfoIh2RQQgqxCIS6KE
-AExIY/hPYDVxApznI39xNOp7IqdPEX3i7Cv7aHeFAwbhXYMNnkfFJJTkHRdcRiJ/
-RE1QPlC7ijH+IF02PE/seYg4GWrkeW3jvi+IKQ9BPBoYIx0P+7wHXf4ZGtZMourd
-N4fdwzFCDMFkS7wQC/GOqZltzF/gz1fWEGXRTH3Lqx0iKyiiLs2trQhFOzNw3B7E
-WxCIUjRMAAJ6vvUdvoFlMw8WfBkzCVple4yrCqIw6fJEq8v0q8EQ7qKDTfyPnFBt
-CtQZuTozfdPDnVHGmGPQKUODH/6Vwl+9/l7HDvV8/D/HKDnP581ix1a3bdokNtSK
-7rBfovpzYltYGpVxsC6MZByYEpvIh5nHQouLR4L3Je2wB3F9nBGjNhBvGDQlxcne
-AAgywpOpQfvfsnYRWt2vlQzwhHUgWhJmGMhGMmn4oKc5su87G7yzFEnq/yIUMOm/
-X0Zof/Qm92KCJS7YkLzP1GDO9XPMe+ZHeHVNXhVNCRxGNbHCHB9+g9v090sLLmal
-jpgrDks19uHv0yYiMqBdpstzxClRWxgHwrZO6jtbr5jeJuLVUxV0uuX76oeomUj2
-mAwoD5cB1U8W9Ew+cMjp5v6gg0LTk90HftjhrZmMA0Ll6TqFWjxge+jsswOY1SZi
-peuQGIHFcuQ7SEcyIbqju3bmeEGZwTz51yo8x2WqpCwB1a4UTngWJgDCySAI58fM
-eRL6r478CAZjk+fu9ZA85B7tFczl3lj0B4QHxkX370ZeCHy39qw8vMYIcPk3ytI0
-vmj5UCSeQDHHDcwo54wi83IFEWUFh18gP4ty5Tfvs6qv7qd455UQZTAO7lwpdBlp
-MJGlMqBHjDLGyY80p+O4vdlQBZ1uMH+48u91mokUP8p+tVVKh7bAw/HPG+SQsuNR
-DXF+gTm/hRuY7IYe3C7Myzc8bDTtFw6Es9BLAqzFFAMjzDVz7wY1rnZQq4mmLcKg
-AAMJaqItipKAroYIntXXJ3U8fsUt03M=
+ MIIOgwYJKoZIhvcNAQcDoIIOdDCCDnACAQIxggEwMIIBLAIBAoAU08PI+CBUqOd4
+Nbte7MLw2qCYn1UwDQYJKoZIhvcNAQEBBQAEggEASTTfHNyY+9hdXd+Eqtqk+yPb
+RA7rRXWR8tQAJsdy3zAlu8WHymq945fnsf0bAW4mODIPYhhevmdo5VaI54AzAWhk
+EfJvtRQlZZEMGZVKgUSwP4AG6cFaSnJuAYbi27nffM45PgD26O2WjOhnmM7minEC
+31/wUoxjxVOxIc8x+Ngo+TquyBeaK1iXcchwIUnbM0xRYMfccOAEhe/iytKFPzdg
+DJbDk+KbVGaUuUfhF+o4mMyJNezMUFxWkePcUgP12li57GTJSIyi8OQaFUu1qh0L
+KzQ2sYl8U0WmWQBhXqvuug47WI/6XrRDpKslIV1aV4XxD1Or6H3nf0fULjQZajCC
+DTUGCSqGSIb3DQEHATAUBggqhkiG9w0DBwQI+4Ch/cEogOSAgg0QvlelG9yDK2GE
+XX1wn8Xw0wCt+zIceXs8C6QuRSmZLEkZVv8Y+duMwi2A0tcg63HOmY2AfIPvTTt8
+eto3YwIklrfF20jBvCg/pT3kfm6TICWmMNd5XesTq8UNmkqzJQQ84L3Kbs/ix2pG
+9RaeXkrg0VO7FBDVH8b+jIT9IVDAEXgBQVefcCImVZ9L2hQWNABFrFXAQSTKjfFJ
+IEOfXUhTiH434V1RKJczhFiH5SNZ0kbaRjmaQkXqbXQ5kKoq8VNkmFc6vPCclTmq
+QJFfIUTepljWW/HuVkUycNYQQkblmWNF9FEwSx++x3Tz1FLR3UlzOkJCqr+tS3jv
+WFnI16VlOHaaHA++YKhW1PUujJcEdZaXBE0FC6JZF7IOAOjSdLSmRL9yU95erfgZ
+hRo2FB8EWVZitIG+DPU9vU59chGpqXYzZU4/aTpedGeWSZ9GFXRqwb6htmajjTWu
+l5fIME3hWt7kcejpuXCTDcdG4YcbngZu4hcepMrUhm9g2BdmIDb1YiB7290PMop8
+4nNo97tSBvhzk300cg6+pfxy1iAv3++g/ggOI+Y/gFmgN88mmBMWm0+mocJ0SZGY
+3+8K/8pDpJpfAAXSjayl7T2UXUdJe8fpOtetiHUr2zIbZXlM4IQw+0UMAVjTiaRT
+BIDGoPEcpCcxqPlSTTEie166uzzPXG9skVgennjN6YopwMC/WPaFRJu/eTlQOqlB
+EqvK9TKJG8u2yp00J04MGYXluY4l/o3/KLpT0mCOeOJm3KerfwQ/jU2oHHmvIATN
+XYy32ULqx/CjL+N3ax0Nu+UrgMQPcVhrTN/7lnZpFLYwXetGzH/4jdNfIfTc4yGn
+0GlVT6cVgJyV8wyYpbqCxHtCW83II8vXLjTfeIffHBoJU0fMMPWEIxRuMQSksm0H
+F1u/rfGVSXnueshbJUD3pnvTiLPuWcOexSxP+B8BCNfi21jX5Ha+U9RKrKbHc4h9
+PkiWxU6ZEqCBkdP9ssKnmMKMsrC7sZRoYziHNeqlZp/GFQmkI+DeFlqSPn3Lv9Or
+HF3bZokZCf0RGEkZDPrigaiEoL7PH/TtVZF8miL4JCLB0FVB08vWeeP5zjQT4H6J
+jSC2pw+5bA2UWGshgsKKAJJihYcOuybtzglh7nqmSSZcszz3GyuDhdR8KDrYwChU
+Hn13+rSWAbbqtxSyPc5fd22Q4Do2aD6PVdRadHjG0qeE7Dq46YHT3Z9KF0nQTLk8
+uYq8hL5+jQEgTnUB0yJTKdEcg05TyrMfNHWuM1pru0bqpf25vpwP5t+Sd/vgWJNc
+XtRLWrMdYBuSG9zOyLaH7bj0rcMhN3ULisKej9IT/xHOWSXXZjNoe1P3q9fvtMbg
+ZXAale/xJ6rXq6mLvZXivJfQJkPbSV7fByPPKO6TMnHbNEgLOGO3XtHEwC24JKup
+C0ohq03QqQHEisS9Mk5LvWmSchXR3/7vCtJFyOemQom7nCy8cx4Y1JGmZ4SGSaEs
+QZs7GC7Ftb/X82LRuknvS19ApOVFEs4/8t+LviD3x7Z9quVv+fZvydhzNKGRR6kQ
+fYZwK7rqqkvuFKgXqNbzlrtlUqOUPXJgdO7QHOtU8z+k2NzBWfOp6j+Ef8rc3GDU
+HSVZZ/Lz0RWedxRC1zoZJSol7ckMxIGIpDhtb9xgDmaGKILWOR9k+wG6+7ywQ2LE
+PB3myDOclvKUDyb/DqwRS9ch9yyYSmz8WXTgdSeyOjp8QT2JQuuOOhoooHuKSxAk
++7v/Fh5bNGtjHByuzMYSdLcWsLX+UohpDoc1heVgUA3R6EuIOJTA0nC653YmqIBp
+R5rsT+esub/EndweZTacmc2nDJxTKdZgMvdwhnsOZZBGsOaD7MXAS5vCsze+PQmY
+4+VqqWPASaclV6CygN4qSxmww6mVgmAgWVmJqfa6vOyb3zhx68TkNEp9rxJFcJSJ
+NiTTvWe0nF+o2/a1HZ8rZFdf65KsqGSiqu/6HoUuFzWLxRCqSjB9RkfSqrDVAVim
+pwL46zGRsqZV+5xrRQlxINNUbg/D11zcp1zdhQvhDrpBoLMjK7AaxA5msPYFy6Gm
+KMRAG2kyi802W5CPZWkbiEoUA8vkiICuxN+Pdh146zk9Ngl4PC3YpNCMtXK11ifd
+hYxmWqEuQ2AcdVTckosaWrFMn5MqEcR0aAXZbnjIMgTZ6SMYJBZMWjzJhe/UQjTo
+vICK7KAH82chpW2hG2I67z7e1Nv930RyL6JbYI8mSqgccPBzOBUhpHvKDM59z8Nc
+eStEYDdOcMz8P+c/H3Bh4WsyMWMOwWvjyy6GX5Bpl5z94tWFRn6W4FK5iDqp+HHm
+v5W1+hlFBxXtuzBcSQntcj8LoExJ2mK6BhZkaeAESMqPvNeNFmhEVUGq0/+c7T4I
+L+1YkQPcm/nIpwW/ITmkGmi5n5VsvbJFDbQe+h9LI2aqvWtzA0YT5Ed77Glbdbgq
+qB8EyXdr1BsBb7s7bbXm4Wf8UJkCZESg8iQExkUk8HqMJRxjctjma0DyyKVi4j8Q
++BA1EYBEX37641S+ZR9fYmQeuULGkf3d+w/ttgvm6YDZivsZYWkTscX+lUtoHhWN
+5EOAfllI0/DaGX15mGONMV8YA1PoCNEX3yKJ5tVGkxxUPK+Op7ZHvJmtb1fPMRRY
+z+evQ+NTXTZZzdr3Kfs4yYbuXG4e1odm2v/zBKG7JF3yWPMtXZZiMks/BkaXTq1P
+LrB0VxGcMsLeQ5HbbWJtchyCWyy63CNNbfYNohjxru52DjaAQlDKQT9pOiSmGJzb
+7+hNnKYnOfo6Du2ljz7C9C4mxnRJsRA2O9Cw66J5XPy1W+2+RmvP72jXwoFWYzPq
+jxNs2wxOYQjEDpXBTmCbW58F5cTbSTk3D15iCtYtf31tpuPpHEnz+2OvrX0WhygN
+esZJnln2Tu2ut1pVhAuJDLZTj24Y4MP0nmDINuLDAkFji0CwjACvW7M9SbIOLLYU
++5JHHjB7wqaTXWFzpt/ZKXMXlwCzWjo3pDERbrpYbwS3GHqmtcyIZK4EA7Ulka5Y
+7rLPWS5eKcjX3tp2FyX5pD52TpuUMPAk6vyefX+NznP7opvJpusHbkschojFVRDA
+zHIpIGeWjYcWLk5YTPagzH8o+4ci1OEk+OMc8i6PxkQDeBw1RiCAFfBnKPCSEtFk
+KJlw7fspk3/chA6mmvOHjkrQmUhUuDxAVGCVxl0K5LU3Y2IQxKGtCJk5YO4XD2e7
+5b0Ub+wy4Bb0l+z8HjuqEypFXDpQTd80NbhStZBgf2cB01elsqmKD9sT9wpFGKbC
+VaatDLsLx4XrBG6ueoFKBgFL6l7afEPct8wuSoUrX5MAGlge5xzQYAD5spLlEa9G
+Dt2KiPCsZcqWiaHiw5vk849FXUcfFfGl+0rEKhzcfUn3zkL1mGfqZ8Nf7qjMXdMy
+dbUUQYMZXtMtK3fnYBnavgaUcu0bZ7Av+GVTQvDxfpzSeMW8lK7Ko6mINFQVC8dx
+TEKWX+eApFUnTb11vNNxwxdOB2l5N+kfNLnVMhuYd7l8IHQxMMQTcf8hYu0owry6
+JkIdkhnF1kXVC2YWxo4VrDPwzkBWZE28ygBNhWgKCRhZnnbDEWPuqGP/IaLN4vww
+1lqkZltqZDddXvOTXN/tZmkkQHt2uP264vqJB2BkGzxOll5UDQ8V3gXwheuUGxYc
+gVL4ZJSKfHnUp6oRafIBnQs5RBvqdj2wewzT8AyPWImRG6fkYvsub8qIFqG6mu4Y
+ixAQ9oTgg/KOXYNsfYuLGswu/aNnAqMEjfMerSx7dDu7teETkWb+IQJtodOdE/LI
+yO/puds1M+V2H0TD36zXRyvEnpfm5BTURkxM8dI6meR37/JGtObtjg+Gzjpu6HGm
+sIYyhG8bvV0Vkuip4bEgBB6T39dt/DeElHABthUmzFZe/QC8j7IJjyCz40JWDJSo
+8wPtOoLnLeX0ynD8x8A5NsQk3W9fgEtv0WG6Uahs7P8GEZ5Uh9GPvWQpAkjKv7OZ
+XVHJdTBMJICbB1Bzr8Nl0qPfQrhFzTNBMjBEwyaBpzRiV1hdTB2YPJPbjQQtQGkO
+vT/EsAEWwSqDrQrDCfGRl7mhjdAsVFMjERdJE3/2TctY8VnLaRzUTSGkpCKxl+V4
+CLrBi96N80pxer5eKYtt5gtLFw0gZeeeqb2VDj6ChVnUjJ9r0TXzyy8ztwpB8X5Y
+mZUDASD1acdZZOiEp69WA6juQR0EGKQT5phh+k0HbziW+bXMM+7YwiRJzwX4obnd
+wgF+wyHht3Rzaptv5JSZMkc1RGSFIdWUwEp+3Ik6DGywiTcVkU65TQ7CsQJjmmkL
+AChG7tUBI4KmolT9D0rj3A90//wl3ACkCFq94m0BZOFiimUXFjqux135P5i37XRJ
+/8wgWZ0nzmXdFyTkEJEessAMbCkMiDHwaT7Lbs+S0qFeobh4DD3tkONnqSNa7md4
+945Z9MJiapzD3P33TvKhyQ0wHe5W0z4=
\ No newline at end of file
diff --git a/tests/data/wire/certs_no_format_specified.xml b/tests/data/wire/certs_no_format_specified.xml
index 4ab91a859..14a9f6525 100644
--- a/tests/data/wire/certs_no_format_specified.xml
+++ b/tests/data/wire/certs_no_format_specified.xml
@@ -1,85 +1,85 @@
2012-11-30
- 12
+ 1
- MIIOgwYJKoZIhvcNAQcDoIIOdDCCDnACAQIxggEwMIIBLAIBAoAUZcG9X+5aK8VZ
-FY8eJV9j+RImq58wDQYJKoZIhvcNAQEBBQAEggEAn/hOytP/StyRuXHcqFq6x+Za
-7gHfO8prXWdZW4e28NLt/x5ZOBHDDZ6buwwdXEZME0+RoiJvLqP2RNhZkEO8bkna
-pS76xLZE4NXyfxkeEs1vJYis0WJdt/56uCzBuud2SBLuMWoAWgF5alokN0uFpVgm
-CKCos+xv6Pisolc6geM8xQTYe6sLf5Z23LWftWfJqzuo/29glCCre7R80OLeZe5w
-pN6XztbYz06nhVByC35To8Lm0akWAAKU7sfqM1Nty4P0rwUJPKXo42uN1GKYbDbF
-x8piCAd+rs+q4Alu3qK/YaTPpMb2ECRMH6CYB8Klf/CbuWykkfS8zrsnpXT1kzCC
-DTUGCSqGSIb3DQEHATAUBggqhkiG9w0DBwQInjJWFaJcZz2Agg0QX6NlJUH17o20
-90gfjWV01mPmzLKx71JT+hyzKr5vHywDSRI/mdb3RqA59ZrIKeyWr0HXEOuABlul
-nxjc/Rfk1tiLQwh0iqlOjlMtRsxS6yDA0WNwK2Y9gaXcdgDm5Vioai18l4Pd0qzK
-fsof5a/jEJyunW1CZK19QciwfQ2pS8QbRYgeLRZRft2I+kv6cWXlGS6YrMqKQC8t
-QMxnXR4AuzVllPLbbIgtM3l9oS+6jl7jKyKogeroJ9FNLjoMBJLldRLGPRhkCmdJ
-Z1m+s/BAVUH08qgj2kmHzucdULLjlRcmma9m/h91TcQCXHAavf7S+U9QwIyGRh83
-t4Y7EqbQ93mOgjajFzILSL7AT/irgJpDu6CJqMu3EMNDA0mjxn5Cdvj40sufL/g3
-UyBwqosmIwAPzNDmhPtTKvHaHfGY/k8WhoIYfAA5Lhq1z22/RODZOY0Ch2XyxQM4
-s35eppe6IhnwyMv6HfrCrqE/o/16OrvvbaFQTeTlMvU0P7MIR4pVW6tRq4NEa5Wx
-JcvGutuMuzH1VMcqcKdc7wYyOqDOGU43kcV8PiALTIcqrhD8NDrKks1jSkyqQw2h
-sJQckNaQIcCXkUQecQa2UGe0l4HDSJ5gAETSenjyLBKFHf3hxiWBpw446/bODgdk
-0+oyreZqMpRz3vn9LC+Yt7SuVbTzRdx7nlKIiNvo8+btOuVm44evchLFHAq3Ni8D
-c+tP/wss3K4Xdp+t5SvEY/nLIu11Lw44HDVMYTuNz3Ya9psL70ZLaLZM8NromnEl
-CUMRNTPoOC7/KDRh2E9d6c1V4CC43wAsRhksGJnSYoiSVAhaVgLqVFQTsqNHxmcg
-3Y9AEBVzm3fZg6+DxAYu+amb+r8lk0Pp+N1t6rVbKXhkbAAxg0UDO3pY8Xcz0Y3g
-Qdd5rnHh1rJrehku7zTHvQaXEddUWmCoUGIXJ+bt4VOhErL6s5/j8GSG0xmfxgSE
-jnGj4Jwd0Vv19uZjsBDQ54R88GcA9YX8r48gr9JAwplrQ50m9KX6GwQhDRYKN/Dh
-zOt9DCUkqMqdi5T4v2qNTfkL7iXBMhsSkeYUQ/tFLyv4QQyli5uTUZ5FNXohOVAx
-TNyV9+gcV5WiBR0Aje6rwPW3oTkrPnVfZCdBwt/mZjPNMO5Se7D/lWE33yYu7bJ+
-gaxRNynhEOB7RaOePzDjn7LExahFmTFV0sgQxwQ2BYsfI22cdkAf6qOxdK/kqiQm
-lgzRpDjyPIFhaCCHnXyJdSqcHmDrCjcg2P6AVCDJGdFOBvupeJ7Kg7WV5EY7G6AU
-ng16tyumJSMWSzSks9M0Ikop6xhq3cV+Q0OArJoreQ6eonezXjM9Y865xjF80nJL
-V4lcRxdXfoKpXJwzc++pgkY9t55J0+cEyBvIXfKud1/HHOhewhoy5ATyi9LLM91n
-iW1DaQXlvHZgE7GFMSCVLxy6ZopBbm9tF0NQDFi8zUtGulD3Gkoc/Bp+DWb2vsX4
-S8W9vByNvIz/SWOGNbEs2irTRXccMAL7JHJ+74bwZZi5DRrqyQWHCn/3Ls2YPI6z
-lnfl15EE4G7g3+nrvP2lZFBXjsdG/U3HYi+tAyHkRN3oXvgnt9N76PoY8dlsNf6c
-RuNqgk31uO1sX/8du3Jxz87MlzWiG3kbAHMvbcoCgy/dW4JQcM3Sqg5PmF8i9wD1
-ZuqZ7zHpWILIWd13TM3UDolQZzl+GXEX62dPPL1vBtxHhDgQicdaWFXa6DX3dVwt
-DToWaAqrAPIrgxvNk5FHNCTEVTQkmCIL5JoinZSk7BAl8b085CPM6F7OjB5CR4Ts
-V+6UaTUZqk+z+raL+HJNW2ds1r7+t8Po5CydMBS4M/pE7b/laUnbRu7rO8cqKucn
-n+eYimib/0YuqZj9u2RXso4kzdOyIxGSGHkmSzYuoNRx80r+jHtcBBTqXk37t0FY
-X5O7QItCE+uwV1Sa12yg2dgJ6vKRPCEVyMoYUBwNbKEcw1pjG9Em7HwjOZK0UrO1
-yKRz6kxffVKN9Naf7lOnXooVuedY/jcaZ2zCZtASlOe8iiQK5prM4sbMixMp9ovL
-tTxy9E9kgvaI/mkzarloKPQGsk0WzuH+i39M3DOXrMf5HwfE+A55u1gnrHsxQlxp
-z5acwN42+4ln6axs4aweMGAhyEtBW8TdsNomwuPk+tpqZXHI2pqS4/aVOk8R8VE7
-IqtBx2QBMINT79PDPOn3K6v9HEt9fUHJ2TWJvKRKfsu5lECJPJSJA8OQ7zzw6zQt
-NXw8UhZRmNW0+eI5dykg+XsII7+njYa33EJ1Sy1Ni8ZT/izKfrKCwEm44KVAyUG5
-qUjghPPMNQY3D0qOl54DRfGVOxbHztUooblW+DnlLlpOy/+/B+H9Dscxosdx2/Mo
-RftJOMlLqK7AYIYAlw1zvqZo0pf7rCcLSLt+6FrPtNZe6ULFUacZ3RqyTZovsZi5
-Ucda3bLdOHX6tKL21bRfN7L0/BjF6BJETpG3p+rBYOyCwO6HvdenpMm6cT02nrfP
-QJtImjeW1ov6Pw02zNlIZAXFir78Z6AcMhV2iKEJxc1RMFBcXmylNXJmGlKYB3lJ
-jWo6qumLewTz5vzRu0vZCmOf+bKmuyVxckPbrzP+4OHKhpm95Kp6sUn2pvh0S8H3
-w1pjfZ9+sIaVgMspfRPgoWTyZ0flFvAX6DHWYVejMebwfAqZaa+UAJJ6jWQbMNzo
-ZtOhzCjV+2ZBYHvSiY7dtfaLwQJeMWEKIw32kEYv/Ts33n7dD/pAzZu0WCyfoqsQ
-MEXhbZYSCQTJ8/gqvdlurWOJL091z6Uw810YVt+wMqsBo5lnMsS3GqkzgM2PVzuV
-taddovr5CrWfAjQaFG8wcETiKEQFWS9JctKo0F+gwLwkVyc4fBSkjVmIliw1jXGu
-Enf2mBei+n8EaRB2nNa/CBVGQM24WEeMNq+TqaMvnEonvMtCIEpuJAO/NzJ1pxw0
-9S+LKq3lFoIQoON5glsjV82WseAbFXmynBmSbyUY/mZQpjuNSnwLfpz4630x5vuV
-VNglsZ8lW9XtSPh6GkMj+lLOCqJ5aZ4UEXDSYW7IaH4sPuQ4eAAUsKx/XlbmaOad
-hgK+3gHYi98fiGGQjt9OqKzQRxVFnHtoSwbMp/gjAWqjDCFdo7RkCqFjfB1DsSj0
-TrjZU1lVMrmdEhtUNjqfRpWN82f55fxZdrHEPUQIrOywdbRiNbONwm4AfSE8ViPz
-+SltYpQfF6g+tfZMwsoPSevLjdcmb1k3n8/lsEL99wpMT3NbibaXCjeJCZbAYK05
-rUw5bFTVAuv6i3Bax3rx5DqyQANS3S8TBVYrdXf9x7RpQ8oeb4oo+qn293bP4n5m
-nW/D/yvsAJYcm3lD7oW7D369nV/mwKPpNC4B9q6N1FiUndvdFSbyzfNfSF9LV0RU
-A/4Qm05HtE3PAUFYfwwP8MDg0HdltMn83VfqrEi/d76xlcxfoIh2RQQgqxCIS6KE
-AExIY/hPYDVxApznI39xNOp7IqdPEX3i7Cv7aHeFAwbhXYMNnkfFJJTkHRdcRiJ/
-RE1QPlC7ijH+IF02PE/seYg4GWrkeW3jvi+IKQ9BPBoYIx0P+7wHXf4ZGtZMourd
-N4fdwzFCDMFkS7wQC/GOqZltzF/gz1fWEGXRTH3Lqx0iKyiiLs2trQhFOzNw3B7E
-WxCIUjRMAAJ6vvUdvoFlMw8WfBkzCVple4yrCqIw6fJEq8v0q8EQ7qKDTfyPnFBt
-CtQZuTozfdPDnVHGmGPQKUODH/6Vwl+9/l7HDvV8/D/HKDnP581ix1a3bdokNtSK
-7rBfovpzYltYGpVxsC6MZByYEpvIh5nHQouLR4L3Je2wB3F9nBGjNhBvGDQlxcne
-AAgywpOpQfvfsnYRWt2vlQzwhHUgWhJmGMhGMmn4oKc5su87G7yzFEnq/yIUMOm/
-X0Zof/Qm92KCJS7YkLzP1GDO9XPMe+ZHeHVNXhVNCRxGNbHCHB9+g9v090sLLmal
-jpgrDks19uHv0yYiMqBdpstzxClRWxgHwrZO6jtbr5jeJuLVUxV0uuX76oeomUj2
-mAwoD5cB1U8W9Ew+cMjp5v6gg0LTk90HftjhrZmMA0Ll6TqFWjxge+jsswOY1SZi
-peuQGIHFcuQ7SEcyIbqju3bmeEGZwTz51yo8x2WqpCwB1a4UTngWJgDCySAI58fM
-eRL6r478CAZjk+fu9ZA85B7tFczl3lj0B4QHxkX370ZeCHy39qw8vMYIcPk3ytI0
-vmj5UCSeQDHHDcwo54wi83IFEWUFh18gP4ty5Tfvs6qv7qd455UQZTAO7lwpdBlp
-MJGlMqBHjDLGyY80p+O4vdlQBZ1uMH+48u91mokUP8p+tVVKh7bAw/HPG+SQsuNR
-DXF+gTm/hRuY7IYe3C7Myzc8bDTtFw6Es9BLAqzFFAMjzDVz7wY1rnZQq4mmLcKg
-AAMJaqItipKAroYIntXXJ3U8fsUt03M=
+ MIIOgwYJKoZIhvcNAQcDoIIOdDCCDnACAQIxggEwMIIBLAIBAoAU08PI+CBUqOd4
+Nbte7MLw2qCYn1UwDQYJKoZIhvcNAQEBBQAEggEASTTfHNyY+9hdXd+Eqtqk+yPb
+RA7rRXWR8tQAJsdy3zAlu8WHymq945fnsf0bAW4mODIPYhhevmdo5VaI54AzAWhk
+EfJvtRQlZZEMGZVKgUSwP4AG6cFaSnJuAYbi27nffM45PgD26O2WjOhnmM7minEC
+31/wUoxjxVOxIc8x+Ngo+TquyBeaK1iXcchwIUnbM0xRYMfccOAEhe/iytKFPzdg
+DJbDk+KbVGaUuUfhF+o4mMyJNezMUFxWkePcUgP12li57GTJSIyi8OQaFUu1qh0L
+KzQ2sYl8U0WmWQBhXqvuug47WI/6XrRDpKslIV1aV4XxD1Or6H3nf0fULjQZajCC
+DTUGCSqGSIb3DQEHATAUBggqhkiG9w0DBwQI+4Ch/cEogOSAgg0QvlelG9yDK2GE
+XX1wn8Xw0wCt+zIceXs8C6QuRSmZLEkZVv8Y+duMwi2A0tcg63HOmY2AfIPvTTt8
+eto3YwIklrfF20jBvCg/pT3kfm6TICWmMNd5XesTq8UNmkqzJQQ84L3Kbs/ix2pG
+9RaeXkrg0VO7FBDVH8b+jIT9IVDAEXgBQVefcCImVZ9L2hQWNABFrFXAQSTKjfFJ
+IEOfXUhTiH434V1RKJczhFiH5SNZ0kbaRjmaQkXqbXQ5kKoq8VNkmFc6vPCclTmq
+QJFfIUTepljWW/HuVkUycNYQQkblmWNF9FEwSx++x3Tz1FLR3UlzOkJCqr+tS3jv
+WFnI16VlOHaaHA++YKhW1PUujJcEdZaXBE0FC6JZF7IOAOjSdLSmRL9yU95erfgZ
+hRo2FB8EWVZitIG+DPU9vU59chGpqXYzZU4/aTpedGeWSZ9GFXRqwb6htmajjTWu
+l5fIME3hWt7kcejpuXCTDcdG4YcbngZu4hcepMrUhm9g2BdmIDb1YiB7290PMop8
+4nNo97tSBvhzk300cg6+pfxy1iAv3++g/ggOI+Y/gFmgN88mmBMWm0+mocJ0SZGY
+3+8K/8pDpJpfAAXSjayl7T2UXUdJe8fpOtetiHUr2zIbZXlM4IQw+0UMAVjTiaRT
+BIDGoPEcpCcxqPlSTTEie166uzzPXG9skVgennjN6YopwMC/WPaFRJu/eTlQOqlB
+EqvK9TKJG8u2yp00J04MGYXluY4l/o3/KLpT0mCOeOJm3KerfwQ/jU2oHHmvIATN
+XYy32ULqx/CjL+N3ax0Nu+UrgMQPcVhrTN/7lnZpFLYwXetGzH/4jdNfIfTc4yGn
+0GlVT6cVgJyV8wyYpbqCxHtCW83II8vXLjTfeIffHBoJU0fMMPWEIxRuMQSksm0H
+F1u/rfGVSXnueshbJUD3pnvTiLPuWcOexSxP+B8BCNfi21jX5Ha+U9RKrKbHc4h9
+PkiWxU6ZEqCBkdP9ssKnmMKMsrC7sZRoYziHNeqlZp/GFQmkI+DeFlqSPn3Lv9Or
+HF3bZokZCf0RGEkZDPrigaiEoL7PH/TtVZF8miL4JCLB0FVB08vWeeP5zjQT4H6J
+jSC2pw+5bA2UWGshgsKKAJJihYcOuybtzglh7nqmSSZcszz3GyuDhdR8KDrYwChU
+Hn13+rSWAbbqtxSyPc5fd22Q4Do2aD6PVdRadHjG0qeE7Dq46YHT3Z9KF0nQTLk8
+uYq8hL5+jQEgTnUB0yJTKdEcg05TyrMfNHWuM1pru0bqpf25vpwP5t+Sd/vgWJNc
+XtRLWrMdYBuSG9zOyLaH7bj0rcMhN3ULisKej9IT/xHOWSXXZjNoe1P3q9fvtMbg
+ZXAale/xJ6rXq6mLvZXivJfQJkPbSV7fByPPKO6TMnHbNEgLOGO3XtHEwC24JKup
+C0ohq03QqQHEisS9Mk5LvWmSchXR3/7vCtJFyOemQom7nCy8cx4Y1JGmZ4SGSaEs
+QZs7GC7Ftb/X82LRuknvS19ApOVFEs4/8t+LviD3x7Z9quVv+fZvydhzNKGRR6kQ
+fYZwK7rqqkvuFKgXqNbzlrtlUqOUPXJgdO7QHOtU8z+k2NzBWfOp6j+Ef8rc3GDU
+HSVZZ/Lz0RWedxRC1zoZJSol7ckMxIGIpDhtb9xgDmaGKILWOR9k+wG6+7ywQ2LE
+PB3myDOclvKUDyb/DqwRS9ch9yyYSmz8WXTgdSeyOjp8QT2JQuuOOhoooHuKSxAk
++7v/Fh5bNGtjHByuzMYSdLcWsLX+UohpDoc1heVgUA3R6EuIOJTA0nC653YmqIBp
+R5rsT+esub/EndweZTacmc2nDJxTKdZgMvdwhnsOZZBGsOaD7MXAS5vCsze+PQmY
+4+VqqWPASaclV6CygN4qSxmww6mVgmAgWVmJqfa6vOyb3zhx68TkNEp9rxJFcJSJ
+NiTTvWe0nF+o2/a1HZ8rZFdf65KsqGSiqu/6HoUuFzWLxRCqSjB9RkfSqrDVAVim
+pwL46zGRsqZV+5xrRQlxINNUbg/D11zcp1zdhQvhDrpBoLMjK7AaxA5msPYFy6Gm
+KMRAG2kyi802W5CPZWkbiEoUA8vkiICuxN+Pdh146zk9Ngl4PC3YpNCMtXK11ifd
+hYxmWqEuQ2AcdVTckosaWrFMn5MqEcR0aAXZbnjIMgTZ6SMYJBZMWjzJhe/UQjTo
+vICK7KAH82chpW2hG2I67z7e1Nv930RyL6JbYI8mSqgccPBzOBUhpHvKDM59z8Nc
+eStEYDdOcMz8P+c/H3Bh4WsyMWMOwWvjyy6GX5Bpl5z94tWFRn6W4FK5iDqp+HHm
+v5W1+hlFBxXtuzBcSQntcj8LoExJ2mK6BhZkaeAESMqPvNeNFmhEVUGq0/+c7T4I
+L+1YkQPcm/nIpwW/ITmkGmi5n5VsvbJFDbQe+h9LI2aqvWtzA0YT5Ed77Glbdbgq
+qB8EyXdr1BsBb7s7bbXm4Wf8UJkCZESg8iQExkUk8HqMJRxjctjma0DyyKVi4j8Q
++BA1EYBEX37641S+ZR9fYmQeuULGkf3d+w/ttgvm6YDZivsZYWkTscX+lUtoHhWN
+5EOAfllI0/DaGX15mGONMV8YA1PoCNEX3yKJ5tVGkxxUPK+Op7ZHvJmtb1fPMRRY
+z+evQ+NTXTZZzdr3Kfs4yYbuXG4e1odm2v/zBKG7JF3yWPMtXZZiMks/BkaXTq1P
+LrB0VxGcMsLeQ5HbbWJtchyCWyy63CNNbfYNohjxru52DjaAQlDKQT9pOiSmGJzb
+7+hNnKYnOfo6Du2ljz7C9C4mxnRJsRA2O9Cw66J5XPy1W+2+RmvP72jXwoFWYzPq
+jxNs2wxOYQjEDpXBTmCbW58F5cTbSTk3D15iCtYtf31tpuPpHEnz+2OvrX0WhygN
+esZJnln2Tu2ut1pVhAuJDLZTj24Y4MP0nmDINuLDAkFji0CwjACvW7M9SbIOLLYU
++5JHHjB7wqaTXWFzpt/ZKXMXlwCzWjo3pDERbrpYbwS3GHqmtcyIZK4EA7Ulka5Y
+7rLPWS5eKcjX3tp2FyX5pD52TpuUMPAk6vyefX+NznP7opvJpusHbkschojFVRDA
+zHIpIGeWjYcWLk5YTPagzH8o+4ci1OEk+OMc8i6PxkQDeBw1RiCAFfBnKPCSEtFk
+KJlw7fspk3/chA6mmvOHjkrQmUhUuDxAVGCVxl0K5LU3Y2IQxKGtCJk5YO4XD2e7
+5b0Ub+wy4Bb0l+z8HjuqEypFXDpQTd80NbhStZBgf2cB01elsqmKD9sT9wpFGKbC
+VaatDLsLx4XrBG6ueoFKBgFL6l7afEPct8wuSoUrX5MAGlge5xzQYAD5spLlEa9G
+Dt2KiPCsZcqWiaHiw5vk849FXUcfFfGl+0rEKhzcfUn3zkL1mGfqZ8Nf7qjMXdMy
+dbUUQYMZXtMtK3fnYBnavgaUcu0bZ7Av+GVTQvDxfpzSeMW8lK7Ko6mINFQVC8dx
+TEKWX+eApFUnTb11vNNxwxdOB2l5N+kfNLnVMhuYd7l8IHQxMMQTcf8hYu0owry6
+JkIdkhnF1kXVC2YWxo4VrDPwzkBWZE28ygBNhWgKCRhZnnbDEWPuqGP/IaLN4vww
+1lqkZltqZDddXvOTXN/tZmkkQHt2uP264vqJB2BkGzxOll5UDQ8V3gXwheuUGxYc
+gVL4ZJSKfHnUp6oRafIBnQs5RBvqdj2wewzT8AyPWImRG6fkYvsub8qIFqG6mu4Y
+ixAQ9oTgg/KOXYNsfYuLGswu/aNnAqMEjfMerSx7dDu7teETkWb+IQJtodOdE/LI
+yO/puds1M+V2H0TD36zXRyvEnpfm5BTURkxM8dI6meR37/JGtObtjg+Gzjpu6HGm
+sIYyhG8bvV0Vkuip4bEgBB6T39dt/DeElHABthUmzFZe/QC8j7IJjyCz40JWDJSo
+8wPtOoLnLeX0ynD8x8A5NsQk3W9fgEtv0WG6Uahs7P8GEZ5Uh9GPvWQpAkjKv7OZ
+XVHJdTBMJICbB1Bzr8Nl0qPfQrhFzTNBMjBEwyaBpzRiV1hdTB2YPJPbjQQtQGkO
+vT/EsAEWwSqDrQrDCfGRl7mhjdAsVFMjERdJE3/2TctY8VnLaRzUTSGkpCKxl+V4
+CLrBi96N80pxer5eKYtt5gtLFw0gZeeeqb2VDj6ChVnUjJ9r0TXzyy8ztwpB8X5Y
+mZUDASD1acdZZOiEp69WA6juQR0EGKQT5phh+k0HbziW+bXMM+7YwiRJzwX4obnd
+wgF+wyHht3Rzaptv5JSZMkc1RGSFIdWUwEp+3Ik6DGywiTcVkU65TQ7CsQJjmmkL
+AChG7tUBI4KmolT9D0rj3A90//wl3ACkCFq94m0BZOFiimUXFjqux135P5i37XRJ
+/8wgWZ0nzmXdFyTkEJEessAMbCkMiDHwaT7Lbs+S0qFeobh4DD3tkONnqSNa7md4
+945Z9MJiapzD3P33TvKhyQ0wHe5W0z4=
-
+
\ No newline at end of file
diff --git a/tests/data/wire/ext_conf-no_encoded_signature.xml b/tests/data/wire/ext_conf-no_encoded_signature.xml
new file mode 100644
index 000000000..099ebacf3
--- /dev/null
+++ b/tests/data/wire/ext_conf-no_encoded_signature.xml
@@ -0,0 +1,28 @@
+
+
+
+ Prod
+
+ http://mock-goal-state/manifest_of_ga.xml
+
+
+
+ Test
+
+ http://mock-goal-state/manifest_of_ga.xml
+
+
+
+
+
+
+
+
+
+ {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]}
+
+
+https://test.blob.core.windows.net/vhds/test-cs12.test-cs12.test-cs12.status?sr=b&sp=rw&se=9999-01-01&sk=key1&sv=2014-02-14&sig=hfRh7gzUE7sUtYwke78IOlZOrTRCYvkec4hGZ9zZzXo
+
+
+
diff --git a/tests/data/wire/ext_conf-no_gs_metadata.xml b/tests/data/wire/ext_conf-no_gs_metadata.xml
index 605e48425..ef5d3a164 100644
--- a/tests/data/wire/ext_conf-no_gs_metadata.xml
+++ b/tests/data/wire/ext_conf-no_gs_metadata.xml
@@ -19,7 +19,7 @@
- {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]}
+ {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]}
https://test.blob.core.windows.net/vhds/test-cs12.test-cs12.test-cs12.status?sr=b&sp=rw&se=9999-01-01&sk=key1&sv=2014-02-14&sig=hfRh7gzUE7sUtYwke78IOlZOrTRCYvkec4hGZ9zZzXo
diff --git a/tests/data/wire/ext_conf.xml b/tests/data/wire/ext_conf.xml
index 54d785159..fff507d40 100644
--- a/tests/data/wire/ext_conf.xml
+++ b/tests/data/wire/ext_conf.xml
@@ -15,11 +15,11 @@
-
+
- {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]}
+ {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]}
https://test.blob.core.windows.net/vhds/test-cs12.test-cs12.test-cs12.status?sr=b&sp=rw&se=9999-01-01&sk=key1&sv=2014-02-14&sig=hfRh7gzUE7sUtYwke78IOlZOrTRCYvkec4hGZ9zZzXo
diff --git a/tests/data/wire/ext_conf_additional_locations.xml b/tests/data/wire/ext_conf_additional_locations.xml
index 8f5e746b0..20c7fb873 100644
--- a/tests/data/wire/ext_conf_additional_locations.xml
+++ b/tests/data/wire/ext_conf_additional_locations.xml
@@ -24,7 +24,7 @@
- {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]}
+ {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]}
https://test.blob.core.windows.net/vhds/test-cs12.test-cs12.test-cs12.status?sr=b&sp=rw&se=9999-01-01&sk=key1&sv=2014-02-14&sig=hfRh7gzUE7sUtYwke78IOlZOrTRCYvkec4hGZ9zZzXo
diff --git a/tests/data/wire/ext_conf_autoupgrade.xml b/tests/data/wire/ext_conf_autoupgrade.xml
index 77a201ad9..74acf0af7 100644
--- a/tests/data/wire/ext_conf_autoupgrade.xml
+++ b/tests/data/wire/ext_conf_autoupgrade.xml
@@ -21,7 +21,7 @@
- {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]}
+ {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]}
https://test.blob.core.windows.net/vhds/test-cs12.test-cs12.test-cs12.status?sr=b&sp=rw&se=9999-01-01&sk=key1&sv=2014-02-14&sig=hfRh7gzUE7sUtYwke78IOlZOrTRCYvkec4hGZ9zZzXo
diff --git a/tests/data/wire/ext_conf_autoupgrade_internalversion.xml b/tests/data/wire/ext_conf_autoupgrade_internalversion.xml
index 44cad8781..afa27c679 100644
--- a/tests/data/wire/ext_conf_autoupgrade_internalversion.xml
+++ b/tests/data/wire/ext_conf_autoupgrade_internalversion.xml
@@ -21,7 +21,7 @@
- {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]}
+ {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]}
https://test.blob.core.windows.net/vhds/test-cs12.test-cs12.test-cs12.status?sr=b&sp=rw&se=9999-01-01&sk=key1&sv=2014-02-14&sig=hfRh7gzUE7sUtYwke78IOlZOrTRCYvkec4hGZ9zZzXo
diff --git a/tests/data/wire/ext_conf_dependencies_with_empty_settings.xml b/tests/data/wire/ext_conf_dependencies_with_empty_settings.xml
index b26395ec2..f705c2f1b 100644
--- a/tests/data/wire/ext_conf_dependencies_with_empty_settings.xml
+++ b/tests/data/wire/ext_conf_dependencies_with_empty_settings.xml
@@ -25,7 +25,7 @@
- {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]}
+ {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]}
https://test.blob.core.windows.net/vhds/test-cs12.test-cs12.test-cs12.status?sr=b&sp=rw&se=9999-01-01&sk=key1&sv=2014-02-14&sig=hfRh7gzUE7sUtYwke78IOlZOrTRCYvkec4hGZ9zZzXo
diff --git a/tests/data/wire/ext_conf_in_vm_artifacts_profile.xml b/tests/data/wire/ext_conf_in_vm_artifacts_profile.xml
index a1af74f78..9575139a6 100644
--- a/tests/data/wire/ext_conf_in_vm_artifacts_profile.xml
+++ b/tests/data/wire/ext_conf_in_vm_artifacts_profile.xml
@@ -20,7 +20,7 @@
- {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]}
+ {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]}
https://test.blob.core.windows.net/vhds/test-cs12.test-cs12.test-cs12.status?sr=b&sp=rw&se=9999-01-01&sk=key1&sv=2014-02-14&sig=hfRh7gzUE7sUtYwke78IOlZOrTRCYvkec4hGZ9zZzXo
diff --git a/tests/data/wire/ext_conf_in_vm_empty_artifacts_profile.xml b/tests/data/wire/ext_conf_in_vm_empty_artifacts_profile.xml
index cd5bb3d3e..a0c87cfb6 100644
--- a/tests/data/wire/ext_conf_in_vm_empty_artifacts_profile.xml
+++ b/tests/data/wire/ext_conf_in_vm_empty_artifacts_profile.xml
@@ -20,7 +20,7 @@
- {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]}
+ {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]}
https://test.blob.core.windows.net/vhds/test-cs12.test-cs12.test-cs12.status?sr=b&sp=rw&se=9999-01-01&sk=key1&sv=2014-02-14&sig=hfRh7gzUE7sUtYwke78IOlZOrTRCYvkec4hGZ9zZzXo
diff --git a/tests/data/wire/ext_conf_in_vm_metadata.xml b/tests/data/wire/ext_conf_in_vm_metadata.xml
index 9a4f89cb8..ff5e92ae2 100644
--- a/tests/data/wire/ext_conf_in_vm_metadata.xml
+++ b/tests/data/wire/ext_conf_in_vm_metadata.xml
@@ -21,7 +21,7 @@
- {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]}
+ {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]}
https://test.blob.core.windows.net/vhds/test-cs12.test-cs12.test-cs12.status?sr=b&sp=rw&se=9999-01-01&sk=key1&sv=2014-02-14&sig=hfRh7gzUE7sUtYwke78IOlZOrTRCYvkec4hGZ9zZzXo
diff --git a/tests/data/wire/ext_conf_internalversion.xml b/tests/data/wire/ext_conf_internalversion.xml
index 44cad8781..afa27c679 100644
--- a/tests/data/wire/ext_conf_internalversion.xml
+++ b/tests/data/wire/ext_conf_internalversion.xml
@@ -21,7 +21,7 @@
- {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]}
+ {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]}
https://test.blob.core.windows.net/vhds/test-cs12.test-cs12.test-cs12.status?sr=b&sp=rw&se=9999-01-01&sk=key1&sv=2014-02-14&sig=hfRh7gzUE7sUtYwke78IOlZOrTRCYvkec4hGZ9zZzXo
diff --git a/tests/data/wire/ext_conf_invalid_and_valid_handlers.xml b/tests/data/wire/ext_conf_invalid_and_valid_handlers.xml
index f9c95d694..bede284e7 100644
--- a/tests/data/wire/ext_conf_invalid_and_valid_handlers.xml
+++ b/tests/data/wire/ext_conf_invalid_and_valid_handlers.xml
@@ -22,11 +22,11 @@
- {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]}
- {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]}
+ {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]}
+ {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]}
- {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]}
+ {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]}
https://test.blob.core.windows.net/vhds/test-cs12.test-cs12.test-cs12.status?sr=b&sp=rw&se=9999-01-01&sk=key1&sv=2014-02-14&sig=hfRh7gzUE7sUtYwke78IOlZOrTRCYvkec4hGZ9zZzXo
diff --git a/tests/data/wire/ext_conf_invalid_vm_metadata.xml b/tests/data/wire/ext_conf_invalid_vm_metadata.xml
index 7c766220e..4eb35e87a 100644
--- a/tests/data/wire/ext_conf_invalid_vm_metadata.xml
+++ b/tests/data/wire/ext_conf_invalid_vm_metadata.xml
@@ -21,7 +21,7 @@
- {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]}
+ {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]}
https://test.blob.core.windows.net/vhds/test-cs12.test-cs12.test-cs12.status?sr=b&sp=rw&se=9999-01-01&sk=key1&sv=2014-02-14&sig=hfRh7gzUE7sUtYwke78IOlZOrTRCYvkec4hGZ9zZzXo
diff --git a/tests/data/wire/ext_conf_multiple_extensions.xml b/tests/data/wire/ext_conf_multiple_extensions.xml
index 5845a179f..bde568bd1 100644
--- a/tests/data/wire/ext_conf_multiple_extensions.xml
+++ b/tests/data/wire/ext_conf_multiple_extensions.xml
@@ -25,22 +25,22 @@
- {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F","protectedSettings":"MIIB4AYJKoZIhvcNAQcDoIIB0TCCAc0CAQAxggFpMIIBZQIBADBNMDkxNzA1BgoJkiaJk/IsZAEZFidXaW5kb3dzIEF6dXJlIENSUCBDZXJ0aWZpY2F0ZSBHZW5lcmF0b3ICEH3vWjYIrceWQigVQwoS8z0wDQYJKoZIhvcNAQEBBQAEggEANYey5W0qDqC6RHZlVnpLp2dWrMr1Rt5TCFkOjq1jU4y2y1FPtsTTKq9Z5pdGb/IHQo9VcT+OFglO3bChMbqc1vgmk4wkTQkgJVD3C8Rq4nv3uvQIux+g8zsa1MPKT5fTwG/dcrBp9xqySJLexUiuJljmNJgorGc0KtLwjnad4HTSKudDSo5DGskSDLxxLZYx0VVtQvgekOOwT/0C0pN4+JS/766jdUAnHR3oOuD5Dx7/c6EhFSoiYXMA0bUzH7VZeF8j/rkP1xscLQRrCScCNV2Ox424Y4RBbcbP/p69lDxGURcIKLKrIUhQdC8CfUMkQUEmFDLcOtxutCTFBZYMJzBbBgkqhkiG9w0BBwEwFAYIKoZIhvcNAwcECCuc0a4Gl8PAgDgcHekee/CivSTCXntJiCrltUDob8cX4YtIS6lq3H08Ar+2tKkpg5e3bOkdAo3q2GfIrGDm4MtVWw==","publicSettings":{"foo":"bar"}}}]}
+ {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9","protectedSettings":"MIIB4AYJKoZIhvcNAQcDoIIB0TCCAc0CAQAxggFpMIIBZQIBADBNMDkxNzA1BgoJkiaJk/IsZAEZFidXaW5kb3dzIEF6dXJlIENSUCBDZXJ0aWZpY2F0ZSBHZW5lcmF0b3ICEH3vWjYIrceWQigVQwoS8z0wDQYJKoZIhvcNAQEBBQAEggEANYey5W0qDqC6RHZlVnpLp2dWrMr1Rt5TCFkOjq1jU4y2y1FPtsTTKq9Z5pdGb/IHQo9VcT+OFglO3bChMbqc1vgmk4wkTQkgJVD3C8Rq4nv3uvQIux+g8zsa1MPKT5fTwG/dcrBp9xqySJLexUiuJljmNJgorGc0KtLwjnad4HTSKudDSo5DGskSDLxxLZYx0VVtQvgekOOwT/0C0pN4+JS/766jdUAnHR3oOuD5Dx7/c6EhFSoiYXMA0bUzH7VZeF8j/rkP1xscLQRrCScCNV2Ox424Y4RBbcbP/p69lDxGURcIKLKrIUhQdC8CfUMkQUEmFDLcOtxutCTFBZYMJzBbBgkqhkiG9w0BBwEwFAYIKoZIhvcNAwcECCuc0a4Gl8PAgDgcHekee/CivSTCXntJiCrltUDob8cX4YtIS6lq3H08Ar+2tKkpg5e3bOkdAo3q2GfIrGDm4MtVWw==","publicSettings":{"foo":"bar"}}}]}
- {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F","protectedSettings":"MIIBwAYJKoZIhvcNAQcDoIIBsTCCAa0CAQAxggFpMIIBZQIBADBNMDkxNzA1BgoJkiaJk/IsZAEZFidXaW5kb3dzIEF6dXJlIENSUCBDZXJ0aWZpY2F0ZSBHZW5lcmF0b3ICEH3vWjYIrceWQigVQwoS8z0wDQYJKoZIhvcNAQEBBQAEggEABILhQPoMx3NEbd/sS0xAAE4rJXwzJSE0bWr4OaKpcGS4ePtaNW8XWm+psYR9CBlXuGCuDVlFEdPmO2Ai8NX8TvT7RVYYc6yVQKpNQqO6Q9g9O52XXX4tBSFSCfoTzd1kbGC1c2wbXDyeROGCjraWuGHd4C9s9gytpgAlYicZjOqV3deo30F4vXZ+ZhCNpMkOvSXcsNpzTzQ/mskwNubN8MPkg/jEAzTHRpiJl3tjGtTqm00GHMqFF8/31jnoLQeQnWSmY+FBpiTUhPzyjufIcoZ+ueGXZiJ77xyH2Rghh5wvQM8oTVy2dwFQGeqjHOVgdgRNi/HgfZhcdltaQ8kjYDA7BgkqhkiG9w0BBwEwFAYIKoZIhvcNAwcECHPM0ZKBn+aWgBiVPT7zlkJA8eGuH7bNMTQCtGoJezToa24=","publicSettings":{"foo":"bar"}}}]}
+ {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9","protectedSettings":"MIIBwAYJKoZIhvcNAQcDoIIBsTCCAa0CAQAxggFpMIIBZQIBADBNMDkxNzA1BgoJkiaJk/IsZAEZFidXaW5kb3dzIEF6dXJlIENSUCBDZXJ0aWZpY2F0ZSBHZW5lcmF0b3ICEH3vWjYIrceWQigVQwoS8z0wDQYJKoZIhvcNAQEBBQAEggEABILhQPoMx3NEbd/sS0xAAE4rJXwzJSE0bWr4OaKpcGS4ePtaNW8XWm+psYR9CBlXuGCuDVlFEdPmO2Ai8NX8TvT7RVYYc6yVQKpNQqO6Q9g9O52XXX4tBSFSCfoTzd1kbGC1c2wbXDyeROGCjraWuGHd4C9s9gytpgAlYicZjOqV3deo30F4vXZ+ZhCNpMkOvSXcsNpzTzQ/mskwNubN8MPkg/jEAzTHRpiJl3tjGtTqm00GHMqFF8/31jnoLQeQnWSmY+FBpiTUhPzyjufIcoZ+ueGXZiJ77xyH2Rghh5wvQM8oTVy2dwFQGeqjHOVgdgRNi/HgfZhcdltaQ8kjYDA7BgkqhkiG9w0BBwEwFAYIKoZIhvcNAwcECHPM0ZKBn+aWgBiVPT7zlkJA8eGuH7bNMTQCtGoJezToa24=","publicSettings":{"foo":"bar"}}}]}
- {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F","protectedSettings":"MIIB4AYJKoZIhvcNAQcDoIIB0TCCAc0CAQAxggFpMIIBZQIBADBNMDkxNzA1BgoJkiaJk/IsZAEZFidXaW5kb3dzIEF6dXJlIENSUCBDZXJ0aWZpY2F0ZSBHZW5lcmF0b3ICEH3vWjYIrceWQigVQwoS8z0wDQYJKoZIhvcNAQEBBQAEggEAGSKUDRN64DIB7FS7yKXa07OXaFPhmdNnNDOAOD3/WVFb9fQ2bztV46waq7iRO+lpz7LSerRzIe6Kod9zCfK7ryukRomVHIfTIBwPjQ+Otn8ZD2aVcrxR0EI95x/SGyiESJRQnOMbpoVSWSu2KJUCPfycQ4ODbaazDc61k0JCmmRy12rQ4ttyWKhYwpwI2OYFHGr39N/YYq6H8skHj5ve1605i4P9XpfEyIwF5BbX59tDOAFFQtX7jzQcz//LtaHHjwLmysmD9OG5XyvfbBICwSYJfMX9Jh1aahLwcjL8Bd0vYyGL1ItMQF5KfDwog4+HLcRGx+S02Yngm3/YKS9DmzBbBgkqhkiG9w0BBwEwFAYIKoZIhvcNAwcECFGLNfK0bO5OgDgH90bRzqfgKK6EEh52XJfHz9G/ZL1mqP/ueWqo95PtEFo1gvI7z25V/pT0tBGibXgRhQXLFmwVTA==","publicSettings":{"foo":"bar"}}}]}
+ {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9","protectedSettings":"MIIB4AYJKoZIhvcNAQcDoIIB0TCCAc0CAQAxggFpMIIBZQIBADBNMDkxNzA1BgoJkiaJk/IsZAEZFidXaW5kb3dzIEF6dXJlIENSUCBDZXJ0aWZpY2F0ZSBHZW5lcmF0b3ICEH3vWjYIrceWQigVQwoS8z0wDQYJKoZIhvcNAQEBBQAEggEAGSKUDRN64DIB7FS7yKXa07OXaFPhmdNnNDOAOD3/WVFb9fQ2bztV46waq7iRO+lpz7LSerRzIe6Kod9zCfK7ryukRomVHIfTIBwPjQ+Otn8ZD2aVcrxR0EI95x/SGyiESJRQnOMbpoVSWSu2KJUCPfycQ4ODbaazDc61k0JCmmRy12rQ4ttyWKhYwpwI2OYFHGr39N/YYq6H8skHj5ve1605i4P9XpfEyIwF5BbX59tDOAFFQtX7jzQcz//LtaHHjwLmysmD9OG5XyvfbBICwSYJfMX9Jh1aahLwcjL8Bd0vYyGL1ItMQF5KfDwog4+HLcRGx+S02Yngm3/YKS9DmzBbBgkqhkiG9w0BBwEwFAYIKoZIhvcNAwcECFGLNfK0bO5OgDgH90bRzqfgKK6EEh52XJfHz9G/ZL1mqP/ueWqo95PtEFo1gvI7z25V/pT0tBGibXgRhQXLFmwVTA==","publicSettings":{"foo":"bar"}}}]}
- {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F","protectedSettings":"MIIEzAYJKoZIhvcNAQcDoIIEvTCCBLkCAQAxggFpMIIBZQIBADBNMDkxNzA1BgoJkiaJk/IsZAEZFidXaW5kb3dzIEF6dXJlIENSUCBDZXJ0aWZpY2F0ZSBHZW5lcmF0b3ICEH3vWjYIrceWQigVQwoS8z0wDQYJKoZIhvcNAQEBBQAEggEAFqLDBFGeuglluYmZb0Zw+ZlMiMIws9/LgmurVSRUTU/nSleIc9vOLcukfMeCpMativzHe23iDFy6p3XDkViNcuzqbhlPq5LQsXXg+xaUrrg8Xy+q7KUQdxzPdNBdpgkUh6yE2EFbqVLQ/7x+TkkSsw35uPT0nEqSj3yYFGH7X/NJ49fKU+ZvFDp/N+o54UbE6ZdxlHFtz6NJFxx5w4z5adQ8DgnUyS0bJ2denolknODfSW2D2alm00SXlI88CAjeHgEDkoLCduwkrDkSFAODcAiEHHX8oYCnfanatpjm7ZgSutS9y7+XUnGWxDYoujHDI9bbV0WpyDcx/DIrlZ+WcTCCA0UGCSqGSIb3DQEHATAUBggqhkiG9w0DBwQIrL18Lbp1qU6AggMgGklvozqr8HqYP+DwkvxdwHSpo+23QFxh70os+NJRtVgBv5NjPEziXo3FpXHMPvt0kp0IwXbwyy5vwnjCTA2sQOYgj77X6RmwF6+1gt2DIHDN1Q6jWzdcXZVHykSiF3gshbebRKO0hydfCaCyYL36HOZ8ugyCctOon5EflrnoOYDDHRbsr30DAxZCAwGOGZEeoU2+U+YdhuMvplnMryD1f6b8FQ7jXihe/zczAibX5/22NxhsVgALdsV5h6hwuTbspDt3V15/VU8ak7a4xxdBfXOX0HcQI86oqsFr7S7zIveoQHsW+wzlyMjwi6DRPFpz2wFkv5ivgFEvtCzDQP4aCqGI8VdqzR7aUDnuqiSCe/cbmv5mSmTYlDPTR03WS0IvgyeoNAzqCbYQe44AUBEZb/yT8Z3XxwW0GzcPMZQ0XjpcZiaKAueN9V8nJgNCEDPTJqpSjy+tEHmSgxn70+E57F0vzPvdQ3vOEeRj8zlBblHd4uVrhxdBMUuQ73JEQEha5rz0qcUy04Wmjld1rBuX6pdOqrArAYzTLJbIuLqDjlnYFsHLs9QBGvIEb9VFOlAm5JW8npBbIRHXqPfwZWs60+uNksTtsN3MxBxUWJPOByb4xRNx+nRpTOvfKKFlgq1ReK5bGSTCB7x0Ft3+T42LOQDrBPyxxtGzWs+aq05qFgI4n0h8X82wxJflK+kUdwvvG/ZY5MM+/le2zOrUeyzvxXsHoRetgg+DOk7v+v7VsuT1KuvTXvgzxoOFF3/T2pNPpE3h6bbP2BUqZ2yzPNziGFslywDLZ8W3OUZoQejGqobRePdgUoBi5q2um/sPnq81kOJ/qhIOVq581ZD4IQWLot8eK8vX0G/y7y71YelRR51cUfgR5WvZZf6LvYw+GpwOtSViugl9QxGCviSLgHTJSSEm0ijtbzKhwP4vEyydNDrz8+WYB8DNIV7K2Pc8JyxAM03FYX30CaaJ40pbEUuVQVEnkAD2E//29/ZzgNTf/LBMzMEP5j7wlL+QQpmPAtL/FlBrOJ4nDEqsOOhWzI1MN51xRZuv3e2RqzVPiSmrKtk=","publicSettings":{"foo":"bar"}}}]}
+ {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9","protectedSettings":"MIIEzAYJKoZIhvcNAQcDoIIEvTCCBLkCAQAxggFpMIIBZQIBADBNMDkxNzA1BgoJkiaJk/IsZAEZFidXaW5kb3dzIEF6dXJlIENSUCBDZXJ0aWZpY2F0ZSBHZW5lcmF0b3ICEH3vWjYIrceWQigVQwoS8z0wDQYJKoZIhvcNAQEBBQAEggEAFqLDBFGeuglluYmZb0Zw+ZlMiMIws9/LgmurVSRUTU/nSleIc9vOLcukfMeCpMativzHe23iDFy6p3XDkViNcuzqbhlPq5LQsXXg+xaUrrg8Xy+q7KUQdxzPdNBdpgkUh6yE2EFbqVLQ/7x+TkkSsw35uPT0nEqSj3yYFGH7X/NJ49fKU+ZvFDp/N+o54UbE6ZdxlHFtz6NJFxx5w4z5adQ8DgnUyS0bJ2denolknODfSW2D2alm00SXlI88CAjeHgEDkoLCduwkrDkSFAODcAiEHHX8oYCnfanatpjm7ZgSutS9y7+XUnGWxDYoujHDI9bbV0WpyDcx/DIrlZ+WcTCCA0UGCSqGSIb3DQEHATAUBggqhkiG9w0DBwQIrL18Lbp1qU6AggMgGklvozqr8HqYP+DwkvxdwHSpo+23QFxh70os+NJRtVgBv5NjPEziXo3FpXHMPvt0kp0IwXbwyy5vwnjCTA2sQOYgj77X6RmwF6+1gt2DIHDN1Q6jWzdcXZVHykSiF3gshbebRKO0hydfCaCyYL36HOZ8ugyCctOon5EflrnoOYDDHRbsr30DAxZCAwGOGZEeoU2+U+YdhuMvplnMryD1f6b8FQ7jXihe/zczAibX5/22NxhsVgALdsV5h6hwuTbspDt3V15/VU8ak7a4xxdBfXOX0HcQI86oqsFr7S7zIveoQHsW+wzlyMjwi6DRPFpz2wFkv5ivgFEvtCzDQP4aCqGI8VdqzR7aUDnuqiSCe/cbmv5mSmTYlDPTR03WS0IvgyeoNAzqCbYQe44AUBEZb/yT8Z3XxwW0GzcPMZQ0XjpcZiaKAueN9V8nJgNCEDPTJqpSjy+tEHmSgxn70+E57F0vzPvdQ3vOEeRj8zlBblHd4uVrhxdBMUuQ73JEQEha5rz0qcUy04Wmjld1rBuX6pdOqrArAYzTLJbIuLqDjlnYFsHLs9QBGvIEb9VFOlAm5JW8npBbIRHXqPfwZWs60+uNksTtsN3MxBxUWJPOByb4xRNx+nRpTOvfKKFlgq1ReK5bGSTCB7x0Ft3+T42LOQDrBPyxxtGzWs+aq05qFgI4n0h8X82wxJflK+kUdwvvG/ZY5MM+/le2zOrUeyzvxXsHoRetgg+DOk7v+v7VsuT1KuvTXvgzxoOFF3/T2pNPpE3h6bbP2BUqZ2yzPNziGFslywDLZ8W3OUZoQejGqobRePdgUoBi5q2um/sPnq81kOJ/qhIOVq581ZD4IQWLot8eK8vX0G/y7y71YelRR51cUfgR5WvZZf6LvYw+GpwOtSViugl9QxGCviSLgHTJSSEm0ijtbzKhwP4vEyydNDrz8+WYB8DNIV7K2Pc8JyxAM03FYX30CaaJ40pbEUuVQVEnkAD2E//29/ZzgNTf/LBMzMEP5j7wlL+QQpmPAtL/FlBrOJ4nDEqsOOhWzI1MN51xRZuv3e2RqzVPiSmrKtk=","publicSettings":{"foo":"bar"}}}]}
diff --git a/tests/data/wire/ext_conf_no_public.xml b/tests/data/wire/ext_conf_no_public.xml
index 63e7013cc..5ee9635cc 100644
--- a/tests/data/wire/ext_conf_no_public.xml
+++ b/tests/data/wire/ext_conf_no_public.xml
@@ -39,7 +39,7 @@
- {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F","protectedSettings":"MIICWgYJK"}}]}
+ {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9","protectedSettings":"MIICWgYJK"}}]}
diff --git a/tests/data/wire/ext_conf_required_features.xml b/tests/data/wire/ext_conf_required_features.xml
index 798ba5c52..2dedcdbab 100644
--- a/tests/data/wire/ext_conf_required_features.xml
+++ b/tests/data/wire/ext_conf_required_features.xml
@@ -32,7 +32,7 @@
- {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]}
+ {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]}
https://test.blob.core.windows.net/vhds/test-cs12.test-cs12.test-cs12.status?sr=b&sp=rw&se=9999-01-01&sk=key1&sv=2014-02-14&sig=hfRh7gzUE7sUtYwke78IOlZOrTRCYvkec4hGZ9zZzXo
diff --git a/tests/data/wire/ext_conf_rsm_version.xml b/tests/data/wire/ext_conf_rsm_version.xml
index 806063541..d76ac6453 100644
--- a/tests/data/wire/ext_conf_rsm_version.xml
+++ b/tests/data/wire/ext_conf_rsm_version.xml
@@ -25,7 +25,7 @@
- {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]}
+ {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]}
diff --git a/tests/data/wire/ext_conf_sequencing.xml b/tests/data/wire/ext_conf_sequencing.xml
index 3c9a2ddd7..99ffd402c 100644
--- a/tests/data/wire/ext_conf_sequencing.xml
+++ b/tests/data/wire/ext_conf_sequencing.xml
@@ -23,12 +23,12 @@
- {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]}
+ {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]}
- {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]}
+ {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]}
https://test.blob.core.windows.net/vhds/test-cs12.test-cs12.test-cs12.status?sr=b&sp=rw&se=9999-01-01&sk=key1&sv=2014-02-14&sig=hfRh7gzUE7sUtYwke78IOlZOrTRCYvkec4hGZ9zZzXo
diff --git a/tests/data/wire/ext_conf_settings_case_mismatch.xml b/tests/data/wire/ext_conf_settings_case_mismatch.xml
index 71286c5bf..cb7c82d73 100644
--- a/tests/data/wire/ext_conf_settings_case_mismatch.xml
+++ b/tests/data/wire/ext_conf_settings_case_mismatch.xml
@@ -25,27 +25,27 @@
- {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]}
+ {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]}
- {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]}
+ {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]}
- {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]}
+ {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]}
- {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]}
+ {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]}
- {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]}
+ {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]}
diff --git a/tests/data/wire/ext_conf_upgradeguid.xml b/tests/data/wire/ext_conf_upgradeguid.xml
index 2ec7147bb..7cd013b5b 100644
--- a/tests/data/wire/ext_conf_upgradeguid.xml
+++ b/tests/data/wire/ext_conf_upgradeguid.xml
@@ -19,7 +19,7 @@
- {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]}
+ {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]}
https://test.blob.core.windows.net/vhds/test-cs12.test-cs12.test-cs12.status?sr=b&sp=rw&se=9999-01-01&sk=key1&sv=2014-02-14&sig=hfRh7gzUE7sUtYwke78IOlZOrTRCYvkec4hGZ9zZzXo
diff --git a/tests/data/wire/ext_conf_version_missing_in_agent_family.xml b/tests/data/wire/ext_conf_version_missing_in_agent_family.xml
index 3f81ed119..eee17a4ef 100644
--- a/tests/data/wire/ext_conf_version_missing_in_agent_family.xml
+++ b/tests/data/wire/ext_conf_version_missing_in_agent_family.xml
@@ -23,7 +23,7 @@
- {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]}
+ {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]}
diff --git a/tests/data/wire/ext_conf_version_missing_in_manifest.xml b/tests/data/wire/ext_conf_version_missing_in_manifest.xml
index c750d5d3a..4d3ebd70c 100644
--- a/tests/data/wire/ext_conf_version_missing_in_manifest.xml
+++ b/tests/data/wire/ext_conf_version_missing_in_manifest.xml
@@ -31,7 +31,7 @@
- {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]}
+ {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]}
diff --git a/tests/data/wire/ext_conf_version_not_from_rsm.xml b/tests/data/wire/ext_conf_version_not_from_rsm.xml
index 9da8f5da7..9636c80d4 100644
--- a/tests/data/wire/ext_conf_version_not_from_rsm.xml
+++ b/tests/data/wire/ext_conf_version_not_from_rsm.xml
@@ -25,7 +25,7 @@
- {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]}
+ {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]}
diff --git a/tests/data/wire/ext_conf_vm_not_enabled_for_rsm_upgrades.xml b/tests/data/wire/ext_conf_vm_not_enabled_for_rsm_upgrades.xml
index 384723f46..e7017c4ce 100644
--- a/tests/data/wire/ext_conf_vm_not_enabled_for_rsm_upgrades.xml
+++ b/tests/data/wire/ext_conf_vm_not_enabled_for_rsm_upgrades.xml
@@ -25,7 +25,7 @@
- {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]}
+ {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]}
diff --git a/tests/data/wire/incorrect-certs.xml b/tests/data/wire/incorrect-certs.xml
new file mode 100644
index 000000000..66a231ee8
--- /dev/null
+++ b/tests/data/wire/incorrect-certs.xml
@@ -0,0 +1,85 @@
+
+
+ 2012-11-30
+ 5
+ Pkcs7BlobWithPfxContents
+ MIIOgwYJKoZIhvcNAQcDoIIOdDCCDnACAQIxggEwMIIBLAIBAoAUiF8ZYMs9mMa8
+QOEMxDaIhGza+0IwDQYJKoZIhvcNAQEBBQAEggEAQW7GyeRVEhHSU1/dzV0IndH0
+rDQk+27MvlsWTcpNcgGFtfRYxu5bzmp0+DoimX3pRBlSFOpMJ34jpg4xs78EsSWH
+FRhCf3EGuEUBHo6yR8FhXDTuS7kZ0UmquiCI2/r8j8gbaGBNeP8IRizcAYrPMA5S
+E8l1uCrw7DHuLscbVni/7UglGaTfFS3BqS5jYbiRt2Qh3p+JPUfm51IG3WCIw/WS
+2QHebmHxvMFmAp8AiBWSQJizQBEJ1lIfhhBMN4A7NadMWAe6T2DRclvdrQhJX32k
+amOiogbW4HJsL6Hphn7Frrw3CENOdWMAvgQBvZ3EjAXgsJuhBA1VIrwofzlDljCC
+DTUGCSqGSIb3DQEHATAUBggqhkiG9w0DBwQIxcvw9qx4y0qAgg0QrINXpC23BWT2
+Fb9N8YS3Be9eO3fF8KNdM6qGf0kKR16l/PWyP2L+pZxCcCPk83d070qPdnJK9qpJ
+6S1hI80Y0oQnY9VBFrdfkc8fGZHXqm5jNS9G32v/AxYpJJC/qrAQnWuOdLtOZaGL
+94GEh3XRagvz1wifv8SRI8B1MzxrpCimeMxHkL3zvJFg9FjLGdrak868feqhr6Nb
+pqH9zL7bMq8YP788qTRELUnL72aDzGAM7HEj7V4yu2uD3i3Ryz3bqWaj9IF38Sa0
+6rACBkiNfZBPgExoMUm2GNVyx8hTis2XKRgz4NLh29bBkKrArK9sYDncE9ocwrrX
+AQ99yn03Xv6TH8bRp0cSj4jzBXc5RFsUQG/LxzJVMjvnkDbwNE41DtFiYz5QVcv1
+cMpTH16YfzSL34a479eNq/4+JAs/zcb2wjBskJipMUU4hNx5fhthvfKwDOQbLTqN
+HcP23iPQIhjdUXf6gpu5RGu4JZ0dAMHMHFKvNL6TNejwx/H6KAPp6rCRsYi6QhAb
+42SXdZmhAyQsFpGD9U5ieJApqeCHfj9Xhld61GqLJA9+WLVhDPADjqHoAVvrOkKH
+OtPegId/lWnCB7p551klAjiEA2/DKxFBIAEhqZpiLl+juZfMXovkdmGxMP4gvNNF
+gbS2k5A0IJ8q51gZcH1F56smdAmi5kvhPnFdy/9gqeI/F11F1SkbPVLImP0mmrFi
+zQD5JGfEu1psUYvhpOdaYDkmAK5qU5xHSljqZFz5hXNt4ebvSlurHAhunJb2ln3g
+AJUHwtZnVBrtYMB0w6fdwYqMxXi4vLeqUiHtIQtbOq32zlSryNPQqG9H0iP9l/G1
+t7oUfr9woI/B0kduaY9jd5Qtkqs1DoyfNMSaPNohUK/CWOTD51qOadzSvK0hJ+At
+033PFfv9ilaX6GmzHdEVEanrn9a+BoBCnGnuysHk/8gdswj9OzeCemyIFJD7iObN
+rNex3SCf3ucnAejJOA0awaLx88O1XTteUjcFn26EUji6DRK+8JJiN2lXSyQokNeY
+ox6Z4hFQDmw/Q0k/iJqe9/Dq4zA0l3Krkpra0DZoWh5kzYUA0g5+Yg6GmRNRa8YG
+tuuD6qK1SBEzmCYff6ivjgsXV5+vFBSjEpx2dPEaKdYxtHMOjkttuTi1mr+19dVf
+hSltbzfISbV9HafX76dhwZJ0QwsUx+aOW6OrnK8zoQc5AFOXpe9BrrOuEX01qrM0
+KX5tS8Zx5HqDLievjir194oi3r+nAiG14kYlGmOTHshu7keGCgJmzJ0iVG/i+TnV
+ZSLyd8OqV1F6MET1ijgR3OPL3kt81Zy9lATWk/DgKbGBkkKAnXO2HUw9U34JFyEy
+vEc81qeHci8sT5QKSFHiP3r8EcK8rT5k9CHpnbFmg7VWSMVD0/wRB/C4BiIw357a
+xyJ/q1NNvOZVAyYzIzf9TjwREtyeHEo5kS6hyWSn7fbFf3sNGO2I30veWOvE6kFA
+HMtF3NplOrTYcM7fAK5zJCBK20oU645TxI8GsICMog7IFidFMdRn4MaXpwAjEZO4
+44m2M+4XyeRCAZhp1Fu4mDiHGqgd44mKtwvLACVF4ygWZnACDpI17X88wMnwL4uU
+vgehLZdAE89gvukSCsET1inVBnn/hVenCRbbZ++IGv2XoYvRfeezfOoNUcJXyawQ
+JFqN0CRB5pliuCesTO2urn4HSwGGoeBd507pGWZmOAjbNjGswlJJXF0NFnNW/zWw
+UFYy+BI9axuhWTSnCXbNbngdNQKHznKe1Lwit6AI3U9jS33pM3W+pwUAQegVdtpG
+XT01YgiMCBX+b8B/xcWTww0JbeUwKXudzKsPhQmaA0lubAo04JACMfON8jSZCeRV
+TyIzgacxGU6YbEKH4PhYTGl9srcWIT9iGSYD53V7Kyvjumd0Y3Qc3JLnuWZT6Oe3
+uJ4xz9jJtoaTDvPJQNK3igscjZnWZSP8XMJo1/f7vbvD57pPt1Hqdirp1EBQNshk
+iX9CUh4fuGFFeHf6MtGxPofbXmvA2GYcFsOez4/2eOTEmo6H3P4Hrya97XHS0dmD
+zFSAjzAlacTrn1uuxtxFTikdOwvdmQJJEfyYWCB1lqWOZi97+7nzqyXMLvMgmwug
+ZF/xHFMhFTR8Wn7puuwf36JpPQiM4oQ/Lp66zkS4UlKrVsmSXIXudLMg8SQ5WqK8
+DjevEZwsHHaMtfDsnCAhAdRc2jCpyHKKnmhCDdkcdJJEymWKILUJI5PJ3XtiMHnR
+Sa35OOICS0lTq4VwhUdkGwGjRoY1GsriPHd6LOt1aom14yJros1h7ta604hSCn4k
+zj9p7wY9gfgkXWXNfmarrZ9NNwlHxzgSva+jbJcLmE4GMX5OFHHGlRj/9S1xC2Wf
+MY9orzlooGM74NtmRi4qNkFj3dQCde8XRR4wh2IvPUCsr4j+XaoCoc3R5Rn/yNJK
+zIkccJ2K14u9X/A0BLXHn5Gnd0tBYcVOqP6dQlW9UWdJC/Xooh7+CVU5cZIxuF/s
+Vvg+Xwiv3XqekJRu3cMllJDp5rwe5EWZSmnoAiGKjouKAIszlevaRiD/wT6Zra3c
+Wn/1U/sGop6zRscHR7pgI99NSogzpVGThUs+ez7otDBIdDbLpMjktahgWoi1Vqhc
+fNZXjA6ob4zTWY/16Ys0YWxHO+MtyWTMP1dnsqePDfYXGUHe8yGxylbcjfrsVYta
+4H6eYR86eU3eXB+MpS/iA4jBq4QYWR9QUkd6FDfmRGgWlMXhisPv6Pfnj384NzEV
+Emeg7tW8wzWR64EON9iGeGYYa2BBl2FVaayMEoUhthhFcDM1r3/Mox5xF0qnlys4
+goWkMzqbzA2t97bC0KDGzkcHT4wMeiJBLDZ7S2J2nDAEhcTLY0P2zvOB4879pEWx
+Bd15AyG1DvNssA5ooaDzKi/Li6NgDuMJ8W7+tmsBwDvwuf2N3koqBeXfKhR4rTqu
+Wg1k9fX3+8DzDf0EjtDZJdfWZAynONi1PhZGbNbaMKsQ+6TflkCACInRdOADR5GM
+rL7JtrgF1a9n0HD9vk2WGZqKI71tfS8zODkOZDD8aAusD2DOSmVZl48HX/t4i4Wc
+3dgi/gkCMrfK3wOujb8tL4zjnlVkM7kzKk0MgHuA1w81zFjeMFvigHes4IWhQVcz
+ek3l4bGifI2kzU7bGIi5e/019ppJzGsVcrOE/3z4GS0DJVk6fy7MEMIFx0LhJPlL
+T+9HMH85sSYb97PTiMWpfBvNw3FSC7QQT9FC3L8d/XtMY3NvZoc7Fz7cSGaj7NXG
+1OgVnAzMunPa3QaduoxMF9346s+4a+FrpRxL/3bb4skojjmmLqP4dsbD1uz0fP9y
+xSifnTnrtjumYWMVi+pEb5kR0sTHl0XS7qKRi3SEfv28uh72KdvcufonIA5rnEb5
++yqAZiqW2OxVsRoVLVODPswP4VIDiun2kCnfkQygPzxlZUeDZur0mmZ3vwC81C1Q
+dZcjlukZcqUaxybUloUilqfNeby+2Uig0krLh2+AM4EqR63LeZ/tk+zCitHeRBW0
+wl3Bd7ShBFg6kN5tCJlHf/G6suIJVr+A9BXfwekO9+//CutKakCwmJTUiNWbQbtN
+q3aNCnomyD3WjvUbitVO0CWYjZrmMLIsPtzyLQydpT7tjXpHgvwm5GYWdUGnNs4y
+NbA262sUl7Ku/GDw1CnFYXbxl+qxbucLtCdSIFR2xUq3rEO1MXlD/txdTxn6ANax
+hi9oBg8tHzuGYJFiCDCvbVVTHgWUSnm/EqfclpJzGmxt8g7vbaohW7NMmMQrLBFP
+G6qBypgvotx1iJWaHVLNNiXvyqQwTtelNPAUweRoNawBp/5KTwwy/tHeF0gsVQ7y
+mFX4umub9YT34Lpe7qUPKNxXzFcUgAf1SA6vyZ20UI7p42S2OT2PrahJ+uO6LQVD
++REhtN0oyS3G6HzAmKkBgw7LcV3XmAr39iSR7mdmoHSJuI9bjveAPhniK+N6uuln
+xf17Qnw5NWfr9MXcLli7zqwMglU/1bNirkwVqf/ogi/zQ3JYCo6tFGf/rnGQAORJ
+hvOq2SEYXnizPPIH7VrpE16+jUXwgpiQ8TDyeLPmpZVuhXTXiCaJO5lIwmLQqkmg
+JqNiT9V44sksNFTGNKgZo5O9rEqfqX4dLjfv6pGJL+MFXD9if4f1JQiXJfhcRcDh
+Ff9B6HukgbJ1H96eLUUNj8sL1+WPOqawkS4wg7tVaERE8CW7mqk15dCysn9shSut
+I+7JU7+dZsxpj0ownrxuPAFuT8ZlcBPrFzPUwTlW1G0CbuEco8ijfy5IfbyGCn5s
+K/0bOfAuNVGoOpLZ1dMki2bGdBwQOQlkLKhAxYcCVQ0/urr1Ab+VXU9kBsIU8ssN
+GogKngYpuUV0PHmpzmobielOHLjNqA2v9vQSV3Ed48wRy5OCwLX1+vYmYlggMDGt
+wfl+7QbXYf+k5WnELf3IqYvh8ZWexa0=
+
+
\ No newline at end of file
diff --git a/tests/data/wire/invalid_config/ext_conf_multiple_depends_on_for_single_handler.xml b/tests/data/wire/invalid_config/ext_conf_multiple_depends_on_for_single_handler.xml
index 8d76b732c..a9aa7c49a 100644
--- a/tests/data/wire/invalid_config/ext_conf_multiple_depends_on_for_single_handler.xml
+++ b/tests/data/wire/invalid_config/ext_conf_multiple_depends_on_for_single_handler.xml
@@ -28,16 +28,16 @@
- {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]}
+ {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]}
- {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]}
+ {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]}
- {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]}
+ {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]}
https://test.blob.core.windows.net/vhds/test-cs12.test-cs12.test-cs12.status?sr=b&sp=rw&se=9999-01-01&sk=key1&sv=2014-02-14&sig=hfRh7gzUE7sUtYwke78IOlZOrTRCYvkec4hGZ9zZzXo
diff --git a/tests/data/wire/invalid_config/ext_conf_multiple_runtime_settings_same_plugin.xml b/tests/data/wire/invalid_config/ext_conf_multiple_runtime_settings_same_plugin.xml
index 43e1e0281..4de9a4ceb 100644
--- a/tests/data/wire/invalid_config/ext_conf_multiple_runtime_settings_same_plugin.xml
+++ b/tests/data/wire/invalid_config/ext_conf_multiple_runtime_settings_same_plugin.xml
@@ -21,8 +21,8 @@
- {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]}
- {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]}
+ {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]}
+ {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]}
https://test.blob.core.windows.net/vhds/test-cs12.test-cs12.test-cs12.status?sr=b&sp=rw&se=9999-01-01&sk=key1&sv=2014-02-14&sig=hfRh7gzUE7sUtYwke78IOlZOrTRCYvkec4hGZ9zZzXo
diff --git a/tests/data/wire/invalid_config/ext_conf_multiple_settings_for_same_handler.xml b/tests/data/wire/invalid_config/ext_conf_multiple_settings_for_same_handler.xml
index 7351c8bf5..a1cc86381 100644
--- a/tests/data/wire/invalid_config/ext_conf_multiple_settings_for_same_handler.xml
+++ b/tests/data/wire/invalid_config/ext_conf_multiple_settings_for_same_handler.xml
@@ -21,10 +21,10 @@
- {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]}
+ {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]}
- {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]}
+ {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]}
https://test.blob.core.windows.net/vhds/test-cs12.test-cs12.test-cs12.status?sr=b&sp=rw&se=9999-01-01&sk=key1&sv=2014-02-14&sig=hfRh7gzUE7sUtYwke78IOlZOrTRCYvkec4hGZ9zZzXo
diff --git a/tests/data/wire/invalid_config/ext_conf_plugin_settings_version_mismatch.xml b/tests/data/wire/invalid_config/ext_conf_plugin_settings_version_mismatch.xml
index dcf101464..7220b59c7 100644
--- a/tests/data/wire/invalid_config/ext_conf_plugin_settings_version_mismatch.xml
+++ b/tests/data/wire/invalid_config/ext_conf_plugin_settings_version_mismatch.xml
@@ -19,10 +19,10 @@
- {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]}
+ {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]}
- {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]}
+ {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]}
https://test.blob.core.windows.net/vhds/test-cs12.test-cs12.test-cs12.status?sr=b&sp=rw&se=9999-01-01&sk=key1&sv=2014-02-14&sig=hfRh7gzUE7sUtYwke78IOlZOrTRCYvkec4hGZ9zZzXo
diff --git a/tests/data/wire/invalid_config/ext_conf_single_and_multi_config_settings_same_plugin.xml b/tests/data/wire/invalid_config/ext_conf_single_and_multi_config_settings_same_plugin.xml
index 8a30ddbaf..899d23398 100644
--- a/tests/data/wire/invalid_config/ext_conf_single_and_multi_config_settings_same_plugin.xml
+++ b/tests/data/wire/invalid_config/ext_conf_single_and_multi_config_settings_same_plugin.xml
@@ -21,8 +21,8 @@
- {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]}
- {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"BD447EF71C3ADDF7C837E84D630F3FAC22CCD22F","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]}
+ {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]}
+ {"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"F6ABAA61098A301EBB8A571C3C7CF77F355F7FA9","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]}
https://test.blob.core.windows.net/vhds/test-cs12.test-cs12.test-cs12.status?sr=b&sp=rw&se=9999-01-01&sk=key1&sv=2014-02-14&sig=hfRh7gzUE7sUtYwke78IOlZOrTRCYvkec4hGZ9zZzXo
diff --git a/tests/data/wire/trans_cert b/tests/data/wire/trans_cert
index 35793e019..c522a2f51 100644
--- a/tests/data/wire/trans_cert
+++ b/tests/data/wire/trans_cert
@@ -1,19 +1,19 @@
-----BEGIN CERTIFICATE-----
-MIIDEzCCAfugAwIBAgIUDcHXiRT74wOkLZYnyoZibT9+2G8wDQYJKoZIhvcNAQEL
-BQAwGTEXMBUGA1UEAwwOTGludXhUcmFuc3BvcnQwHhcNMjIwODEyMTgzMTM5WhcN
-MjQwODExMTgzMTM5WjAZMRcwFQYDVQQDDA5MaW51eFRyYW5zcG9ydDCCASIwDQYJ
-KoZIhvcNAQEBBQADggEPADCCAQoCggEBAK/XWh+Djc2WYoJ/8FkZd8OV3V47fID5
-WV8hSBz/i/hVUKHhCWTQfE4VcQBGYFyK8lMKIBV7t6Bq05TQGuB8148HSjIboDx3
-Ndd0C/+lYcBE1izMrHKZYhcy7lSlEUk+y5iye0cA5k/dlJhfwoxWolw0E2dMOjlY
-qzkEGJdyS6+hFddo696HzD7OYhxh1r50aHPWqY8NnC51487loOtPs4LYA2bd3HSg
-ECpOtKzyJW+GP0H2vBa7MrXrZOnD1K2j2xb8nTnYnpNtlmnZPj7VYFsLOlsq547X
-nFiSptPWslbVogkUVkCZlAqkMcJ/OtH70ZVjLyjFd6j7J/Wy8MrA7pECAwEAAaNT
-MFEwHQYDVR0OBBYEFGXBvV/uWivFWRWPHiVfY/kSJqufMB8GA1UdIwQYMBaAFGXB
-vV/uWivFWRWPHiVfY/kSJqufMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEL
-BQADggEBABjatix/q90u6X/Jar/UkKiL2zx36s4huPU9KG8F51g48cYRjrvpK4+H
-K6avCGArl7h1gczaGS7LTOHFUU25eg/BBcKcXEO3aryQph2A167ip89UM55LxlnC
-QVVV9HAnEw5qAoh0wlZ65fVN+SE8FdasYlbbbp7c4At/LZruSj+IIapZDwwJxcBk
-YlSOa34v1Uay09+Hgu95dYQjI9txJW1ViRVlDpKbieGTzROI6s3uk+3rhxxlH2Zi
-Z9UqNmPfH9UE1xgSk/wkMWW22h/x51qIRKAZ4EzmdHVXdT/BarIuHxtHH8hIPNSL
-FjetCMVZNBej2HXL9cY5UVFYCG6JG0Q=
+MIIDEzCCAfugAwIBAgIUToMqRt0z6FfqfiJhS1Hh+u2j3VEwDQYJKoZIhvcNAQEL
+BQAwGTEXMBUGA1UEAwwOTGludXhUcmFuc3BvcnQwHhcNMjQwODAxMTYwOTU2WhcN
+MjYwODAxMTYwOTU2WjAZMRcwFQYDVQQDDA5MaW51eFRyYW5zcG9ydDCCASIwDQYJ
+KoZIhvcNAQEBBQADggEPADCCAQoCggEBAMs8jttzIHATj1BNs3r4cCOAMuVaM1b7
+Aw8D7Lz3rTxFieQCh1vLSFl1l9SQmO7rmh0OfEzIKK8jAU4wkLclgospKuYpB9ME
+5QnXbLpXWYfW99V4safGvv9lGZztGKMd4ZT2it9QcpKEFFi6W7cjIyiUuyYMB0uI
+IvA6s6tGs8LgN89Lx7HSTSR86QNPvRtTw0jlrr8nfM7EkaT9Q6xu6GjCp89wCx+h
+IwcPtstSgfMo5P+3IO30L1wSM+CF1n+nD9M8E4wtcxhoWLuyAPhDsw5f7jKyHmRo
+Nm9RxToM0ON67SmN2906i0NxzXWtuttww6KE/O6BEZKNlnp9ja3bnM8CAwEAAaNT
+MFEwHQYDVR0OBBYEFNPDyPggVKjneDW7XuzC8NqgmJ9VMB8GA1UdIwQYMBaAFNPD
+yPggVKjneDW7XuzC8NqgmJ9VMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEL
+BQADggEBAFuVgcimwPxgpwKNvyUKMY9VFa6UVZs/ky6FEEaxrKVAl2GZF9MoSTO5
+vXMdWYHtSF+RWYxCz5pt7Bv97zuEXvbino/JvsLrE8f265Woe2CdDOPiBCHWBOlH
++wM71Hoh0TX7V2TSumona6e0cqUPT7fbNdaNZm8ZHoUscbbPmamERH9Z9zUXWPLk
+mtjwz17bvRriAMrglA/Dm3xHiEYBJv3+4FnOqPGfg9vZH6xfmrRwrF1Moj5jEZz5
+cN2N+vO8HCEqGMBCpSlsWq1c2r3NwLH0J3b6EL7X4jcVvpykKg3WmOZGdataYDk9
+0IHy8VyGiX7g3EJOAbbf12FjgLAt4NM=
-----END CERTIFICATE-----
diff --git a/tests/data/wire/trans_prv b/tests/data/wire/trans_prv
index 17bdb07c6..876b8351b 100644
--- a/tests/data/wire/trans_prv
+++ b/tests/data/wire/trans_prv
@@ -1,28 +1,28 @@
-----BEGIN PRIVATE KEY-----
-MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCv11ofg43NlmKC
-f/BZGXfDld1eO3yA+VlfIUgc/4v4VVCh4Qlk0HxOFXEARmBcivJTCiAVe7egatOU
-0BrgfNePB0oyG6A8dzXXdAv/pWHARNYszKxymWIXMu5UpRFJPsuYsntHAOZP3ZSY
-X8KMVqJcNBNnTDo5WKs5BBiXckuvoRXXaOveh8w+zmIcYda+dGhz1qmPDZwudePO
-5aDrT7OC2ANm3dx0oBAqTrSs8iVvhj9B9rwWuzK162Tpw9Sto9sW/J052J6TbZZp
-2T4+1WBbCzpbKueO15xYkqbT1rJW1aIJFFZAmZQKpDHCfzrR+9GVYy8oxXeo+yf1
-svDKwO6RAgMBAAECggEAEwBogsNKjY7Usll09Yvk/0OwmkA/YgiP+dG04z1SONGv
-Vu7kfvpwlFeI0IjKXPW+3e5YLTojS7h/iLM8VEnpWVFmWSfXFvGi5ddqfIO4nnhR
-1KGBeRjOGsesLYVw6sNYaPXQkImuWa8OIbEnatbp0KDn/9+i4xOL3StuJN97Ak1u
-Giq4gwFbag4/QctBZ+5P0t77W+uzWcvEyNgK6rndfPWxqwmJSBFchY6O3s1l6NY8
-vSmyYhYRgFXEgX0nDumGfEXsF1Cj9tzYT2DUZc2f6+UCtXCD49qnoKawLhCrl5Uh
-QGs82TR5FSn7zLW4MbFody6p8UDw6pYiWlPPR7fmgQKBgQDO3j5RCXf0276K56BA
-rFpOOmivq3fxElRVCSRRRVPKHDYKQiPKnNXoa/pSl8a6CfjJaJzkNj+wTEFdQRGm
-Ia123kR/1S21/zgGZNmbUGby+A4fKxBY101/JQweucRN7aw3XLKPXhOL1NPyKdWh
-dARvjZvEl1qR6s07Y6jZgpkGqQKBgQDZmqVWvUgACdxkCYEzDf3Fc2G/8oL4VxWJ
-HHr5zib+DDhTfKrgQyA9CZ97stZfrR7KYnsLJH8jnj/w/CNOI0G+41KroICRsnjT
-5bm7/sT5uwLwu+FAQzITiehj7Te1lwsqtS8yOnXBTQ3hzaw9yhAsuhefx+WT2UCd
-Y8Od13nhqQKBgQCR2LR8s71D/81F52nfTuRYNOvrtmtYpkCYt1pIhiU94EflUZ4k
-UhCpzb7tjh5IuZEShtPePbUHWavX0HFd/G5s2OXYbnbM0oQwVdfpnXUHpgVmyhi7
-WghENN1nqDcTbha17X/ifkQvmLxZBk+chcw+zcrdfowXRkCtt2Sq/V1gCQKBgH/w
-UK3C9AYxxgZ7IB9oZoAk6p/0cdSZPuwydotRDdPoU2WissTQMrAwbDhKWYg/PQ84
-/6b5elbywB1r4UYbrJgTB5Qo9e6zxB6xvpYtoJpDveLUVAd4eoTKXHwECPEXMVWW
-2XzqqjlQmIzeZBqgJwplD2a+HNjkrvzanzS6b8qhAoGBAIun0EEc/Zc0ZxzgDPen
-A9/7jV++QCrNsevxGH8yrhPP4UqTVSHGR9H+RAif7zTBTn0OwzSBz6hFbPmxum3m
-cKabsKVN3poz3TBvfyhgjYosMWvCHpNhif09lyd/s2FezPGyK1Nyf5cKNEWjFGKw
-+fCPJ/Ihp4iwacNU1Pu9m050
+MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQDLPI7bcyBwE49Q
+TbN6+HAjgDLlWjNW+wMPA+y89608RYnkAodby0hZdZfUkJju65odDnxMyCivIwFO
+MJC3JYKLKSrmKQfTBOUJ12y6V1mH1vfVeLGnxr7/ZRmc7RijHeGU9orfUHKShBRY
+ulu3IyMolLsmDAdLiCLwOrOrRrPC4DfPS8ex0k0kfOkDT70bU8NI5a6/J3zOxJGk
+/UOsbuhowqfPcAsfoSMHD7bLUoHzKOT/tyDt9C9cEjPghdZ/pw/TPBOMLXMYaFi7
+sgD4Q7MOX+4ysh5kaDZvUcU6DNDjeu0pjdvdOotDcc11rbrbcMOihPzugRGSjZZ6
+fY2t25zPAgMBAAECggEAE9CAJxIW4AZwKwagUIVnPXbSv3ynU7weRLj/vD6zg5RO
+CM5cTw1HLP2jg2RjnKuYt2uBn+TF3qldh7eBbHG6RAIL/iuS6TZpdCeuII7CmlVR
+jVz6iR594Z2EPUH6bHDN3P2adYI84V8CMtJcfcLtuxehFWkHzwvjSCOY/8JhZUbV
+ebXXc3zPdSu+WmeManXnzs4VgE6QnSNdyk67fvE1Qxi18s49XXWBPTg01hn+v2yJ
+QVuv36UP2MgIRZJE/PI9NL6tqqiHmY5sCIJ41hQLRxd/mnRC8hdHrfNNhqHVlC9g
+JoQQwn/dD12EZwyiQyJyGZOmFDrfv7G3d2QQVJ4OLQKBgQDrxf3nRK28CWaV2evS
+J4MZjTWmZGiNzMiqEtfTgd0v3+rs73WYaNfQ79Iejj6KJfJq7vtdawqGW1bPNfgF
+KJCdr3yxjpv5GsHF7fiE8ZWcQ6d6FTWNuayLOEbHnPemYTqg5pd1wsPgIBoE9Zqm
+zo1iuGxmwHos2yQgif9vEU99wwKBgQDcq/+aDscOO1oimJjAbBl95I8bOtSxR0Ip
+pv/iaB8+rrS18jiAygXuo34tq+L0HmoniMCuuVg4zhgAxzgnohTlsJpyGnzkdkmo
+TTan76WkFAedmurzQSu96p5F9HOc0MgluQHtPhO5SsjWhUgXxAU0Zoe+JnTVq0X+
+//8z1s64BQKBgEbanl4U7p0WuiSIc+0ZALX6EMhrXlxW0WsC9KdUXJNZmHER2WYv
+A8R/fca++p5rnvlxzkqZs3UDGAh3cIykTymEJlX5xHfNCbSgulHBhDOMxVTT8N8h
+kG/aPrMYQfhXOdZG1feGy3ScURVydcJxSl4DjFgouc6nIKlCr2fCbQAfAoGAVpez
+3EtSNzZ5HzxMLK3+rtUihufmEI7K2rdqj/iV0i4SQZeELp2YCFXlrJxXmb3ZoBvc
+qHOYt+m/p4aFdZ/3nU5YvM/CFJCKRN3PxcSXdjRZ7LGe4se/F25an07Wk0GmWI8p
+v2Ptr3c2Kl/ws0q7VB2rxKUokbP86pygE0KGqdUCgYAf8G1QLDZMq57XsNBpiITY
+xmS/vnmu2jj/DaTAiJ/gPkUaemoJ4xqhuIko7KqaNOBYoOMrOadldygNtrH1c5YE
+LKdPYQ9/bASF59DnBotKAv79n2svHFHNXkpZA+kIoH7QwhgKpwo3vNwcJcKRIBB9
+MjMnBzho1vIbdhoIHJ+Egw==
-----END PRIVATE KEY-----
diff --git a/tests/data/wire/trans_pub b/tests/data/wire/trans_pub
index 330ff4271..b090a7817 100644
--- a/tests/data/wire/trans_pub
+++ b/tests/data/wire/trans_pub
@@ -1,9 +1,9 @@
-----BEGIN PUBLIC KEY-----
-MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAr9daH4ONzZZign/wWRl3
-w5XdXjt8gPlZXyFIHP+L+FVQoeEJZNB8ThVxAEZgXIryUwogFXu3oGrTlNAa4HzX
-jwdKMhugPHc113QL/6VhwETWLMyscpliFzLuVKURST7LmLJ7RwDmT92UmF/CjFai
-XDQTZ0w6OVirOQQYl3JLr6EV12jr3ofMPs5iHGHWvnRoc9apjw2cLnXjzuWg60+z
-gtgDZt3cdKAQKk60rPIlb4Y/Qfa8Frsytetk6cPUraPbFvydOdiek22Wadk+PtVg
-Wws6WyrnjtecWJKm09ayVtWiCRRWQJmUCqQxwn860fvRlWMvKMV3qPsn9bLwysDu
-kQIDAQAB
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAyzyO23MgcBOPUE2zevhw
+I4Ay5VozVvsDDwPsvPetPEWJ5AKHW8tIWXWX1JCY7uuaHQ58TMgoryMBTjCQtyWC
+iykq5ikH0wTlCddsuldZh9b31Xixp8a+/2UZnO0Yox3hlPaK31BykoQUWLpbtyMj
+KJS7JgwHS4gi8Dqzq0azwuA3z0vHsdJNJHzpA0+9G1PDSOWuvyd8zsSRpP1DrG7o
+aMKnz3ALH6EjBw+2y1KB8yjk/7cg7fQvXBIz4IXWf6cP0zwTjC1zGGhYu7IA+EOz
+Dl/uMrIeZGg2b1HFOgzQ43rtKY3b3TqLQ3HNda2623DDooT87oERko2Wen2Nrduc
+zwIDAQAB
-----END PUBLIC KEY-----
diff --git a/tests/ga/test_agent_update_handler.py b/tests/ga/test_agent_update_handler.py
index c6e41469f..c2d01a424 100644
--- a/tests/ga/test_agent_update_handler.py
+++ b/tests/ga/test_agent_update_handler.py
@@ -10,7 +10,8 @@
from azurelinuxagent.common.protocol.util import ProtocolUtil
from azurelinuxagent.common.version import CURRENT_VERSION, AGENT_NAME
-from azurelinuxagent.ga.agent_update_handler import get_agent_update_handler
+from azurelinuxagent.ga.agent_update_handler import get_agent_update_handler, INITIAL_UPDATE_STATE_FILE, \
+ RSM_UPDATE_STATE_FILE
from azurelinuxagent.ga.guestagent import GuestAgent
from tests.ga.test_update import UpdateTestCase
from tests.lib.http_request_predicates import HttpRequestPredicates
@@ -28,7 +29,7 @@ def setUp(self):
clear_singleton_instances(ProtocolUtil)
@contextlib.contextmanager
- def _get_agent_update_handler(self, test_data=None, autoupdate_frequency=0.001, autoupdate_enabled=True, protocol_get_error=False, mock_get_header=None, mock_put_header=None):
+ def _get_agent_update_handler(self, test_data=None, autoupdate_frequency=0.001, autoupdate_enabled=True, initial_update_attempted=True, protocol_get_error=False, mock_get_header=None, mock_put_header=None):
# Default to DATA_FILE of test_data parameter raises the pylint warning
# W0102: Dangerous default value DATA_FILE (builtins.dict) as argument (dangerous-default-value)
test_data = DATA_FILE if test_data is None else test_data
@@ -57,6 +58,9 @@ def put_handler(url, *args, **_):
protocol.set_http_handlers(http_get_handler=http_get_handler, http_put_handler=http_put_handler)
+ if initial_update_attempted:
+ open(os.path.join(conf.get_lib_dir(), INITIAL_UPDATE_STATE_FILE), "a").close()
+
with patch("azurelinuxagent.common.conf.get_autoupdate_enabled", return_value=autoupdate_enabled):
with patch("azurelinuxagent.common.conf.get_autoupdate_frequency", return_value=autoupdate_frequency):
with patch("azurelinuxagent.common.conf.get_autoupdate_gafamily", return_value="Prod"):
@@ -407,6 +411,29 @@ def test_it_should_report_update_status_with_error_on_download_fail(self):
self.assertEqual("9.9.9.10", vm_agent_update_status.expected_version)
self.assertIn("Failed to download agent package from all URIs", vm_agent_update_status.message)
+ def test_it_should_not_report_error_status_if_new_rsm_version_is_same_as_current_after_last_update_attempt_failed(self):
+ data_file = DATA_FILE.copy()
+ data_file["ext_conf"] = "wire/ext_conf_rsm_version.xml"
+
+ with self._get_agent_update_handler(test_data=data_file, protocol_get_error=True) as (agent_update_handler, _):
+ agent_update_handler.run(agent_update_handler._protocol.get_goal_state(), True)
+ vm_agent_update_status = agent_update_handler.get_vmagent_update_status()
+ self.assertEqual(VMAgentUpdateStatuses.Error, vm_agent_update_status.status)
+ self.assertEqual(1, vm_agent_update_status.code)
+ self.assertEqual("9.9.9.10", vm_agent_update_status.expected_version)
+ self.assertIn("Failed to download agent package from all URIs", vm_agent_update_status.message)
+
+ # Send same version GS after last update attempt failed
+ agent_update_handler._protocol.mock_wire_data.set_version_in_agent_family(
+ str(CURRENT_VERSION))
+ agent_update_handler._protocol.mock_wire_data.set_incarnation(2)
+ agent_update_handler._protocol.client.update_goal_state()
+ agent_update_handler.run(agent_update_handler._protocol.get_goal_state(), True)
+ vm_agent_update_status = agent_update_handler.get_vmagent_update_status()
+ self.assertEqual(VMAgentUpdateStatuses.Success, vm_agent_update_status.status)
+ self.assertEqual(0, vm_agent_update_status.code)
+ self.assertEqual(str(CURRENT_VERSION), vm_agent_update_status.expected_version)
+
def test_it_should_report_update_status_with_missing_rsm_version_error(self):
data_file = DATA_FILE.copy()
data_file['ext_conf'] = "wire/ext_conf_version_missing_in_agent_family.xml"
@@ -452,7 +479,7 @@ def test_it_should_save_rsm_state_of_the_most_recent_goal_state(self):
with self.assertRaises(AgentUpgradeExitException):
agent_update_handler.run(agent_update_handler._protocol.get_goal_state(), True)
- state_file = os.path.join(conf.get_lib_dir(), "rsm_update.json")
+ state_file = os.path.join(conf.get_lib_dir(), RSM_UPDATE_STATE_FILE)
self.assertTrue(os.path.exists(state_file), "The rsm state file was not saved (can't find {0})".format(state_file))
# check if state gets updated if most recent goal state has different values
@@ -535,3 +562,36 @@ def http_get_handler(uri, *_, **__):
self.assertEqual(1, len([kwarg['message'] for _, kwarg in mock_telemetry.call_args_list if
"Downloaded agent package: WALinuxAgent-9.9.9.10 is missing agent handler manifest file" in kwarg['message'] and kwarg[
'op'] == WALAEventOperation.AgentUpgrade]), "Agent update should fail")
+
+ def test_it_should_use_self_update_for_first_update_always(self):
+ self.prepare_agents(count=1)
+
+ # mock the goal state as vm enrolled into RSM
+ data_file = DATA_FILE.copy()
+ data_file['ext_conf'] = "wire/ext_conf_rsm_version.xml"
+ with self._get_agent_update_handler(test_data=data_file, initial_update_attempted=False) as (agent_update_handler, mock_telemetry):
+ with self.assertRaises(AgentUpgradeExitException) as context:
+ agent_update_handler.run(agent_update_handler._protocol.get_goal_state(), True)
+ # Verifying agent used self-update for initial update
+ self._assert_update_discovered_from_agent_manifest(mock_telemetry, version="99999.0.0.0")
+ self._assert_agent_directories_exist_and_others_dont_exist(versions=[str(CURRENT_VERSION), "99999.0.0.0"])
+ self._assert_agent_exit_process_telemetry_emitted(ustr(context.exception.reason))
+
+ state_file = os.path.join(conf.get_lib_dir(), INITIAL_UPDATE_STATE_FILE)
+ self.assertTrue(os.path.exists(state_file),
+ "The first update state file was not saved (can't find {0})".format(state_file))
+
+ def test_it_should_honor_any_update_type_after_first_update(self):
+ self.prepare_agents(count=1)
+
+ data_file = DATA_FILE.copy()
+ data_file['ext_conf'] = "wire/ext_conf_rsm_version.xml"
+ # mocking initial update attempt as true
+ with self._get_agent_update_handler(test_data=data_file, initial_update_attempted=True) as (agent_update_handler, mock_telemetry):
+ with self.assertRaises(AgentUpgradeExitException) as context:
+ agent_update_handler.run(agent_update_handler._protocol.get_goal_state(), True)
+
+ # Verifying agent honored RSM update
+ self._assert_agent_rsm_version_in_goal_state(mock_telemetry, version="9.9.9.10")
+ self._assert_agent_directories_exist_and_others_dont_exist(versions=["9.9.9.10", str(CURRENT_VERSION)])
+ self._assert_agent_exit_process_telemetry_emitted(ustr(context.exception.reason))
diff --git a/tests/ga/test_cgroupapi.py b/tests/ga/test_cgroupapi.py
index ec077c90a..ae091ed9d 100644
--- a/tests/ga/test_cgroupapi.py
+++ b/tests/ga/test_cgroupapi.py
@@ -24,10 +24,12 @@
from azurelinuxagent.common.exception import CGroupsException
from azurelinuxagent.ga.cgroupapi import SystemdCgroupApiv1, SystemdCgroupApiv2, CGroupUtil, get_cgroup_api, \
- InvalidCgroupMountpointException
+ InvalidCgroupMountpointException, CgroupV1, CgroupV2
from azurelinuxagent.ga.cgroupstelemetry import CGroupsTelemetry
from azurelinuxagent.common.osutil import systemd
from azurelinuxagent.common.utils import fileutil
+from azurelinuxagent.ga.cpucontroller import CpuControllerV1, CpuControllerV2
+from azurelinuxagent.ga.memorycontroller import MemoryControllerV1, MemoryControllerV2
from tests.lib.mock_cgroup_environment import mock_cgroup_v1_environment, mock_cgroup_v2_environment, \
mock_cgroup_hybrid_environment
from tests.lib.mock_environment import MockCommand
@@ -85,7 +87,7 @@ def test_cgroups_should_be_supported_only_on_ubuntu16_centos7dot4_redhat7dot4_an
class SystemdCgroupsApiTestCase(AgentTestCase):
- def test_get_cgroup_api_raises_exception_when_systemd_mount_point_does_not_exist(self):
+ def test_get_cgroup_api_raises_exception_when_systemd_mountpoint_does_not_exist(self):
with mock_cgroup_v1_environment(self.tmp_dir):
# Mock os.path.exists to return False for the os.path.exists(CGROUP_FILE_SYSTEM_ROOT) check
with patch("os.path.exists", return_value=False):
@@ -151,106 +153,16 @@ def test_get_unit_property_should_return_the_value_of_the_given_property(self):
class SystemdCgroupsApiv1TestCase(AgentTestCase):
- def test_get_unit_cgroup_paths_should_return_the_cgroup_v1_mount_points(self):
- with mock_cgroup_v1_environment(self.tmp_dir):
- cpu, memory = get_cgroup_api().get_unit_cgroup_paths("extension.service")
- self.assertIn(cpu, '/sys/fs/cgroup/cpu,cpuacct/system.slice/extension.service',
- "The mount point for the CPU controller is incorrect")
- self.assertIn(memory, '/sys/fs/cgroup/memory/system.slice/extension.service',
- "The mount point for the memory controller is incorrect")
-
- def test_get_unit_cgroup_path_should_return_None_if_either_cgroup_v1_controller_not_mounted(self):
- with mock_cgroup_v1_environment(self.tmp_dir):
- with patch('azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv1.get_controller_root_paths', return_value=('/sys/fs/cgroup/cpu,cpuacct', None)):
- cpu, memory = get_cgroup_api().get_unit_cgroup_paths("extension.service")
- self.assertIn(cpu, '/sys/fs/cgroup/cpu,cpuacct/system.slice/extension.service',
- "The mount point for the CPU controller is incorrect")
- self.assertIsNone(memory,
- "The mount point for the memory controller is None so unit cgroup should be None")
-
- with patch('azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv1.get_controller_root_paths', return_value=(None, '/sys/fs/cgroup/memory')):
- cpu, memory = get_cgroup_api().get_unit_cgroup_paths("extension.service")
- self.assertIsNone(cpu, "The mount point for the cpu controller is None so unit cgroup should be None")
- self.assertIn(memory, '/sys/fs/cgroup/memory/system.slice/extension.service',
- "The mount point for the memory controller is incorrect")
-
- def test_get_process_cgroup_paths_should_return_the_cgroup_v1_mount_points(self):
- with mock_cgroup_v1_environment(self.tmp_dir):
- cpu, memory = get_cgroup_api().get_process_cgroup_paths("self")
- self.assertIn(cpu, '/sys/fs/cgroup/cpu,cpuacct/system.slice/walinuxagent.service',
- "The mount point for the CPU controller is incorrect")
- self.assertIn(memory, '/sys/fs/cgroup/memory/system.slice/walinuxagent.service',
- "The mount point for the memory controller is incorrect")
-
- def test_get_process_cgroup_path_should_return_None_if_either_cgroup_v1_controller_not_mounted(self):
- with mock_cgroup_v1_environment(self.tmp_dir):
- with patch('azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv1.get_controller_root_paths', return_value=('/sys/fs/cgroup/cpu,cpuacct', None)):
- cpu, memory = get_cgroup_api().get_process_cgroup_paths("self")
- self.assertIn(cpu, '/sys/fs/cgroup/cpu,cpuacct/system.slice/walinuxagent.service',
- "The mount point for the CPU controller is incorrect")
- self.assertIsNone(memory,
- "The mount point for the memory controller is None so unit cgroup should be None")
-
- with patch('azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv1.get_controller_root_paths', return_value=(None, '/sys/fs/cgroup/memory')):
- cpu, memory = get_cgroup_api().get_process_cgroup_paths("self")
- self.assertIsNone(cpu, "The mount point for the CPU controller is None so unit cgroup should be None")
- self.assertIn(memory, '/sys/fs/cgroup/memory/system.slice/walinuxagent.service',
- "The mount point for the memory controller is incorrect")
-
- def test_get_process_cgroup_v1_path_should_return_None_if_either_relative_path_is_None(self):
- with mock_cgroup_v1_environment(self.tmp_dir):
- with patch('azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv1.get_process_cgroup_relative_paths', return_value=('system.slice/walinuxagent.service', None)):
- cpu, memory = get_cgroup_api().get_process_cgroup_paths("self")
- self.assertIn(cpu, '/sys/fs/cgroup/cpu,cpuacct/system.slice/walinuxagent.service',
- "The mount point for the CPU controller is incorrect")
- self.assertIsNone(memory,
- "The relative cgroup path for the memory controller is None so unit cgroup should be None")
-
- with patch('azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv1.get_process_cgroup_relative_paths', return_value=(None, 'system.slice/walinuxagent.service')):
- cpu, memory = get_cgroup_api().get_process_cgroup_paths("self")
- self.assertIsNone(cpu, "The relative cgroup path for the cpu controller is None so unit cgroup should be None")
- self.assertIn(memory, '/sys/fs/cgroup/memory/system.slice/walinuxagent.service',
- "The mount point for the memory controller is incorrect")
-
- def test_get_controller_root_paths_should_return_the_cgroup_v1_controller_mount_points(self):
- with mock_cgroup_v1_environment(self.tmp_dir):
- cpu, memory = get_cgroup_api().get_controller_root_paths()
- self.assertEqual(cpu, '/sys/fs/cgroup/cpu,cpuacct', "The root cgroup for the CPU controller is incorrect")
- self.assertEqual(memory, '/sys/fs/cgroup/memory', "The root cgroup for the memory controller is incorrect")
-
- def test_get_controller_root_paths_should_return_None_if_either_controller_not_mounted(self):
- with mock_cgroup_v1_environment(self.tmp_dir):
- with patch('azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv1._get_controller_mountpoints', return_value={'memory': '/sys/fs/cgroup/memory', 'io': '/sys/fs/cgroup/io'}):
- cpu, memory = get_cgroup_api().get_controller_root_paths()
- self.assertIsNone(cpu, "The CPU controller is mot mounted, so the cpu controller path should be None")
- self.assertEqual(memory, '/sys/fs/cgroup/memory', "The root cgroup for the memory controller is incorrect")
-
- with patch('azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv1._get_controller_mountpoints', return_value={'cpu,cpuacct': '/sys/fs/cgroup/cpu,cpuacct', 'io': '/sys/fs/cgroup/io'}):
- cpu, memory = get_cgroup_api().get_controller_root_paths()
- self.assertIsNone(memory, "The memory controller is mot mounted, so the memory controller path should be None")
- self.assertEqual(cpu, '/sys/fs/cgroup/cpu,cpuacct', "The root cgroup for the cpu controller is incorrect")
-
- def test_get_controller_mountpoints_should_return_all_controller_mount_points(self):
+ def test_get_controller_mountpoints_should_return_only_supported_controllers(self):
with mock_cgroup_v1_environment(self.tmp_dir):
cgroup_api = get_cgroup_api()
# Expected value comes from findmnt output in the mocked environment
self.assertEqual(cgroup_api._get_controller_mountpoints(), {
- 'systemd': '/sys/fs/cgroup/systemd',
- 'devices': '/sys/fs/cgroup/devices',
- 'rdma': '/sys/fs/cgroup/rdma',
- 'perf_event': '/sys/fs/cgroup/perf_event',
- 'net_cls,net_prio': '/sys/fs/cgroup/net_cls,net_prio',
- 'blkio': '/sys/fs/cgroup/blkio',
- 'cpuset': '/sys/fs/cgroup/cpuset',
- 'misc': '/sys/fs/cgroup/misc',
'cpu,cpuacct': '/sys/fs/cgroup/cpu,cpuacct',
- 'memory': '/sys/fs/cgroup/memory',
- 'freezer': '/sys/fs/cgroup/freezer',
- 'hugetlb': '/sys/fs/cgroup/hugetlb',
- 'pids': '/sys/fs/cgroup/pids',
+ 'memory': '/sys/fs/cgroup/memory'
}, "The controller mountpoints are not correct")
- def test_are_mountpoints_systemd_created_should_return_False_if_cpu_or_memory_are_not_systemd_mountpoints(self):
+ def test_are_mountpoints_systemd_created_should_return_False_if_mountpoints_are_not_systemd(self):
with mock_cgroup_v1_environment(self.tmp_dir):
with patch('azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv1._get_controller_mountpoints', return_value={'cpu,cpuacct': '/custom/mountpoint/path', 'memory': '/custom/mountpoint/path'}):
self.assertFalse(SystemdCgroupApiv1().are_mountpoints_systemd_created())
@@ -261,23 +173,123 @@ def test_are_mountpoints_systemd_created_should_return_False_if_cpu_or_memory_ar
with patch('azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv1._get_controller_mountpoints', return_value={'memory': '/custom/mountpoint/path'}):
self.assertFalse(SystemdCgroupApiv1().are_mountpoints_systemd_created())
- def test_are_mountpoints_systemd_created_should_return_True_if_cpu_and_memory_are_systemd_mountpoints(self):
+ def test_are_mountpoints_systemd_created_should_return_True_if_mountpoints_are_systemd(self):
with mock_cgroup_v1_environment(self.tmp_dir):
- with patch('azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv1._get_controller_mountpoints', return_value={'cpu,cpuacct': '/sys/fs/cgroup', 'memory': '/sys/fs/cgroup'}):
- self.assertFalse(SystemdCgroupApiv1().are_mountpoints_systemd_created())
+ with patch('azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv1._get_controller_mountpoints', return_value={'cpu,cpuacct': '/sys/fs/cgroup/cpu,cpuacct', 'memory': '/sys/fs/cgroup/memory'}):
+ self.assertTrue(SystemdCgroupApiv1().are_mountpoints_systemd_created())
# are_mountpoints_systemd_created should only check controllers which are mounted
- with patch('azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv1._get_controller_mountpoints', return_value={'cpu,cpuacct': '/sys/fs/cgroup'}):
- self.assertFalse(SystemdCgroupApiv1().are_mountpoints_systemd_created())
+ with patch('azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv1._get_controller_mountpoints', return_value={'cpu,cpuacct': '/sys/fs/cgroup/cpu,cpuacct'}):
+ self.assertTrue(SystemdCgroupApiv1().are_mountpoints_systemd_created())
- with patch('azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv1._get_controller_mountpoints', return_value={'memory': '/sys/fs/cgroup'}):
- self.assertFalse(SystemdCgroupApiv1().are_mountpoints_systemd_created())
+ with patch('azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv1._get_controller_mountpoints', return_value={'memory': '/sys/fs/cgroup/memory'}):
+ self.assertTrue(SystemdCgroupApiv1().are_mountpoints_systemd_created())
+
+ with patch('azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv1._get_controller_mountpoints', return_value={}):
+ self.assertTrue(SystemdCgroupApiv1().are_mountpoints_systemd_created())
- def test_get_cpu_and_memory_cgroup_relative_paths_for_process_should_return_the_cgroup_v1_relative_paths(self):
+ def test_get_relative_paths_for_process_should_return_the_cgroup_v1_relative_paths(self):
with mock_cgroup_v1_environment(self.tmp_dir):
- cpu, memory = get_cgroup_api().get_process_cgroup_relative_paths('self')
- self.assertEqual(cpu, "system.slice/walinuxagent.service", "The relative path for the CPU cgroup is incorrect")
- self.assertEqual(memory, "system.slice/walinuxagent.service", "The relative memory for the CPU cgroup is incorrect")
+ relative_paths = get_cgroup_api()._get_process_relative_controller_paths('self')
+ self.assertEqual(len(relative_paths), 2)
+ self.assertEqual(relative_paths.get('cpu,cpuacct'), "system.slice/walinuxagent.service", "The relative path for the CPU cgroup is incorrect")
+            self.assertEqual(relative_paths.get('memory'), "system.slice/walinuxagent.service", "The relative path for the memory cgroup is incorrect")
+
+ def test_get_unit_cgroup_should_return_correct_paths_for_cgroup_v1(self):
+ with mock_cgroup_v1_environment(self.tmp_dir):
+ cgroup = get_cgroup_api().get_unit_cgroup(unit_name="extension.service", cgroup_name="extension")
+ self.assertIsInstance(cgroup, CgroupV1)
+ self.assertEqual(cgroup._cgroup_name, "extension")
+ self.assertEqual(cgroup._controller_mountpoints, {'cpu,cpuacct':'/sys/fs/cgroup/cpu,cpuacct', 'memory':'/sys/fs/cgroup/memory'})
+ self.assertEqual(cgroup._controller_paths, {'cpu,cpuacct': '/sys/fs/cgroup/cpu,cpuacct/system.slice/extension.service', 'memory': '/sys/fs/cgroup/memory/system.slice/extension.service'})
+
+ def test_get_unit_cgroup_should_return_only_mounted_controllers_v1(self):
+ with mock_cgroup_v1_environment(self.tmp_dir):
+ with patch('azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv1._get_controller_mountpoints', return_value={'cpu,cpuacct': '/sys/fs/cgroup/cpu,cpuacct'}):
+ cgroup = get_cgroup_api().get_unit_cgroup(unit_name="extension.service", cgroup_name="extension")
+ self.assertIsInstance(cgroup, CgroupV1)
+ self.assertEqual(cgroup._cgroup_name, "extension")
+ self.assertEqual(cgroup._controller_mountpoints, {'cpu,cpuacct':'/sys/fs/cgroup/cpu,cpuacct'})
+ self.assertEqual(cgroup._controller_paths, {'cpu,cpuacct': '/sys/fs/cgroup/cpu,cpuacct/system.slice/extension.service'})
+
+ with patch('azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv1._get_controller_mountpoints', return_value={}):
+ cgroup = get_cgroup_api().get_unit_cgroup(unit_name="extension.service", cgroup_name="extension")
+ self.assertIsInstance(cgroup, CgroupV1)
+ self.assertEqual(cgroup._cgroup_name, "extension")
+ self.assertEqual(cgroup._controller_mountpoints, {})
+ self.assertEqual(cgroup._controller_paths, {})
+
+ def test_get_cgroup_from_relative_path_should_return_the_correct_paths_for_cgroup_v1(self):
+ with mock_cgroup_v1_environment(self.tmp_dir):
+ cgroup = get_cgroup_api().get_cgroup_from_relative_path(relative_path="some/relative/path", cgroup_name="test_cgroup")
+ self.assertIsInstance(cgroup, CgroupV1)
+ self.assertEqual(cgroup._cgroup_name, "test_cgroup")
+ self.assertEqual(cgroup._controller_mountpoints,
+ {'cpu,cpuacct': '/sys/fs/cgroup/cpu,cpuacct', 'memory': '/sys/fs/cgroup/memory'})
+ self.assertEqual(cgroup._controller_paths,
+ {'cpu,cpuacct': '/sys/fs/cgroup/cpu,cpuacct/some/relative/path',
+ 'memory': '/sys/fs/cgroup/memory/some/relative/path'})
+
+ def test_get_cgroup_from_relative_path_should_return_only_mounted_controllers_v1(self):
+ with mock_cgroup_v1_environment(self.tmp_dir):
+ with patch('azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv1._get_controller_mountpoints', return_value={'cpu,cpuacct': '/sys/fs/cgroup/cpu,cpuacct'}):
+ cgroup = get_cgroup_api().get_cgroup_from_relative_path(relative_path="some/relative/path", cgroup_name="test_cgroup")
+ self.assertIsInstance(cgroup, CgroupV1)
+ self.assertEqual(cgroup._cgroup_name, "test_cgroup")
+ self.assertEqual(cgroup._controller_mountpoints,
+ {'cpu,cpuacct': '/sys/fs/cgroup/cpu,cpuacct'})
+ self.assertEqual(cgroup._controller_paths,
+ {'cpu,cpuacct': '/sys/fs/cgroup/cpu,cpuacct/some/relative/path'})
+
+ with patch('azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv1._get_controller_mountpoints', return_value={}):
+ cgroup = get_cgroup_api().get_cgroup_from_relative_path(relative_path="some/relative/path", cgroup_name="test_cgroup")
+ self.assertIsInstance(cgroup, CgroupV1)
+ self.assertEqual(cgroup._cgroup_name, "test_cgroup")
+ self.assertEqual(cgroup._controller_mountpoints, {})
+ self.assertEqual(cgroup._controller_paths, {})
+
+ def test_get_process_cgroup_should_return_the_correct_paths_for_cgroup_v1(self):
+ with mock_cgroup_v1_environment(self.tmp_dir):
+ cgroup = get_cgroup_api().get_process_cgroup(process_id="self", cgroup_name="walinuxagent")
+ self.assertIsInstance(cgroup, CgroupV1)
+ self.assertEqual(cgroup._cgroup_name, "walinuxagent")
+ self.assertEqual(cgroup._controller_mountpoints,
+ {'cpu,cpuacct': '/sys/fs/cgroup/cpu,cpuacct', 'memory': '/sys/fs/cgroup/memory'})
+ self.assertEqual(cgroup._controller_paths,
+ {'cpu,cpuacct': '/sys/fs/cgroup/cpu,cpuacct/system.slice/walinuxagent.service',
+ 'memory': '/sys/fs/cgroup/memory/system.slice/walinuxagent.service'})
+
+ def test_get_process_cgroup_should_return_only_mounted_controllers_v1(self):
+ with mock_cgroup_v1_environment(self.tmp_dir):
+ with patch('azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv1._get_controller_mountpoints', return_value={'cpu,cpuacct': '/sys/fs/cgroup/cpu,cpuacct'}):
+ cgroup = get_cgroup_api().get_process_cgroup(process_id="self", cgroup_name="walinuxagent")
+ self.assertIsInstance(cgroup, CgroupV1)
+ self.assertEqual(cgroup._cgroup_name, "walinuxagent")
+ self.assertEqual(cgroup._controller_mountpoints, {'cpu,cpuacct': '/sys/fs/cgroup/cpu,cpuacct'})
+ self.assertEqual(cgroup._controller_paths, {'cpu,cpuacct': '/sys/fs/cgroup/cpu,cpuacct/system.slice/walinuxagent.service'})
+
+ with patch('azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv1._get_controller_mountpoints', return_value={}):
+ cgroup = get_cgroup_api().get_process_cgroup(process_id="self", cgroup_name="walinuxagent")
+ self.assertIsInstance(cgroup, CgroupV1)
+ self.assertEqual(cgroup._cgroup_name, "walinuxagent")
+ self.assertEqual(cgroup._controller_mountpoints, {})
+ self.assertEqual(cgroup._controller_paths, {})
+
+ def test_get_process_cgroup_should_return_only_mounted_process_controllers_v1(self):
+ with mock_cgroup_v1_environment(self.tmp_dir):
+ with patch('azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv1._get_process_relative_controller_paths', return_value={'cpu,cpuacct': 'relative/path'}):
+ cgroup = get_cgroup_api().get_process_cgroup(process_id="self", cgroup_name="walinuxagent")
+ self.assertIsInstance(cgroup, CgroupV1)
+ self.assertEqual(cgroup._cgroup_name, "walinuxagent")
+ self.assertEqual(cgroup._controller_mountpoints, {'cpu,cpuacct': '/sys/fs/cgroup/cpu,cpuacct', 'memory':'/sys/fs/cgroup/memory'})
+ self.assertEqual(cgroup._controller_paths, {'cpu,cpuacct': '/sys/fs/cgroup/cpu,cpuacct/relative/path'})
+
+ with patch('azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv1._get_process_relative_controller_paths', return_value={}):
+ cgroup = get_cgroup_api().get_process_cgroup(process_id="self", cgroup_name="walinuxagent")
+ self.assertIsInstance(cgroup, CgroupV1)
+ self.assertEqual(cgroup._cgroup_name, "walinuxagent")
+ self.assertEqual(cgroup._controller_mountpoints, {'cpu,cpuacct': '/sys/fs/cgroup/cpu,cpuacct', 'memory':'/sys/fs/cgroup/memory'})
+ self.assertEqual(cgroup._controller_paths, {})
@patch('time.sleep', side_effect=lambda _: mock_sleep())
def test_start_extension_cgroups_v1_command_should_return_the_command_output(self, _):
@@ -354,17 +366,6 @@ def test_start_extension_cgroups_v1_command_should_use_systemd_to_execute_the_co
class SystemdCgroupsApiv2TestCase(AgentTestCase):
- def test_get_controllers_enabled_at_root_should_return_list_of_enabled_controllers(self):
- with mock_cgroup_v2_environment(self.tmp_dir):
- cgroup_api = get_cgroup_api()
- self.assertEqual(cgroup_api._get_controllers_enabled_at_root('/sys/fs/cgroup'), ['cpuset', 'cpu', 'io', 'memory', 'pids'])
-
- def test_get_controllers_enabled_at_root_should_return_empty_list_if_root_cgroup_path_is_None(self):
- with mock_cgroup_v2_environment(self.tmp_dir):
- with patch('azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv2._get_root_cgroup_path', return_value=None):
- cgroup_api = get_cgroup_api()
- self.assertEqual(cgroup_api._controllers_enabled_at_root, [])
-
def test_get_root_cgroup_path_should_return_v2_cgroup_root(self):
with mock_cgroup_v2_environment(self.tmp_dir):
cgroup_api = get_cgroup_api()
@@ -374,97 +375,113 @@ def test_get_root_cgroup_path_should_only_match_systemd_mountpoint(self):
with mock_cgroup_v2_environment(self.tmp_dir) as env:
# Mock an environment which has multiple v2 mountpoints
env.add_command(MockCommand(r"^findmnt -t cgroup2 --noheadings$",
-'''/custom/mountpoint/path1 cgroup2 cgroup2 rw,relatime
-/sys/fs/cgroup cgroup2 cgroup2 rw,nosuid,nodev,noexec,relatime
-/custom/mountpoint/path2 none cgroup2 rw,relatime
-'''))
+'''/custom/mountpoint/path1 cgroup2 cgroup2 rw,relatime
+/sys/fs/cgroup cgroup2 cgroup2 rw,nosuid,nodev,noexec,relatime
+/custom/mountpoint/path2 none cgroup2 rw,relatime
+'''))
cgroup_api = get_cgroup_api()
self.assertEqual(cgroup_api._get_root_cgroup_path(), '/sys/fs/cgroup')
- def test_get_unit_cgroup_paths_should_return_the_cgroup_v2_cgroup_paths(self):
+ def test_get_controllers_enabled_at_root_should_return_list_of_agent_supported_and_enabled_controllers(self):
with mock_cgroup_v2_environment(self.tmp_dir):
- cpu, memory = get_cgroup_api().get_unit_cgroup_paths("extension.service")
- self.assertEqual(cpu, '/sys/fs/cgroup/system.slice/extension.service',
- "The cgroup path for the CPU controller is incorrect")
- self.assertEqual(memory, '/sys/fs/cgroup/system.slice/extension.service',
- "The cgroup path for the memory controller is incorrect")
+ cgroup_api = get_cgroup_api()
+ enabled_controllers = cgroup_api._get_controllers_enabled_at_root('/sys/fs/cgroup')
+ self.assertEqual(len(enabled_controllers), 2)
+ self.assertIn('cpu', enabled_controllers)
+ self.assertIn('memory', enabled_controllers)
- def test_get_unit_cgroup_path_should_return_None_if_either_cgroup_v2_controller_not_enabled(self):
+ def test_get_controllers_enabled_at_root_should_return_empty_list_if_root_cgroup_path_is_empty(self):
with mock_cgroup_v2_environment(self.tmp_dir):
- with patch('azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv2.get_controller_root_paths', return_value=('/sys/fs/cgroup', None)):
- cpu, memory = get_cgroup_api().get_unit_cgroup_paths("extension.service")
- self.assertIn(cpu, '/sys/fs/cgroup/system.slice/extension.service',
- "The cgroup path for the CPU controller is incorrect")
- self.assertIsNone(memory,
- "The cgroup path for the memory controller is None so unit cgroup should be None")
-
- with patch('azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv2.get_controller_root_paths', return_value=(None, '/sys/fs/cgroup')):
- cpu, memory = get_cgroup_api().get_unit_cgroup_paths("extension.service")
- self.assertIsNone(cpu, "The cgroup path for the cpu controller is None so unit cgroup should be None")
- self.assertIn(memory, '/sys/fs/cgroup/system.slice/extension.service',
- "The cgroup path for the memory controller is incorrect")
-
- def test_get_process_cgroup_paths_should_return_the_cgroup_v2_cgroup_paths(self):
+ with patch('azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv2._get_root_cgroup_path', return_value=""):
+ cgroup_api = get_cgroup_api()
+ self.assertEqual(cgroup_api._controllers_enabled_at_root, [])
+
+ def test_get_process_relative_cgroup_path_should_return_relative_path(self):
with mock_cgroup_v2_environment(self.tmp_dir):
- cpu, memory = get_cgroup_api().get_process_cgroup_paths("self")
- self.assertIn(cpu, '/sys/fs/cgroup/system.slice/walinuxagent.service',
- "The cgroup path for the CPU controller is incorrect")
- self.assertIn(memory, '/sys/fs/cgroup/system.slice/walinuxagent.service',
- "The cgroup path for the memory controller is incorrect")
+ cgroup_api = get_cgroup_api()
+ self.assertEqual(cgroup_api._get_process_relative_cgroup_path(process_id="self"), "system.slice/walinuxagent.service")
- def test_get_process_cgroup_path_should_return_None_if_either_cgroup_v2_controller_not_enabled(self):
+ def test_get_unit_cgroup_should_return_correct_paths_for_cgroup_v2(self):
with mock_cgroup_v2_environment(self.tmp_dir):
- with patch('azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv2.get_controller_root_paths', return_value=('/sys/fs/cgroup', None)):
- cpu, memory = get_cgroup_api().get_process_cgroup_paths("self")
- self.assertIn(cpu, '/sys/fs/cgroup/system.slice/walinuxagent.service',
- "The cgroup path for the CPU controller is incorrect")
- self.assertIsNone(memory,
- "The cgroup path for the memory controller is None so unit cgroup should be None")
-
- with patch('azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv2.get_controller_root_paths', return_value=(None, '/sys/fs/cgroup')):
- cpu, memory = get_cgroup_api().get_process_cgroup_paths("self")
- self.assertIsNone(cpu, "The cgroup path for the CPU controller is None so unit cgroup should be None")
- self.assertIn(memory, '/sys/fs/cgroup/system.slice/walinuxagent.service',
- "The cgroup path for the memory controller is incorrect")
-
- def test_get_process_cgroup_v2_path_should_return_None_if_relative_path_is_None(self):
+ cgroup = get_cgroup_api().get_unit_cgroup(unit_name="extension.service", cgroup_name="extension")
+ self.assertIsInstance(cgroup, CgroupV2)
+ self.assertEqual(cgroup._cgroup_name, "extension")
+ self.assertEqual(cgroup._root_cgroup_path, "/sys/fs/cgroup")
+ self.assertEqual(cgroup._cgroup_path, "/sys/fs/cgroup/system.slice/extension.service")
+ self.assertEqual(len(cgroup._enabled_controllers), 2)
+ self.assertIn('cpu', cgroup._enabled_controllers)
+ self.assertIn('memory', cgroup._enabled_controllers)
+
+ def test_get_unit_cgroup_should_return_empty_paths_if_root_path_empty_v2(self):
with mock_cgroup_v2_environment(self.tmp_dir):
- with patch('azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv2.get_process_cgroup_relative_paths', return_value=(None, None)):
- cpu, memory = get_cgroup_api().get_process_cgroup_paths("self")
- self.assertIsNone(cpu, "The relative cgroup path for the cpu controller is None so unit cgroup should be None")
- self.assertIsNone(memory,
- "The relative cgroup path for the memory controller is None so unit cgroup should be None")
-
- def test_get_controller_root_paths_should_return_the_cgroup_v2_root_cgroup_path(self):
+ with patch('azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv2._get_root_cgroup_path', return_value=""):
+ cgroup = get_cgroup_api().get_unit_cgroup(unit_name="extension.service", cgroup_name="extension")
+ self.assertIsInstance(cgroup, CgroupV2)
+ self.assertEqual(cgroup._cgroup_name, "extension")
+ self.assertEqual(cgroup._root_cgroup_path, "")
+ self.assertEqual(cgroup._cgroup_path, "")
+ self.assertEqual(len(cgroup._enabled_controllers), 0)
+
+ def test_get_unit_cgroup_should_return_only_enabled_controllers_v2(self):
with mock_cgroup_v2_environment(self.tmp_dir):
- cpu, memory = get_cgroup_api().get_controller_root_paths()
- self.assertEqual(cpu, '/sys/fs/cgroup', "The root cgroup for the CPU controller is incorrect")
- self.assertEqual(memory, '/sys/fs/cgroup', "The root cgroup for the memory controller is incorrect")
-
- def test_get_controller_root_paths_should_return_None_if_root_cgroup_path_is_None(self):
+ with patch('azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv2._get_controllers_enabled_at_root', return_value=['cpu']):
+ cgroup = get_cgroup_api().get_unit_cgroup(unit_name="extension.service", cgroup_name="extension")
+ self.assertIsInstance(cgroup, CgroupV2)
+ self.assertEqual(cgroup._cgroup_name, "extension")
+ self.assertEqual(cgroup._root_cgroup_path, "/sys/fs/cgroup")
+ self.assertEqual(cgroup._cgroup_path, "/sys/fs/cgroup/system.slice/extension.service")
+ self.assertEqual(len(cgroup._enabled_controllers), 1)
+ self.assertIn('cpu', cgroup._enabled_controllers)
+
+ with patch('azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv2._get_controllers_enabled_at_root', return_value=[]):
+ cgroup = get_cgroup_api().get_unit_cgroup(unit_name="extension.service", cgroup_name="extension")
+ self.assertIsInstance(cgroup, CgroupV2)
+ self.assertEqual(cgroup._cgroup_name, "extension")
+ self.assertEqual(cgroup._root_cgroup_path, "/sys/fs/cgroup")
+ self.assertEqual(cgroup._cgroup_path, "/sys/fs/cgroup/system.slice/extension.service")
+ self.assertEqual(len(cgroup._enabled_controllers), 0)
+
+ def test_get_cgroup_from_relative_path_should_return_the_correct_paths_for_cgroup_v2(self):
with mock_cgroup_v2_environment(self.tmp_dir):
- with patch('azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv2._get_root_cgroup_path', return_value=None):
- cpu, memory = get_cgroup_api().get_controller_root_paths()
- self.assertIsNone(cpu, "The root cgroup path is None, so the CPU controller path should be None")
- self.assertIsNone(memory, "The root cgroup path is None, so the memory controller path should be None")
-
- def test_get_controller_root_paths_should_return_None_if_either_controller_not_enabled(self):
+ cgroup = get_cgroup_api().get_cgroup_from_relative_path(relative_path="some/relative/path", cgroup_name="test_cgroup")
+ self.assertIsInstance(cgroup, CgroupV2)
+ self.assertEqual(cgroup._cgroup_name, "test_cgroup")
+ self.assertEqual(cgroup._root_cgroup_path, "/sys/fs/cgroup")
+ self.assertEqual(cgroup._cgroup_path, "/sys/fs/cgroup/some/relative/path")
+ self.assertEqual(len(cgroup._enabled_controllers), 2)
+ self.assertIn('cpu', cgroup._enabled_controllers)
+ self.assertIn('memory', cgroup._enabled_controllers)
+
+ def test_get_cgroup_from_relative_path_should_return_empty_paths_if_root_path_empty_v2(self):
with mock_cgroup_v2_environment(self.tmp_dir):
- with patch('azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv2._get_controllers_enabled_at_root', return_value=['io', 'memory']):
- cpu, memory = get_cgroup_api().get_controller_root_paths()
- self.assertIsNone(cpu, "The CPU controller is not enabled, so the CPU controller path should be None")
- self.assertEqual(memory, '/sys/fs/cgroup', "The root cgroup for the memory controller is incorrect")
-
- with patch('azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv2._get_controllers_enabled_at_root', return_value=['cpu', 'io']):
- cpu, memory = get_cgroup_api().get_controller_root_paths()
- self.assertEqual(cpu, '/sys/fs/cgroup', "The root cgroup for the CPU controller is incorrect")
- self.assertIsNone(memory, "The memory controller is not enabled, so the memory controller path should be None")
-
- def test_get_cpu_and_memory_cgroup_relative_paths_for_process_should_return_the_cgroup_v2_relative_paths(self):
+ with patch('azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv2._get_root_cgroup_path', return_value=""):
+ cgroup = get_cgroup_api().get_cgroup_from_relative_path(relative_path="some/relative/path", cgroup_name="test_cgroup")
+ self.assertIsInstance(cgroup, CgroupV2)
+ self.assertEqual(cgroup._cgroup_name, "test_cgroup")
+ self.assertEqual(cgroup._root_cgroup_path, "")
+ self.assertEqual(cgroup._cgroup_path, "")
+ self.assertEqual(len(cgroup._enabled_controllers), 0)
+
+ def test_get_process_cgroup_should_return_the_correct_paths_for_cgroup_v2(self):
+ with mock_cgroup_v2_environment(self.tmp_dir):
+ cgroup = get_cgroup_api().get_process_cgroup(process_id="self", cgroup_name="walinuxagent")
+ self.assertIsInstance(cgroup, CgroupV2)
+ self.assertEqual(cgroup._cgroup_name, "walinuxagent")
+ self.assertEqual(cgroup._root_cgroup_path, "/sys/fs/cgroup")
+ self.assertEqual(cgroup._cgroup_path, "/sys/fs/cgroup/system.slice/walinuxagent.service")
+ self.assertEqual(len(cgroup._enabled_controllers), 2)
+ self.assertIn('cpu', cgroup._enabled_controllers)
+ self.assertIn('memory', cgroup._enabled_controllers)
+
+ def test_get_process_cgroup_should_return_empty_paths_if_root_path_empty_v2(self):
with mock_cgroup_v2_environment(self.tmp_dir):
- cpu, memory = get_cgroup_api().get_process_cgroup_relative_paths('self')
- self.assertEqual(cpu, "system.slice/walinuxagent.service", "The relative path for the CPU cgroup is incorrect")
- self.assertEqual(memory, "system.slice/walinuxagent.service", "The relative memory for the CPU cgroup is incorrect")
+ with patch('azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv2._get_root_cgroup_path', return_value=""):
+ cgroup = get_cgroup_api().get_process_cgroup(process_id="self", cgroup_name="walinuxagent")
+ self.assertIsInstance(cgroup, CgroupV2)
+ self.assertEqual(cgroup._cgroup_name, "walinuxagent")
+ self.assertEqual(cgroup._root_cgroup_path, "")
+ self.assertEqual(cgroup._cgroup_path, "")
+ self.assertEqual(len(cgroup._enabled_controllers), 0)
class SystemdCgroupsApiMockedFileSystemTestCase(_MockedFileSystemTestCase):
@@ -483,3 +500,227 @@ def test_cleanup_legacy_cgroups_should_remove_legacy_cgroups(self):
self.assertEqual(legacy_cgroups, 2, "cleanup_legacy_cgroups() did not find all the expected cgroups")
self.assertFalse(os.path.exists(legacy_cpu_cgroup), "cleanup_legacy_cgroups() did not remove the CPU legacy cgroup")
self.assertFalse(os.path.exists(legacy_memory_cgroup), "cleanup_legacy_cgroups() did not remove the memory legacy cgroup")
+
+
+class CgroupsApiv1TestCase(AgentTestCase):
+ def test_get_supported_controllers_returns_v1_controllers(self):
+ with mock_cgroup_v1_environment(self.tmp_dir):
+ cgroup = get_cgroup_api().get_process_cgroup(process_id="self", cgroup_name="walinuxagent")
+ controllers = cgroup.get_supported_controller_names()
+ self.assertEqual(len(controllers), 2)
+ self.assertIn('cpu,cpuacct', controllers)
+ self.assertIn('memory', controllers)
+
+ def test_check_in_expected_slice_returns_True_if_all_paths_in_expected_slice(self):
+ with mock_cgroup_v1_environment(self.tmp_dir):
+ cgroup = get_cgroup_api().get_process_cgroup(process_id="self", cgroup_name="walinuxagent")
+ self.assertTrue(cgroup.check_in_expected_slice(expected_slice='system.slice'))
+
+ with patch('azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv1._get_process_relative_controller_paths', return_value={'cpu,cpuacct': 'system.slice/walinuxagent.service'}):
+ cgroup = get_cgroup_api().get_process_cgroup(process_id="self", cgroup_name="walinuxagent")
+ self.assertTrue(cgroup.check_in_expected_slice(expected_slice='system.slice'))
+
+ with patch('azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv1._get_process_relative_controller_paths', return_value={}):
+ cgroup = get_cgroup_api().get_process_cgroup(process_id="self", cgroup_name="walinuxagent")
+ self.assertTrue(cgroup.check_in_expected_slice(expected_slice='system.slice'))
+
+ def test_check_in_expected_slice_returns_False_if_any_paths_not_in_expected_slice(self):
+ with mock_cgroup_v1_environment(self.tmp_dir):
+ cgroup = get_cgroup_api().get_process_cgroup(process_id="self", cgroup_name="walinuxagent")
+ self.assertFalse(cgroup.check_in_expected_slice(expected_slice='user.slice'))
+
+ with patch('azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv1._get_process_relative_controller_paths', return_value={'cpu,cpuacct': 'system.slice/walinuxagent.service', 'memory': 'user.slice/walinuxagent.service'}):
+ cgroup = get_cgroup_api().get_process_cgroup(process_id="self", cgroup_name="walinuxagent")
+ self.assertFalse(cgroup.check_in_expected_slice(expected_slice='user.slice'))
+
+ with patch('azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv1._get_process_relative_controller_paths', return_value={'cpu,cpuacct': '', 'memory': ''}):
+ cgroup = get_cgroup_api().get_process_cgroup(process_id="self", cgroup_name="walinuxagent")
+ self.assertFalse(cgroup.check_in_expected_slice(expected_slice='system.slice'))
+
+ def test_get_controllers_returns_all_supported_controllers_v1(self):
+ with mock_cgroup_v1_environment(self.tmp_dir):
+ cgroup = get_cgroup_api().get_process_cgroup(process_id="self", cgroup_name="walinuxagent")
+ controllers = cgroup.get_controllers()
+ self.assertEqual(len(controllers), 2)
+ self.assertIsInstance(controllers[0], CpuControllerV1)
+ self.assertEqual(controllers[0].name, "walinuxagent")
+ self.assertEqual(controllers[0].path, "/sys/fs/cgroup/cpu,cpuacct/system.slice/walinuxagent.service")
+ self.assertIsInstance(controllers[1], MemoryControllerV1)
+ self.assertEqual(controllers[1].name, "walinuxagent")
+ self.assertEqual(controllers[1].path, "/sys/fs/cgroup/memory/system.slice/walinuxagent.service")
+
+ def test_get_controllers_returns_only_mounted_controllers_v1(self):
+ with mock_cgroup_v1_environment(self.tmp_dir):
+ with patch('azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv1._get_controller_mountpoints', return_value={'cpu,cpuacct': '/sys/fs/cgroup/cpu,cpuacct'}):
+ cgroup = get_cgroup_api().get_process_cgroup(process_id="self", cgroup_name="walinuxagent")
+ controllers = cgroup.get_controllers()
+ self.assertEqual(len(controllers), 1)
+ self.assertIsInstance(controllers[0], CpuControllerV1)
+ self.assertEqual(controllers[0].name, "walinuxagent")
+ self.assertEqual(controllers[0].path, "/sys/fs/cgroup/cpu,cpuacct/system.slice/walinuxagent.service")
+
+ with patch('azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv1._get_controller_mountpoints', return_value={'memory': '/sys/fs/cgroup/memory'}):
+ cgroup = get_cgroup_api().get_process_cgroup(process_id="self", cgroup_name="walinuxagent")
+ controllers = cgroup.get_controllers()
+ self.assertEqual(len(controllers), 1)
+ self.assertIsInstance(controllers[0], MemoryControllerV1)
+ self.assertEqual(controllers[0].name, "walinuxagent")
+ self.assertEqual(controllers[0].path, "/sys/fs/cgroup/memory/system.slice/walinuxagent.service")
+
+ with patch('azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv1._get_controller_mountpoints', return_value={}):
+ cgroup = get_cgroup_api().get_process_cgroup(process_id="self", cgroup_name="walinuxagent")
+ controllers = cgroup.get_controllers()
+ self.assertEqual(len(controllers), 0)
+
+ def test_get_controllers_returns_only_controllers_at_expected_path_v1(self):
+ with mock_cgroup_v1_environment(self.tmp_dir):
+ with patch('azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv1._get_process_relative_controller_paths', return_value={'cpu,cpuacct': 'system.slice/walinuxagent.service', 'memory': 'unexpected/path'}):
+ cgroup = get_cgroup_api().get_process_cgroup(process_id="self", cgroup_name="walinuxagent")
+ controllers = cgroup.get_controllers(expected_relative_path="system.slice/walinuxagent.service")
+ self.assertEqual(len(controllers), 1)
+ self.assertIsInstance(controllers[0], CpuControllerV1)
+ self.assertEqual(controllers[0].name, "walinuxagent")
+ self.assertEqual(controllers[0].path, "/sys/fs/cgroup/cpu,cpuacct/system.slice/walinuxagent.service")
+
+ with patch('azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv1._get_process_relative_controller_paths', return_value={'cpu,cpuacct': 'unexpected/path', 'memory': 'unexpected/path'}):
+ cgroup = get_cgroup_api().get_process_cgroup(process_id="self", cgroup_name="walinuxagent")
+ controllers = cgroup.get_controllers(expected_relative_path="system.slice/walinuxagent.service")
+ self.assertEqual(len(controllers), 0)
+
+ def test_get_procs_path_returns_correct_path_v1(self):
+ with mock_cgroup_v1_environment(self.tmp_dir):
+ cgroup = get_cgroup_api().get_process_cgroup(process_id="self", cgroup_name="walinuxagent")
+ procs_path = cgroup.get_controller_procs_path(controller='cpu,cpuacct')
+ self.assertEqual(procs_path, "/sys/fs/cgroup/cpu,cpuacct/system.slice/walinuxagent.service/cgroup.procs")
+
+ procs_path = cgroup.get_controller_procs_path(controller='memory')
+ self.assertEqual(procs_path, "/sys/fs/cgroup/memory/system.slice/walinuxagent.service/cgroup.procs")
+
+ def test_get_processes_returns_processes_at_all_controller_paths_v1(self):
+ with mock_cgroup_v1_environment(self.tmp_dir):
+ cgroup = get_cgroup_api().get_process_cgroup(process_id="self", cgroup_name="walinuxagent")
+ procs = cgroup.get_processes()
+ self.assertEqual(len(procs), 3)
+ self.assertIn(int(123), procs)
+ self.assertIn(int(234), procs)
+ self.assertIn(int(345), procs)
+
+ def test_get_processes_returns_empty_list_if_no_controllers_mounted_v1(self):
+ with mock_cgroup_v1_environment(self.tmp_dir):
+ with patch('azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv1._get_controller_mountpoints', return_value={}):
+ cgroup = get_cgroup_api().get_process_cgroup(process_id="self", cgroup_name="walinuxagent")
+ procs = cgroup.get_processes()
+ self.assertIsInstance(procs, list)
+ self.assertEqual(len(procs), 0)
+
+ def test_get_processes_returns_empty_list_if_procs_path_empty_v1(self):
+ with mock_cgroup_v1_environment(self.tmp_dir):
+ with patch('azurelinuxagent.ga.cgroupapi.CgroupV1.get_controller_procs_path', return_value=""):
+ cgroup = get_cgroup_api().get_process_cgroup(process_id="self", cgroup_name="walinuxagent")
+ procs = cgroup.get_processes()
+ self.assertIsInstance(procs, list)
+ self.assertEqual(len(procs), 0)
+
+
+class CgroupsApiv2TestCase(AgentTestCase):
+ def test_get_supported_controllers_returns_v2_controllers(self):
+ with mock_cgroup_v2_environment(self.tmp_dir):
+ cgroup = get_cgroup_api().get_process_cgroup(process_id="self", cgroup_name="walinuxagent")
+ controllers = cgroup.get_supported_controller_names()
+ self.assertEqual(len(controllers), 2)
+ self.assertIn('cpu', controllers)
+ self.assertIn('memory', controllers)
+
+ def test_check_in_expected_slice_returns_True_if_cgroup_path_in_expected_slice(self):
+ with mock_cgroup_v2_environment(self.tmp_dir):
+ cgroup = get_cgroup_api().get_process_cgroup(process_id="self", cgroup_name="walinuxagent")
+ self.assertTrue(cgroup.check_in_expected_slice(expected_slice='system.slice'))
+
+ def test_check_in_expected_slice_returns_False_if_cgroup_path_not_in_expected_slice(self):
+ with mock_cgroup_v2_environment(self.tmp_dir):
+ cgroup = get_cgroup_api().get_process_cgroup(process_id="self", cgroup_name="walinuxagent")
+ self.assertFalse(cgroup.check_in_expected_slice(expected_slice='user.slice'))
+
+ with patch('azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv2._get_process_relative_cgroup_path', return_value=""):
+ cgroup = get_cgroup_api().get_process_cgroup(process_id="self", cgroup_name="walinuxagent")
+ self.assertFalse(cgroup.check_in_expected_slice(expected_slice='system.slice'))
+
+ def test_get_controllers_returns_all_supported_controllers_v2(self):
+ with mock_cgroup_v2_environment(self.tmp_dir):
+ cgroup = get_cgroup_api().get_process_cgroup(process_id="self", cgroup_name="walinuxagent")
+ controllers = cgroup.get_controllers()
+ self.assertEqual(len(controllers), 2)
+ self.assertIsInstance(controllers[0], CpuControllerV2)
+ self.assertEqual(controllers[0].name, "walinuxagent")
+ self.assertEqual(controllers[0].path, "/sys/fs/cgroup/system.slice/walinuxagent.service")
+ self.assertIsInstance(controllers[1], MemoryControllerV2)
+ self.assertEqual(controllers[1].name, "walinuxagent")
+ self.assertEqual(controllers[1].path, "/sys/fs/cgroup/system.slice/walinuxagent.service")
+
+ def test_get_controllers_returns_only_enabled_controllers_v2(self):
+ with mock_cgroup_v2_environment(self.tmp_dir):
+ with patch('azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv2._get_controllers_enabled_at_root', return_value=["cpu"]):
+ cgroup = get_cgroup_api().get_process_cgroup(process_id="self", cgroup_name="walinuxagent")
+ controllers = cgroup.get_controllers()
+ self.assertEqual(len(controllers), 1)
+ self.assertIsInstance(controllers[0], CpuControllerV2)
+ self.assertEqual(controllers[0].name, "walinuxagent")
+ self.assertEqual(controllers[0].path, "/sys/fs/cgroup/system.slice/walinuxagent.service")
+
+ with patch('azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv2._get_controllers_enabled_at_root', return_value=["memory"]):
+ cgroup = get_cgroup_api().get_process_cgroup(process_id="self", cgroup_name="walinuxagent")
+ controllers = cgroup.get_controllers()
+ self.assertEqual(len(controllers), 1)
+ self.assertIsInstance(controllers[0], MemoryControllerV2)
+ self.assertEqual(controllers[0].name, "walinuxagent")
+ self.assertEqual(controllers[0].path, "/sys/fs/cgroup/system.slice/walinuxagent.service")
+
+ with patch('azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv2._get_controllers_enabled_at_root', return_value=[]):
+ cgroup = get_cgroup_api().get_process_cgroup(process_id="self", cgroup_name="walinuxagent")
+ controllers = cgroup.get_controllers()
+ self.assertEqual(len(controllers), 0)
+
+ def test_get_controllers_returns_empty_if_cgroup_path_is_empty_v2(self):
+ with mock_cgroup_v2_environment(self.tmp_dir):
+ mock_cgroup_empty_path = CgroupV2(cgroup_name="test", root_cgroup_path="/sys/fs/cgroup", cgroup_path="", enabled_controllers=["cpu", "memory"])
+ with patch("azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv2.get_process_cgroup", return_value=mock_cgroup_empty_path):
+ cgroup = get_cgroup_api().get_process_cgroup(process_id="self", cgroup_name="walinuxagent")
+ controllers = cgroup.get_controllers()
+ self.assertEqual(len(controllers), 0)
+
+ def test_get_controllers_returns_only_controllers_at_expected_path_v2(self):
+ with mock_cgroup_v2_environment(self.tmp_dir):
+ mock_cgroup_unexpected_path = CgroupV2(cgroup_name="test", root_cgroup_path="/sys/fs/cgroup", cgroup_path="/sys/fs/cgroup/unexpected/path", enabled_controllers=["cpu", "memory"])
+ with patch("azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv2.get_process_cgroup", return_value=mock_cgroup_unexpected_path):
+ cgroup = get_cgroup_api().get_process_cgroup(process_id="self", cgroup_name="walinuxagent")
+ controllers = cgroup.get_controllers(expected_relative_path="system.slice/walinuxagent.service")
+ self.assertEqual(len(controllers), 0)
+
+ def test_get_procs_path_returns_empty_if_root_cgroup_empty_v2(self):
+ with mock_cgroup_v2_environment(self.tmp_dir):
+ with patch('azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv2._get_root_cgroup_path', return_value=""):
+ cgroup = get_cgroup_api().get_process_cgroup(process_id="self", cgroup_name="walinuxagent")
+ procs_path = cgroup.get_procs_path()
+ self.assertEqual(procs_path, "")
+
+ def test_get_procs_path_returns_correct_path_v2(self):
+ with mock_cgroup_v2_environment(self.tmp_dir):
+ cgroup = get_cgroup_api().get_process_cgroup(process_id="self", cgroup_name="walinuxagent")
+ procs_path = cgroup.get_procs_path()
+ self.assertEqual(procs_path, "/sys/fs/cgroup/system.slice/walinuxagent.service/cgroup.procs")
+
+ def test_get_processes_returns_processes_at_all_controller_paths_v2(self):
+ with mock_cgroup_v2_environment(self.tmp_dir):
+ cgroup = get_cgroup_api().get_process_cgroup(process_id="self", cgroup_name="walinuxagent")
+ procs = cgroup.get_processes()
+ self.assertEqual(len(procs), 3)
+ self.assertIn(int(123), procs)
+ self.assertIn(int(234), procs)
+ self.assertIn(int(345), procs)
+
+ def test_get_processes_returns_empty_list_if_root_cgroup_empty_v2(self):
+ with mock_cgroup_v2_environment(self.tmp_dir):
+ with patch('azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv2._get_root_cgroup_path', return_value=""):
+ cgroup = get_cgroup_api().get_process_cgroup(process_id="self", cgroup_name="walinuxagent")
+ procs = cgroup.get_processes()
+ self.assertEqual(len(procs), 0)
diff --git a/tests/ga/test_cgroupconfigurator.py b/tests/ga/test_cgroupconfigurator.py
index 5b4b0976e..1ea7d9325 100644
--- a/tests/ga/test_cgroupconfigurator.py
+++ b/tests/ga/test_cgroupconfigurator.py
@@ -27,13 +27,14 @@
import threading
from azurelinuxagent.common import conf
-from azurelinuxagent.ga.cgroup import AGENT_NAME_TELEMETRY, MetricsCounter, MetricValue, MetricsCategory, CpuCgroup
+from azurelinuxagent.ga.cgroupcontroller import AGENT_NAME_TELEMETRY, MetricsCounter, MetricValue, MetricsCategory
from azurelinuxagent.ga.cgroupconfigurator import CGroupConfigurator, DisableCgroups
from azurelinuxagent.ga.cgroupstelemetry import CGroupsTelemetry
from azurelinuxagent.common.event import WALAEventOperation
from azurelinuxagent.common.exception import CGroupsException, AgentMemoryExceededException
from azurelinuxagent.common.future import ustr
from azurelinuxagent.common.utils import shellutil, fileutil
+from azurelinuxagent.ga.cpucontroller import CpuControllerV1
from tests.lib.mock_environment import MockCommand
from tests.lib.mock_cgroup_environment import mock_cgroup_v1_environment, UnitFilePaths, mock_cgroup_v2_environment
from tests.lib.tools import AgentTestCase, patch, mock_sleep, data_dir, is_python_version_26_or_34, skip_if_predicate_true
@@ -220,26 +221,20 @@ def test_initialize_should_create_unit_files_when_the_agent_service_file_is_not_
self.assertTrue(os.path.exists(agent_drop_in_file_cpu_accounting), "{0} was not created".format(agent_drop_in_file_cpu_accounting))
self.assertTrue(os.path.exists(agent_drop_in_file_memory_accounting), "{0} was not created".format(agent_drop_in_file_memory_accounting))
- def test_initialize_should_update_logcollector_memorylimit(self):
+ def test_initialize_should_clear_logcollector_slice(self):
with self._get_cgroup_configurator(initialize=False) as configurator:
log_collector_unit_file = configurator.mocks.get_mapped_path(UnitFilePaths.logcollector)
- original_memory_limit = "MemoryLimit=30M"
- # The mock creates the slice unit file with memory limit
+ # The mock creates the slice unit file
configurator.mocks.add_data_file(os.path.join(data_dir, 'init', "azure-walinuxagent-logcollector.slice"),
UnitFilePaths.logcollector)
- if not os.path.exists(log_collector_unit_file):
- raise Exception("{0} should have been created during test setup".format(log_collector_unit_file))
- if not fileutil.findre_in_file(log_collector_unit_file, original_memory_limit):
- raise Exception("MemoryLimit was not set correctly. Expected: {0}. Got:\n{1}".format(
- original_memory_limit, fileutil.read_file(log_collector_unit_file)))
+
+ self.assertTrue(os.path.exists(log_collector_unit_file), "{0} was not created".format(log_collector_unit_file))
configurator.initialize()
- # initialize() should update the unit file to remove the memory limit
- self.assertFalse(fileutil.findre_in_file(log_collector_unit_file, original_memory_limit),
- "Log collector slice unit file was not updated correctly. Expected no memory limit. Got:\n{0}".format(
- fileutil.read_file(log_collector_unit_file)))
+ # initialize() should remove the unit file
+        self.assertFalse(os.path.exists(log_collector_unit_file), "{0} should have been removed by initialize()".format(log_collector_unit_file))
def test_setup_extension_slice_should_create_unit_files(self):
with self._get_cgroup_configurator() as configurator:
@@ -272,7 +267,7 @@ def test_remove_extension_slice_should_remove_unit_files(self):
CGroupsTelemetry._tracked['/sys/fs/cgroup/cpu,cpuacct/azure.slice/azure-vmextensions.slice/' \
'azure-vmextensions-Microsoft.CPlat.Extension.slice'] = \
- CpuCgroup('Microsoft.CPlat.Extension',
+ CpuControllerV1('Microsoft.CPlat.Extension',
'/sys/fs/cgroup/cpu,cpuacct/azure.slice/azure-vmextensions.slice/azure-vmextensions-Microsoft.CPlat.Extension.slice')
configurator.remove_extension_slice(extension_name="Microsoft.CPlat.Extension")
@@ -369,10 +364,10 @@ def test_disable_should_reset_cpu_quota_for_all_cgroups(self):
configurator.setup_extension_slice(extension_name=extension_name, cpu_quota=5)
configurator.set_extension_services_cpu_memory_quota(service_list)
CGroupsTelemetry._tracked['/sys/fs/cgroup/cpu,cpuacct/system.slice/extension.service'] = \
- CpuCgroup('extension.service', '/sys/fs/cgroup/cpu,cpuacct/system.slice/extension.service')
+ CpuControllerV1('extension.service', '/sys/fs/cgroup/cpu,cpuacct/system.slice/extension.service')
CGroupsTelemetry._tracked['/sys/fs/cgroup/cpu,cpuacct/azure.slice/azure-vmextensions.slice/' \
'azure-vmextensions-Microsoft.CPlat.Extension.slice'] = \
- CpuCgroup('Microsoft.CPlat.Extension',
+ CpuControllerV1('Microsoft.CPlat.Extension',
'/sys/fs/cgroup/cpu,cpuacct/azure.slice/azure-vmextensions.slice/azure-vmextensions-Microsoft.CPlat.Extension.slice')
configurator.disable("UNIT TEST", DisableCgroups.ALL)
@@ -717,7 +712,8 @@ def test_it_should_stop_tracking_extension_services_cgroups(self):
with self._get_cgroup_configurator() as configurator:
with patch("os.path.exists") as mock_path:
mock_path.return_value = True
- CGroupsTelemetry.track_cgroup(CpuCgroup('extension.service', '/sys/fs/cgroup/cpu,cpuacct/system.slice/extension.service'))
+ CGroupsTelemetry.track_cgroup_controller(
+ CpuControllerV1('extension.service', '/sys/fs/cgroup/cpu,cpuacct/system.slice/extension.service'))
configurator.stop_tracking_extension_services_cgroups(service_list)
tracked = CGroupsTelemetry._tracked
@@ -776,7 +772,7 @@ def side_effect(path):
with patch("os.path.exists") as mock_path:
mock_path.side_effect = side_effect
CGroupsTelemetry._tracked['/sys/fs/cgroup/cpu,cpuacct/system.slice/extension.service'] = \
- CpuCgroup('extension.service', '/sys/fs/cgroup/cpu,cpuacct/system.slice/extension.service')
+ CpuControllerV1('extension.service', '/sys/fs/cgroup/cpu,cpuacct/system.slice/extension.service')
configurator.stop_tracking_unit_cgroups("extension.service")
tracked = CGroupsTelemetry._tracked
@@ -911,7 +907,7 @@ def get_completed_process():
agent_processes = [os.getppid(), os.getpid()] + agent_command_processes + [start_extension.systemd_run_pid]
other_processes = [1, get_completed_process()] + extension_processes
- with patch("azurelinuxagent.ga.cgroupapi._SystemdCgroupApi.get_processes_in_cgroup", return_value=agent_processes + other_processes):
+ with patch("azurelinuxagent.ga.cgroupapi.CgroupV1.get_processes", return_value=agent_processes + other_processes):
with self.assertRaises(CGroupsException) as context_manager:
configurator._check_processes_in_agent_cgroup()
@@ -1012,8 +1008,15 @@ def test_check_agent_memory_usage_should_raise_a_cgroups_exception_when_the_limi
with self.assertRaises(AgentMemoryExceededException) as context_manager:
with self._get_cgroup_configurator() as configurator:
- with patch("azurelinuxagent.ga.cgroup.MemoryCgroup.get_tracked_metrics") as tracked_metrics:
+ with patch("azurelinuxagent.ga.memorycontroller.MemoryControllerV1.get_tracked_metrics") as tracked_metrics:
tracked_metrics.return_value = metrics
configurator.check_agent_memory_usage()
self.assertIn("The agent memory limit {0} bytes exceeded".format(conf.get_agent_memory_quota()), ustr(context_manager.exception), "An incorrect exception was raised")
+
+ def test_get_log_collector_properties_should_return_correct_props(self):
+ with self._get_cgroup_configurator() as configurator:
+ self.assertEqual(configurator.get_logcollector_unit_properties(), ["--property=CPUAccounting=yes", "--property=MemoryAccounting=yes", "--property=CPUQuota=5%"])
+
+ with self._get_cgroup_configurator_v2() as configurator:
+ self.assertEqual(configurator.get_logcollector_unit_properties(), ["--property=CPUAccounting=yes", "--property=MemoryAccounting=yes", "--property=CPUQuota=5%", "--property=MemoryHigh=170M"])
diff --git a/tests/ga/test_cgroupcontroller.py b/tests/ga/test_cgroupcontroller.py
new file mode 100644
index 000000000..a01237e96
--- /dev/null
+++ b/tests/ga/test_cgroupcontroller.py
@@ -0,0 +1,55 @@
+# Copyright 2018 Microsoft Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Requires Python 2.4+ and Openssl 1.0+
+#
+
+from __future__ import print_function
+
+import os
+import random
+
+from azurelinuxagent.ga.cgroupcontroller import _CgroupController
+from tests.lib.tools import AgentTestCase, patch
+
+
+def consume_cpu_time():
+ waste = 0
+ for x in range(1, 200000): # pylint: disable=unused-variable
+ waste += random.random()
+ return waste
+
+
+class TestCgroupController(AgentTestCase):
+ def test_is_active(self):
+ test_metrics = _CgroupController("test_extension", self.tmp_dir)
+
+ with open(os.path.join(self.tmp_dir, "cgroup.procs"), mode="wb") as tasks:
+ tasks.write(str(1000).encode())
+
+ self.assertEqual(True, test_metrics.is_active())
+
+ @patch("azurelinuxagent.common.logger.periodic_warn")
+ def test_is_active_file_not_present(self, patch_periodic_warn):
+ test_metrics = _CgroupController("test_extension", self.tmp_dir)
+ self.assertFalse(test_metrics.is_active())
+
+ self.assertEqual(0, patch_periodic_warn.call_count)
+
+ @patch("azurelinuxagent.common.logger.periodic_warn")
+ def test_is_active_incorrect_file(self, patch_periodic_warn):
+ open(os.path.join(self.tmp_dir, "cgroup.procs"), mode="wb").close()
+ test_metrics = _CgroupController("test_extension", os.path.join(self.tmp_dir, "cgroup.procs"))
+ self.assertEqual(False, test_metrics.is_active())
+ self.assertEqual(1, patch_periodic_warn.call_count)
diff --git a/tests/ga/test_cgroups.py b/tests/ga/test_cgroups.py
deleted file mode 100644
index 0ffcfed1b..000000000
--- a/tests/ga/test_cgroups.py
+++ /dev/null
@@ -1,242 +0,0 @@
-# Copyright 2018 Microsoft Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# Requires Python 2.4+ and Openssl 1.0+
-#
-
-from __future__ import print_function
-
-import errno
-import os
-import random
-import shutil
-
-from azurelinuxagent.ga.cgroup import CpuCgroup, MemoryCgroup, MetricsCounter, CounterNotFound
-from azurelinuxagent.common.exception import CGroupsException
-from azurelinuxagent.common.osutil import get_osutil
-from azurelinuxagent.common.utils import fileutil
-from tests.lib.tools import AgentTestCase, patch, data_dir
-
-
-def consume_cpu_time():
- waste = 0
- for x in range(1, 200000): # pylint: disable=unused-variable
- waste += random.random()
- return waste
-
-
-class TestCGroup(AgentTestCase):
- def test_is_active(self):
- test_cgroup = CpuCgroup("test_extension", self.tmp_dir)
- self.assertEqual(False, test_cgroup.is_active())
-
- with open(os.path.join(self.tmp_dir, "tasks"), mode="wb") as tasks:
- tasks.write(str(1000).encode())
-
- self.assertEqual(True, test_cgroup.is_active())
-
- @patch("azurelinuxagent.common.logger.periodic_warn")
- def test_is_active_file_not_present(self, patch_periodic_warn):
- test_cgroup = CpuCgroup("test_extension", self.tmp_dir)
- self.assertEqual(False, test_cgroup.is_active())
-
- test_cgroup = MemoryCgroup("test_extension", os.path.join(self.tmp_dir, "this_cgroup_does_not_exist"))
- self.assertEqual(False, test_cgroup.is_active())
-
- self.assertEqual(0, patch_periodic_warn.call_count)
-
- @patch("azurelinuxagent.common.logger.periodic_warn")
- def test_is_active_incorrect_file(self, patch_periodic_warn):
- open(os.path.join(self.tmp_dir, "tasks"), mode="wb").close()
- test_cgroup = CpuCgroup("test_extension", os.path.join(self.tmp_dir, "tasks"))
- self.assertEqual(False, test_cgroup.is_active())
- self.assertEqual(1, patch_periodic_warn.call_count)
-
-
-class TestCpuCgroup(AgentTestCase):
- @classmethod
- def setUpClass(cls):
- AgentTestCase.setUpClass()
-
- original_read_file = fileutil.read_file
-
- #
- # Tests that need to mock the contents of /proc/stat or */cpuacct/stat can set this map from
- # the file that needs to be mocked to the mock file (each test starts with an empty map). If
- # an Exception is given instead of a path, the exception is raised
- #
- cls.mock_read_file_map = {}
-
- def mock_read_file(filepath, **args):
- if filepath in cls.mock_read_file_map:
- mapped_value = cls.mock_read_file_map[filepath]
- if isinstance(mapped_value, Exception):
- raise mapped_value
- filepath = mapped_value
- return original_read_file(filepath, **args)
-
- cls.mock_read_file = patch("azurelinuxagent.common.utils.fileutil.read_file", side_effect=mock_read_file)
- cls.mock_read_file.start()
-
- @classmethod
- def tearDownClass(cls):
- cls.mock_read_file.stop()
- AgentTestCase.tearDownClass()
-
- def setUp(self):
- AgentTestCase.setUp(self)
- TestCpuCgroup.mock_read_file_map.clear()
-
- def test_initialize_cpu_usage_should_set_current_cpu_usage(self):
- cgroup = CpuCgroup("test", "/sys/fs/cgroup/cpu/system.slice/test")
-
- TestCpuCgroup.mock_read_file_map = {
- "/proc/stat": os.path.join(data_dir, "cgroups", "proc_stat_t0"),
- os.path.join(cgroup.path, "cpuacct.stat"): os.path.join(data_dir, "cgroups", "cpuacct.stat_t0")
- }
-
- cgroup.initialize_cpu_usage()
-
- self.assertEqual(cgroup._current_cgroup_cpu, 63763)
- self.assertEqual(cgroup._current_system_cpu, 5496872)
-
- def test_get_cpu_usage_should_return_the_cpu_usage_since_its_last_invocation(self):
- osutil = get_osutil()
-
- cgroup = CpuCgroup("test", "/sys/fs/cgroup/cpu/system.slice/test")
-
- TestCpuCgroup.mock_read_file_map = {
- "/proc/stat": os.path.join(data_dir, "cgroups", "proc_stat_t0"),
- os.path.join(cgroup.path, "cpuacct.stat"): os.path.join(data_dir, "cgroups", "cpuacct.stat_t0")
- }
-
- cgroup.initialize_cpu_usage()
-
- TestCpuCgroup.mock_read_file_map = {
- "/proc/stat": os.path.join(data_dir, "cgroups", "proc_stat_t1"),
- os.path.join(cgroup.path, "cpuacct.stat"): os.path.join(data_dir, "cgroups", "cpuacct.stat_t1")
- }
-
- cpu_usage = cgroup.get_cpu_usage()
-
- self.assertEqual(cpu_usage, round(100.0 * 0.000307697876885 * osutil.get_processor_cores(), 3))
-
- TestCpuCgroup.mock_read_file_map = {
- "/proc/stat": os.path.join(data_dir, "cgroups", "proc_stat_t2"),
- os.path.join(cgroup.path, "cpuacct.stat"): os.path.join(data_dir, "cgroups", "cpuacct.stat_t2")
- }
-
- cpu_usage = cgroup.get_cpu_usage()
-
- self.assertEqual(cpu_usage, round(100.0 * 0.000445181085968 * osutil.get_processor_cores(), 3))
-
- def test_initialize_cpu_usage_should_set_the_cgroup_usage_to_0_when_the_cgroup_does_not_exist(self):
- cgroup = CpuCgroup("test", "/sys/fs/cgroup/cpu/system.slice/test")
-
- io_error_2 = IOError()
- io_error_2.errno = errno.ENOENT # "No such directory"
-
- TestCpuCgroup.mock_read_file_map = {
- "/proc/stat": os.path.join(data_dir, "cgroups", "proc_stat_t0"),
- os.path.join(cgroup.path, "cpuacct.stat"): io_error_2
- }
-
- cgroup.initialize_cpu_usage()
-
- self.assertEqual(cgroup._current_cgroup_cpu, 0)
- self.assertEqual(cgroup._current_system_cpu, 5496872) # check the system usage just for test sanity
-
- def test_initialize_cpu_usage_should_raise_an_exception_when_called_more_than_once(self):
- cgroup = CpuCgroup("test", "/sys/fs/cgroup/cpu/system.slice/test")
-
- TestCpuCgroup.mock_read_file_map = {
- "/proc/stat": os.path.join(data_dir, "cgroups", "proc_stat_t0"),
- os.path.join(cgroup.path, "cpuacct.stat"): os.path.join(data_dir, "cgroups", "cpuacct.stat_t0")
- }
-
- cgroup.initialize_cpu_usage()
-
- with self.assertRaises(CGroupsException):
- cgroup.initialize_cpu_usage()
-
- def test_get_cpu_usage_should_raise_an_exception_when_initialize_cpu_usage_has_not_been_invoked(self):
- cgroup = CpuCgroup("test", "/sys/fs/cgroup/cpu/system.slice/test")
-
- with self.assertRaises(CGroupsException):
- cpu_usage = cgroup.get_cpu_usage() # pylint: disable=unused-variable
-
- def test_get_throttled_time_should_return_the_value_since_its_last_invocation(self):
- test_file = os.path.join(self.tmp_dir, "cpu.stat")
- shutil.copyfile(os.path.join(data_dir, "cgroups", "cpu.stat_t0"), test_file) # throttled_time = 50
- cgroup = CpuCgroup("test", self.tmp_dir)
- cgroup.initialize_cpu_usage()
- shutil.copyfile(os.path.join(data_dir, "cgroups", "cpu.stat_t1"), test_file) # throttled_time = 2075541442327
-
- throttled_time = cgroup.get_cpu_throttled_time()
-
- self.assertEqual(throttled_time, float(2075541442327 - 50) / 1E9, "The value of throttled_time is incorrect")
-
- def test_get_tracked_metrics_should_return_the_throttled_time(self):
- cgroup = CpuCgroup("test", os.path.join(data_dir, "cgroups"))
- cgroup.initialize_cpu_usage()
-
- def find_throttled_time(metrics):
- return [m for m in metrics if m.counter == MetricsCounter.THROTTLED_TIME]
-
- found = find_throttled_time(cgroup.get_tracked_metrics())
- self.assertTrue(len(found) == 0, "get_tracked_metrics should not fetch the throttled time by default. Found: {0}".format(found))
-
- found = find_throttled_time(cgroup.get_tracked_metrics(track_throttled_time=True))
- self.assertTrue(len(found) == 1, "get_tracked_metrics should have fetched the throttled time by default. Found: {0}".format(found))
-
-
-class TestMemoryCgroup(AgentTestCase):
- def test_get_metrics(self):
- test_mem_cg = MemoryCgroup("test_extension", os.path.join(data_dir, "cgroups", "memory_mount"))
-
- memory_usage = test_mem_cg.get_memory_usage()
- self.assertEqual(150000, memory_usage)
-
- max_memory_usage = test_mem_cg.get_max_memory_usage()
- self.assertEqual(1000000, max_memory_usage)
-
- swap_memory_usage = test_mem_cg.try_swap_memory_usage()
- self.assertEqual(20000, swap_memory_usage)
-
- def test_get_metrics_when_files_not_present(self):
- test_mem_cg = MemoryCgroup("test_extension", os.path.join(data_dir, "cgroups"))
-
- with self.assertRaises(IOError) as e:
- test_mem_cg.get_memory_usage()
-
- self.assertEqual(e.exception.errno, errno.ENOENT)
-
- with self.assertRaises(IOError) as e:
- test_mem_cg.get_max_memory_usage()
-
- self.assertEqual(e.exception.errno, errno.ENOENT)
-
- with self.assertRaises(IOError) as e:
- test_mem_cg.try_swap_memory_usage()
-
- self.assertEqual(e.exception.errno, errno.ENOENT)
-
- def test_get_memory_usage_counters_not_found(self):
- test_mem_cg = MemoryCgroup("test_extension", os.path.join(data_dir, "cgroups", "missing_memory_counters"))
-
- with self.assertRaises(CounterNotFound):
- test_mem_cg.get_memory_usage()
-
- swap_memory_usage = test_mem_cg.try_swap_memory_usage()
- self.assertEqual(0, swap_memory_usage)
diff --git a/tests/ga/test_cgroupstelemetry.py b/tests/ga/test_cgroupstelemetry.py
index 26fcecbf6..ab4e33048 100644
--- a/tests/ga/test_cgroupstelemetry.py
+++ b/tests/ga/test_cgroupstelemetry.py
@@ -19,9 +19,11 @@
import random
import time
-from azurelinuxagent.ga.cgroup import CpuCgroup, MemoryCgroup
+from azurelinuxagent.ga.cgroupcontroller import MetricsCounter
from azurelinuxagent.ga.cgroupstelemetry import CGroupsTelemetry
from azurelinuxagent.common.utils import fileutil
+from azurelinuxagent.ga.cpucontroller import CpuControllerV1
+from azurelinuxagent.ga.memorycontroller import MemoryControllerV1
from tests.lib.tools import AgentTestCase, data_dir, patch
@@ -80,9 +82,9 @@ def setUpClass(cls):
def mock_read_file(filepath, **args):
if filepath == "/proc/stat":
- filepath = os.path.join(data_dir, "cgroups", "proc_stat_t0")
+ filepath = os.path.join(data_dir, "cgroups", "v1", "proc_stat_t0")
elif filepath.endswith("/cpuacct.stat"):
- filepath = os.path.join(data_dir, "cgroups", "cpuacct.stat_t0")
+ filepath = os.path.join(data_dir, "cgroups", "v1", "cpuacct.stat_t0")
return original_read_file(filepath, **args)
cls._mock_read_cpu_cgroup_file = patch("azurelinuxagent.common.utils.fileutil.read_file", side_effect=mock_read_file)
@@ -103,76 +105,81 @@ def tearDown(self):
CGroupsTelemetry.reset()
@staticmethod
- def _track_new_extension_cgroups(num_extensions):
+ def _track_new_extension_cgroup_controllers(num_extensions):
for i in range(num_extensions):
- dummy_cpu_cgroup = CpuCgroup("dummy_extension_{0}".format(i), "dummy_cpu_path_{0}".format(i))
- CGroupsTelemetry.track_cgroup(dummy_cpu_cgroup)
+ dummy_cpu_controller = CpuControllerV1("dummy_extension_{0}".format(i), "dummy_cpu_path_{0}".format(i))
+ CGroupsTelemetry.track_cgroup_controller(dummy_cpu_controller)
- dummy_memory_cgroup = MemoryCgroup("dummy_extension_{0}".format(i), "dummy_memory_path_{0}".format(i))
- CGroupsTelemetry.track_cgroup(dummy_memory_cgroup)
+ dummy_memory_controller = MemoryControllerV1("dummy_extension_{0}".format(i), "dummy_memory_path_{0}".format(i))
+ CGroupsTelemetry.track_cgroup_controller(dummy_memory_controller)
- def _assert_cgroups_are_tracked(self, num_extensions):
+ def _assert_cgroup_controllers_are_tracked(self, num_extensions):
for i in range(num_extensions):
self.assertTrue(CGroupsTelemetry.is_tracked("dummy_cpu_path_{0}".format(i)))
self.assertTrue(CGroupsTelemetry.is_tracked("dummy_memory_path_{0}".format(i)))
- def _assert_polled_metrics_equal(self, metrics, cpu_metric_value, memory_metric_value, max_memory_metric_value, swap_memory_value):
+ def _assert_polled_metrics_equal(self, metrics, cpu_metric_value, current_total_memory_metric_value, current_anon_memory_metric_value, current_cache_memory_metric_value, max_memory_metric_value, swap_memory_value):
for metric in metrics:
self.assertIn(metric.category, ["CPU", "Memory"])
if metric.category == "CPU":
self.assertEqual(metric.counter, "% Processor Time")
self.assertEqual(metric.value, cpu_metric_value)
if metric.category == "Memory":
- self.assertIn(metric.counter, ["Total Memory Usage", "Max Memory Usage", "Swap Memory Usage"])
- if metric.counter == "Total Memory Usage":
- self.assertEqual(metric.value, memory_metric_value)
- elif metric.counter == "Max Memory Usage":
+ self.assertIn(metric.counter, [MetricsCounter.TOTAL_MEM_USAGE, MetricsCounter.ANON_MEM_USAGE, MetricsCounter.CACHE_MEM_USAGE, MetricsCounter.MAX_MEM_USAGE, MetricsCounter.SWAP_MEM_USAGE])
+ if metric.counter == MetricsCounter.TOTAL_MEM_USAGE:
+ self.assertEqual(metric.value, current_total_memory_metric_value)
+ elif metric.counter == MetricsCounter.ANON_MEM_USAGE:
+ self.assertEqual(metric.value, current_anon_memory_metric_value)
+ elif metric.counter == MetricsCounter.CACHE_MEM_USAGE:
+ self.assertEqual(metric.value, current_cache_memory_metric_value)
+ elif metric.counter == MetricsCounter.MAX_MEM_USAGE:
self.assertEqual(metric.value, max_memory_metric_value)
- elif metric.counter == "Swap Memory Usage":
+ elif metric.counter == MetricsCounter.SWAP_MEM_USAGE:
self.assertEqual(metric.value, swap_memory_value)
def test_telemetry_polling_with_active_cgroups(self, *args): # pylint: disable=unused-argument
num_extensions = 3
- self._track_new_extension_cgroups(num_extensions)
-
- with patch("azurelinuxagent.ga.cgroup.MemoryCgroup.get_max_memory_usage") as patch_get_memory_max_usage:
- with patch("azurelinuxagent.ga.cgroup.MemoryCgroup.get_memory_usage") as patch_get_memory_usage:
- with patch("azurelinuxagent.ga.cgroup.MemoryCgroup.get_memory_usage") as patch_get_memory_usage:
- with patch("azurelinuxagent.ga.cgroup.MemoryCgroup.try_swap_memory_usage") as patch_try_swap_memory_usage:
- with patch("azurelinuxagent.ga.cgroup.CpuCgroup.get_cpu_usage") as patch_get_cpu_usage:
- with patch("azurelinuxagent.ga.cgroup.CGroup.is_active") as patch_is_active:
- patch_is_active.return_value = True
-
- current_cpu = 30
- current_memory = 209715200
- current_max_memory = 471859200
- current_swap_memory = 20971520
-
- # 1 CPU metric + 1 Current Memory + 1 Max memory + 1 swap memory
- num_of_metrics_per_extn_expected = 4
- patch_get_cpu_usage.return_value = current_cpu
- patch_get_memory_usage.return_value = current_memory # example 200 MB
- patch_get_memory_max_usage.return_value = current_max_memory # example 450 MB
- patch_try_swap_memory_usage.return_value = current_swap_memory # example 20MB
- num_polls = 12
-
- for data_count in range(1, num_polls + 1): # pylint: disable=unused-variable
- metrics = CGroupsTelemetry.poll_all_tracked()
-
- self.assertEqual(len(metrics), num_extensions * num_of_metrics_per_extn_expected)
- self._assert_polled_metrics_equal(metrics, current_cpu, current_memory, current_max_memory, current_swap_memory)
-
- @patch("azurelinuxagent.ga.cgroup.MemoryCgroup.get_max_memory_usage", side_effect=raise_ioerror)
- @patch("azurelinuxagent.ga.cgroup.MemoryCgroup.get_memory_usage", side_effect=raise_ioerror)
- @patch("azurelinuxagent.ga.cgroup.CpuCgroup.get_cpu_usage", side_effect=raise_ioerror)
- @patch("azurelinuxagent.ga.cgroup.CGroup.is_active", return_value=False)
+ self._track_new_extension_cgroup_controllers(num_extensions)
+
+ with patch("azurelinuxagent.ga.memorycontroller.MemoryControllerV1.get_max_memory_usage") as patch_get_memory_max_usage:
+ with patch("azurelinuxagent.ga.memorycontroller.MemoryControllerV1.get_memory_usage") as patch_get_memory_usage:
+ with patch("azurelinuxagent.ga.memorycontroller.MemoryControllerV1.try_swap_memory_usage") as patch_try_swap_memory_usage:
+ with patch("azurelinuxagent.ga.cpucontroller.CpuControllerV1.get_cpu_usage") as patch_get_cpu_usage:
+ with patch("azurelinuxagent.ga.cgroupcontroller._CgroupController.is_active") as patch_is_active:
+ patch_is_active.return_value = True
+
+ current_cpu = 30
+ current_anon_memory = 209715200
+ current_cache_memory = 314572800
+ current_total_memory = 209715200 + 314572800
+ current_max_memory = 471859200
+ current_swap_memory = 20971520
+
+ # 1 CPU metric + 1 total Memory + 1 anon memory + 1 cache memory + 1 Max memory + 1 swap memory
+ num_of_metrics_per_extn_expected = 6
+ patch_get_cpu_usage.return_value = current_cpu
+ patch_get_memory_usage.return_value = current_anon_memory, current_cache_memory # example 200 MB, 300 MB
+ patch_get_memory_max_usage.return_value = current_max_memory # example 450 MB
+ patch_try_swap_memory_usage.return_value = current_swap_memory # example 20MB
+ num_polls = 18
+
+ for data_count in range(1, num_polls + 1): # pylint: disable=unused-variable
+ metrics = CGroupsTelemetry.poll_all_tracked()
+
+ self.assertEqual(len(metrics), num_extensions * num_of_metrics_per_extn_expected)
+ self._assert_polled_metrics_equal(metrics, current_cpu, current_total_memory, current_anon_memory, current_cache_memory, current_max_memory, current_swap_memory)
+
+ @patch("azurelinuxagent.ga.memorycontroller.MemoryControllerV1.get_max_memory_usage", side_effect=raise_ioerror)
+ @patch("azurelinuxagent.ga.memorycontroller.MemoryControllerV1.get_memory_usage", side_effect=raise_ioerror)
+ @patch("azurelinuxagent.ga.cpucontroller.CpuControllerV1.get_cpu_usage", side_effect=raise_ioerror)
+ @patch("azurelinuxagent.ga.cgroupcontroller._CgroupController.is_active", return_value=False)
def test_telemetry_polling_with_inactive_cgroups(self, *_):
num_extensions = 5
no_extensions_expected = 0 # pylint: disable=unused-variable
- self._track_new_extension_cgroups(num_extensions)
- self._assert_cgroups_are_tracked(num_extensions)
+ self._track_new_extension_cgroup_controllers(num_extensions)
+ self._assert_cgroup_controllers_are_tracked(num_extensions)
metrics = CGroupsTelemetry.poll_all_tracked()
@@ -182,14 +189,14 @@ def test_telemetry_polling_with_inactive_cgroups(self, *_):
self.assertEqual(len(metrics), 0)
- @patch("azurelinuxagent.ga.cgroup.MemoryCgroup.get_max_memory_usage")
- @patch("azurelinuxagent.ga.cgroup.MemoryCgroup.get_memory_usage")
- @patch("azurelinuxagent.ga.cgroup.CpuCgroup.get_cpu_usage")
- @patch("azurelinuxagent.ga.cgroup.CGroup.is_active")
+ @patch("azurelinuxagent.ga.memorycontroller.MemoryControllerV1.get_max_memory_usage")
+ @patch("azurelinuxagent.ga.memorycontroller.MemoryControllerV1.get_memory_usage")
+ @patch("azurelinuxagent.ga.cpucontroller.CpuControllerV1.get_cpu_usage")
+ @patch("azurelinuxagent.ga.cgroupcontroller._CgroupController.is_active")
def test_telemetry_polling_with_changing_cgroups_state(self, patch_is_active, patch_get_cpu_usage, # pylint: disable=unused-argument
patch_get_mem, patch_get_max_mem, *args):
num_extensions = 5
- self._track_new_extension_cgroups(num_extensions)
+ self._track_new_extension_cgroup_controllers(num_extensions)
patch_is_active.return_value = True
@@ -197,17 +204,18 @@ def test_telemetry_polling_with_changing_cgroups_state(self, patch_is_active, pa
expected_data_count = 1 # pylint: disable=unused-variable
current_cpu = 30
- current_memory = 209715200
+ current_anon_memory = 104857600
+ current_cache_memory = 104857600
current_max_memory = 471859200
patch_get_cpu_usage.return_value = current_cpu
- patch_get_mem.return_value = current_memory # example 200 MB
+ patch_get_mem.return_value = current_anon_memory, current_cache_memory # example 100 MB, 100 MB
patch_get_max_mem.return_value = current_max_memory # example 450 MB
- self._assert_cgroups_are_tracked(num_extensions)
+ self._assert_cgroup_controllers_are_tracked(num_extensions)
CGroupsTelemetry.poll_all_tracked()
- self._assert_cgroups_are_tracked(num_extensions)
+ self._assert_cgroup_controllers_are_tracked(num_extensions)
patch_is_active.return_value = False
patch_get_cpu_usage.side_effect = raise_ioerror
@@ -225,7 +233,7 @@ def test_telemetry_polling_with_changing_cgroups_state(self, patch_is_active, pa
@patch("azurelinuxagent.common.logger.periodic_warn")
def test_telemetry_polling_to_not_generate_transient_logs_ioerror_file_not_found(self, patch_periodic_warn):
num_extensions = 1
- self._track_new_extension_cgroups(num_extensions)
+ self._track_new_extension_cgroup_controllers(num_extensions)
self.assertEqual(0, patch_periodic_warn.call_count)
# Not expecting logs present for io_error with errno=errno.ENOENT
@@ -243,7 +251,7 @@ def test_telemetry_polling_to_generate_transient_logs_ioerror_permission_denied(
num_extensions = 1
num_controllers = 1
is_active_check_per_controller = 2
- self._track_new_extension_cgroups(num_extensions)
+ self._track_new_extension_cgroup_controllers(num_extensions)
self.assertEqual(0, patch_periodic_warn.call_count)
@@ -254,7 +262,7 @@ def test_telemetry_polling_to_generate_transient_logs_ioerror_permission_denied(
with patch("azurelinuxagent.common.utils.fileutil.read_file", side_effect=io_error_3):
poll_count = 1
expected_count_per_call = num_controllers + is_active_check_per_controller
- # get_max_memory_usage memory controller would generate a log statement, and each cgroup would invoke a
+ # get_cpu_usage cpu controller would generate a log statement, and each cgroup controller would invoke a
# is active check raising an exception
for data_count in range(poll_count, 10): # pylint: disable=unused-variable
@@ -263,23 +271,23 @@ def test_telemetry_polling_to_generate_transient_logs_ioerror_permission_denied(
def test_telemetry_polling_to_generate_transient_logs_index_error(self):
num_extensions = 1
- self._track_new_extension_cgroups(num_extensions)
+ self._track_new_extension_cgroup_controllers(num_extensions)
# Generating a different kind of error (non-IOError) to check the logging.
# Trying to invoke IndexError during the getParameter call
with patch("azurelinuxagent.common.utils.fileutil.read_file", return_value=''):
with patch("azurelinuxagent.common.logger.periodic_warn") as patch_periodic_warn:
- expected_call_count = 1 # 1 periodic warning for memory
+ expected_call_count = 1 # 1 periodic warning for cpu
for data_count in range(1, 10): # pylint: disable=unused-variable
CGroupsTelemetry.poll_all_tracked()
self.assertEqual(expected_call_count, patch_periodic_warn.call_count)
- @patch("azurelinuxagent.ga.cgroup.MemoryCgroup.try_swap_memory_usage")
- @patch("azurelinuxagent.ga.cgroup.MemoryCgroup.get_max_memory_usage")
- @patch("azurelinuxagent.ga.cgroup.MemoryCgroup.get_memory_usage")
- @patch("azurelinuxagent.ga.cgroup.CpuCgroup.get_cpu_usage")
- @patch("azurelinuxagent.ga.cgroup.CGroup.is_active")
- def test_telemetry_calculations(self, patch_is_active, patch_get_cpu_usage, patch_get_memory_usage, patch_get_memory_max_usage, patch_try_memory_swap_usage,
+ @patch("azurelinuxagent.ga.memorycontroller.MemoryControllerV1.try_swap_memory_usage")
+ @patch("azurelinuxagent.ga.memorycontroller.MemoryControllerV1.get_max_memory_usage")
+ @patch("azurelinuxagent.ga.memorycontroller.MemoryControllerV1.get_memory_usage")
+ @patch("azurelinuxagent.ga.cpucontroller.CpuControllerV1.get_cpu_usage")
+ @patch("azurelinuxagent.ga.cgroupcontroller._CgroupController.is_active")
+ def test_telemetry_calculations(self, patch_is_active, patch_get_cpu_usage, patch_get_memory_usage, patch_get_memory_max_usage, patch_try_memory_swap_usage,
*args): # pylint: disable=unused-argument
num_polls = 10
num_extensions = 1
@@ -287,47 +295,48 @@ def test_telemetry_calculations(self, patch_is_active, patch_get_cpu_usage, pat
cpu_percent_values = [random.randint(0, 100) for _ in range(num_polls)]
# only verifying calculations and not validity of the values.
- memory_usage_values = [random.randint(0, 8 * 1024 ** 3) for _ in range(num_polls)]
+ anon_usage_values = [random.randint(0, 8 * 1024 ** 3) for _ in range(num_polls)]
+ cache_usage_values = [random.randint(0, 8 * 1024 ** 3) for _ in range(num_polls)]
max_memory_usage_values = [random.randint(0, 8 * 1024 ** 3) for _ in range(num_polls)]
swap_usage_values = [random.randint(0, 8 * 1024 ** 3) for _ in range(num_polls)]
- self._track_new_extension_cgroups(num_extensions)
+ self._track_new_extension_cgroup_controllers(num_extensions)
self.assertEqual(2 * num_extensions, len(CGroupsTelemetry._tracked))
for i in range(num_polls):
patch_is_active.return_value = True
patch_get_cpu_usage.return_value = cpu_percent_values[i]
- patch_get_memory_usage.return_value = memory_usage_values[i]
+ patch_get_memory_usage.return_value = anon_usage_values[i], cache_usage_values[i]
patch_get_memory_max_usage.return_value = max_memory_usage_values[i]
patch_try_memory_swap_usage.return_value = swap_usage_values[i]
metrics = CGroupsTelemetry.poll_all_tracked()
- # 1 CPU metric + 1 Current Memory + 1 Max memory + 1 swap memory
- self.assertEqual(len(metrics), 4 * num_extensions)
- self._assert_polled_metrics_equal(metrics, cpu_percent_values[i], memory_usage_values[i], max_memory_usage_values[i], swap_usage_values[i])
+ # 1 CPU metric + 1 Total Memory + 1 anon memory + 1 cache memory + 1 Max memory + 1 swap memory
+ self.assertEqual(len(metrics), 6 * num_extensions)
+ self._assert_polled_metrics_equal(metrics, cpu_percent_values[i], anon_usage_values[i] + cache_usage_values[i], anon_usage_values[i], cache_usage_values[i], max_memory_usage_values[i], swap_usage_values[i])
def test_cgroup_tracking(self, *args): # pylint: disable=unused-argument
num_extensions = 5
num_controllers = 2
- self._track_new_extension_cgroups(num_extensions)
- self._assert_cgroups_are_tracked(num_extensions)
+ self._track_new_extension_cgroup_controllers(num_extensions)
+ self._assert_cgroup_controllers_are_tracked(num_extensions)
self.assertEqual(num_extensions * num_controllers, len(CGroupsTelemetry._tracked))
def test_cgroup_is_tracked(self, *args): # pylint: disable=unused-argument
num_extensions = 5
- self._track_new_extension_cgroups(num_extensions)
- self._assert_cgroups_are_tracked(num_extensions)
+ self._track_new_extension_cgroup_controllers(num_extensions)
+ self._assert_cgroup_controllers_are_tracked(num_extensions)
self.assertFalse(CGroupsTelemetry.is_tracked("not_present_cpu_dummy_path"))
self.assertFalse(CGroupsTelemetry.is_tracked("not_present_memory_dummy_path"))
- @patch("azurelinuxagent.ga.cgroup.MemoryCgroup.get_memory_usage", side_effect=raise_ioerror)
+ @patch("azurelinuxagent.ga.memorycontroller.MemoryControllerV1.get_memory_usage", side_effect=raise_ioerror)
def test_process_cgroup_metric_with_no_memory_cgroup_mounted(self, *args): # pylint: disable=unused-argument
num_extensions = 5
- self._track_new_extension_cgroups(num_extensions)
+ self._track_new_extension_cgroup_controllers(num_extensions)
- with patch("azurelinuxagent.ga.cgroup.CpuCgroup.get_cpu_usage") as patch_get_cpu_usage:
- with patch("azurelinuxagent.ga.cgroup.CGroup.is_active") as patch_is_active:
+ with patch("azurelinuxagent.ga.cpucontroller.CpuControllerV1.get_cpu_usage") as patch_get_cpu_usage:
+ with patch("azurelinuxagent.ga.cgroupcontroller._CgroupController.is_active") as patch_is_active:
patch_is_active.return_value = True
current_cpu = 30
@@ -339,42 +348,44 @@ def test_process_cgroup_metric_with_no_memory_cgroup_mounted(self, *args): # py
metrics = CGroupsTelemetry.poll_all_tracked()
self.assertEqual(len(metrics), num_extensions * 1) # Only CPU populated
- self._assert_polled_metrics_equal(metrics, current_cpu, 0, 0, 0)
+ self._assert_polled_metrics_equal(metrics, current_cpu, 0, 0, 0, 0, 0)
- @patch("azurelinuxagent.ga.cgroup.CpuCgroup.get_cpu_usage", side_effect=raise_ioerror)
+ @patch("azurelinuxagent.ga.cpucontroller.CpuControllerV1.get_cpu_usage", side_effect=raise_ioerror)
def test_process_cgroup_metric_with_no_cpu_cgroup_mounted(self, *args): # pylint: disable=unused-argument
num_extensions = 5
- self._track_new_extension_cgroups(num_extensions)
+ self._track_new_extension_cgroup_controllers(num_extensions)
- with patch("azurelinuxagent.ga.cgroup.MemoryCgroup.get_max_memory_usage") as patch_get_memory_max_usage:
- with patch("azurelinuxagent.ga.cgroup.MemoryCgroup.get_memory_usage") as patch_get_memory_usage:
- with patch("azurelinuxagent.ga.cgroup.MemoryCgroup.try_swap_memory_usage") as patch_try_swap_memory_usage:
- with patch("azurelinuxagent.ga.cgroup.CGroup.is_active") as patch_is_active:
+ with patch("azurelinuxagent.ga.memorycontroller.MemoryControllerV1.get_max_memory_usage") as patch_get_memory_max_usage:
+ with patch("azurelinuxagent.ga.memorycontroller.MemoryControllerV1.get_memory_usage") as patch_get_memory_usage:
+ with patch("azurelinuxagent.ga.memorycontroller.MemoryControllerV1.try_swap_memory_usage") as patch_try_swap_memory_usage:
+ with patch("azurelinuxagent.ga.cgroupcontroller._CgroupController.is_active") as patch_is_active:
patch_is_active.return_value = True
- current_memory = 209715200
+ current_total_memory = 209715200
+ current_anon_memory = 104857600
+ current_cache_memory = 104857600
current_max_memory = 471859200
current_swap_memory = 20971520
- patch_get_memory_usage.return_value = current_memory # example 200 MB
+ patch_get_memory_usage.return_value = current_anon_memory, current_cache_memory # example 100 MB, 100 MB
patch_get_memory_max_usage.return_value = current_max_memory # example 450 MB
patch_try_swap_memory_usage.return_value = current_swap_memory # example 20MB
num_polls = 10
for data_count in range(1, num_polls + 1): # pylint: disable=unused-variable
metrics = CGroupsTelemetry.poll_all_tracked()
- # Memory is only populated, CPU is not. Thus 3 metrics for memory.
- self.assertEqual(len(metrics), num_extensions * 3)
- self._assert_polled_metrics_equal(metrics, 0, current_memory, current_max_memory, current_swap_memory)
+ # Memory is only populated, CPU is not. Thus 5 metrics for memory.
+ self.assertEqual(len(metrics), num_extensions * 5)
+ self._assert_polled_metrics_equal(metrics, 0, current_total_memory, current_anon_memory, current_cache_memory, current_max_memory, current_swap_memory)
- @patch("azurelinuxagent.ga.cgroup.MemoryCgroup.get_memory_usage", side_effect=raise_ioerror)
- @patch("azurelinuxagent.ga.cgroup.MemoryCgroup.get_max_memory_usage", side_effect=raise_ioerror)
- @patch("azurelinuxagent.ga.cgroup.CpuCgroup.get_cpu_usage", side_effect=raise_ioerror)
+ @patch("azurelinuxagent.ga.memorycontroller.MemoryControllerV1.get_memory_usage", side_effect=raise_ioerror)
+ @patch("azurelinuxagent.ga.memorycontroller.MemoryControllerV1.get_max_memory_usage", side_effect=raise_ioerror)
+ @patch("azurelinuxagent.ga.cpucontroller.CpuControllerV1.get_cpu_usage", side_effect=raise_ioerror)
def test_extension_telemetry_not_sent_for_empty_perf_metrics(self, *args): # pylint: disable=unused-argument
num_extensions = 5
- self._track_new_extension_cgroups(num_extensions)
+ self._track_new_extension_cgroup_controllers(num_extensions)
- with patch("azurelinuxagent.ga.cgroup.CGroup.is_active") as patch_is_active:
+ with patch("azurelinuxagent.ga.cgroupcontroller._CgroupController.is_active") as patch_is_active:
patch_is_active.return_value = False
poll_count = 1
@@ -383,9 +394,9 @@ def test_extension_telemetry_not_sent_for_empty_perf_metrics(self, *args): # py
metrics = CGroupsTelemetry.poll_all_tracked()
self.assertEqual(0, len(metrics))
- @patch("azurelinuxagent.ga.cgroup.CpuCgroup.get_cpu_usage")
- @patch("azurelinuxagent.ga.cgroup.CpuCgroup.get_cpu_throttled_time")
- @patch("azurelinuxagent.ga.cgroup.CGroup.is_active")
+ @patch("azurelinuxagent.ga.cpucontroller.CpuControllerV1.get_cpu_usage")
+ @patch("azurelinuxagent.ga.cpucontroller.CpuControllerV1.get_cpu_throttled_time")
+ @patch("azurelinuxagent.ga.cgroupcontroller._CgroupController.is_active")
def test_cgroup_telemetry_should_not_report_cpu_negative_value(self, patch_is_active, path_get_throttled_time, patch_get_cpu_usage):
num_polls = 5
@@ -396,8 +407,8 @@ def test_cgroup_telemetry_should_not_report_cpu_negative_value(self, patch_is_ac
cpu_percent_values.append(-1)
cpu_throttled_values = [random.randint(0, 60 * 60) for _ in range(num_polls)]
- dummy_cpu_cgroup = CpuCgroup("dummy_extension_name", "dummy_cpu_path")
- CGroupsTelemetry.track_cgroup(dummy_cpu_cgroup)
+ dummy_cpu_cgroup = CpuControllerV1("dummy_extension_name", "dummy_cpu_path")
+ CGroupsTelemetry.track_cgroup_controller(dummy_cpu_cgroup)
self.assertEqual(1, len(CGroupsTelemetry._tracked))
for i in range(num_polls):
diff --git a/tests/ga/test_collect_logs.py b/tests/ga/test_collect_logs.py
index 4ac3f03fb..458cd2e69 100644
--- a/tests/ga/test_collect_logs.py
+++ b/tests/ga/test_collect_logs.py
@@ -18,13 +18,15 @@
import os
from azurelinuxagent.common import logger, conf
-from azurelinuxagent.ga.cgroup import CpuCgroup, MemoryCgroup, MetricValue
+from azurelinuxagent.ga.cgroupcontroller import MetricValue, MetricsCounter
from azurelinuxagent.ga.cgroupconfigurator import CGroupConfigurator
from azurelinuxagent.common.logger import Logger
from azurelinuxagent.common.protocol.util import ProtocolUtil
from azurelinuxagent.common.utils import fileutil
from azurelinuxagent.ga.collect_logs import get_collect_logs_handler, is_log_collection_allowed, \
get_log_collector_monitor_handler
+from azurelinuxagent.ga.cpucontroller import CpuControllerV1, CpuControllerV2
+from azurelinuxagent.ga.memorycontroller import MemoryControllerV1, MemoryControllerV2
from tests.lib.mock_wire_protocol import mock_wire_protocol, MockHttpResponse
from tests.lib.http_request_predicates import HttpRequestPredicates
from tests.lib.wire_protocol_data import DATA_FILE
@@ -32,8 +34,13 @@
is_python_version_26, data_dir
+class CgroupVersions:
+ V1 = "v1"
+ V2 = "v2"
+
+
@contextlib.contextmanager
-def _create_collect_logs_handler(iterations=1, cgroups_enabled=True, collect_logs_conf=True):
+def _create_collect_logs_handler(iterations=1, cgroup_version=CgroupVersions.V1, cgroups_enabled=True, collect_logs_conf=True, cgroupv2_resource_limiting_conf=False):
"""
Creates an instance of CollectLogsHandler that
* Uses a mock_wire_protocol for network requests,
@@ -52,19 +59,33 @@ def _create_collect_logs_handler(iterations=1, cgroups_enabled=True, collect_log
with patch("azurelinuxagent.ga.collect_logs.CollectLogsHandler.stopped",
side_effect=[False] * iterations + [True]):
with patch("time.sleep"):
- # Grab the singleton to patch it
- cgroups_configurator_singleton = CGroupConfigurator.get_instance()
- with patch.object(cgroups_configurator_singleton, "enabled", return_value=cgroups_enabled):
- with patch("azurelinuxagent.ga.collect_logs.conf.get_collect_logs",
- return_value=collect_logs_conf):
- def run_and_wait():
- collect_logs_handler.run()
- collect_logs_handler.join()
-
- collect_logs_handler = get_collect_logs_handler()
- collect_logs_handler.get_mock_wire_protocol = lambda: protocol
- collect_logs_handler.run_and_wait = run_and_wait
- yield collect_logs_handler
+ with patch("azurelinuxagent.ga.collect_logs.conf.get_collect_logs", return_value=collect_logs_conf):
+
+ # Grab the singleton to patch it
+ cgroups_configurator_singleton = CGroupConfigurator.get_instance()
+
+ if cgroup_version == CgroupVersions.V1:
+ with patch.object(cgroups_configurator_singleton, "enabled", return_value=cgroups_enabled):
+ def run_and_wait():
+ collect_logs_handler.run()
+ collect_logs_handler.join()
+
+ collect_logs_handler = get_collect_logs_handler()
+ collect_logs_handler.get_mock_wire_protocol = lambda: protocol
+ collect_logs_handler.run_and_wait = run_and_wait
+ yield collect_logs_handler
+ else:
+ with patch("azurelinuxagent.ga.collect_logs.conf.get_enable_cgroup_v2_resource_limiting", return_value=cgroupv2_resource_limiting_conf):
+ with patch.object(cgroups_configurator_singleton, "enabled", return_value=False):
+ with patch("azurelinuxagent.ga.cgroupconfigurator.CGroupConfigurator._Impl.using_cgroup_v2", return_value=True):
+ def run_and_wait():
+ collect_logs_handler.run()
+ collect_logs_handler.join()
+
+ collect_logs_handler = get_collect_logs_handler()
+ collect_logs_handler.get_mock_wire_protocol = lambda: protocol
+ collect_logs_handler.run_and_wait = run_and_wait
+ yield collect_logs_handler
@skip_if_predicate_true(is_python_version_26, "Disabled on Python 2.6")
@@ -101,26 +122,124 @@ def _create_dummy_archive(self, size=1024):
def test_it_should_only_collect_logs_if_conditions_are_met(self):
# In order to collect logs, three conditions have to be met:
- # 1) the flag must be set to true in the conf file
- # 2) cgroups must be managing services
- # 3) python version 2.7+ which is automatically true for these tests since they are disabled on py2.6
+ # 1) It should be enabled in the configuration.
+ # 2) The system must be using cgroups to manage services - needed for resource limiting of the log collection. The
+ # agent currently fully supports resource limiting for v1, but only supports log collector resource limiting for v2
+ # if enabled via configuration.
+ # This condition is True if either:
+ # a. cgroup usage in the agent is enabled; OR
+ # b. the machine is using cgroup v2 and v2 resource limiting is enabled in the configuration.
+ # 3) The python version must be greater than 2.6 in order to support the ZipFile library used when collecting.
+
+ # Note, cgroups should not be in an 'enabled' state in the configurator if v2 is in use. Resource governance is
+ # not fully supported on v2 yet.
+
+ # If collect logs is not enabled in the configuration, then log collection should always be disabled
+
+ # Case 1:
+ # - Cgroups are enabled in the configurator
+ # - Cgroup v2 is not in use
+ # - Cgroup v2 resource limiting conf is True
+ # - collect logs config flag false
+ with _create_collect_logs_handler(cgroups_enabled=True, cgroup_version=CgroupVersions.V1, cgroupv2_resource_limiting_conf=True, collect_logs_conf=False):
+ self.assertEqual(False, is_log_collection_allowed(), "Log collection should not have been enabled")
+
+ # Case 2:
+ # - Cgroups are enabled in the configurator
+ # - Cgroup v2 is not in use
+ # - Cgroup v2 resource limiting conf is False
+ # - collect logs config flag false
+ with _create_collect_logs_handler(cgroups_enabled=True, cgroup_version=CgroupVersions.V1, cgroupv2_resource_limiting_conf=False, collect_logs_conf=False):
+ self.assertEqual(False, is_log_collection_allowed(), "Log collection should not have been enabled")
+
+ # Case 3:
+ # - Cgroups are disabled in the configurator
+ # - Cgroup v2 is in use
+ # - Cgroup v2 resource limiting conf is True
+ # - collect logs config flag false
+ with _create_collect_logs_handler(cgroups_enabled=False, cgroup_version=CgroupVersions.V2, cgroupv2_resource_limiting_conf=True, collect_logs_conf=False):
+ self.assertEqual(False, is_log_collection_allowed(), "Log collection should not have been enabled")
- # cgroups not enabled, config flag false
- with _create_collect_logs_handler(cgroups_enabled=False, collect_logs_conf=False):
+ # Case 4:
+ # - Cgroups are disabled in the configurator
+ # - Cgroup v2 is in use
+ # - Cgroup v2 resource limiting conf is False
+ # - collect logs config flag false
+ with _create_collect_logs_handler(cgroups_enabled=False, cgroup_version=CgroupVersions.V2, cgroupv2_resource_limiting_conf=False, collect_logs_conf=False):
self.assertEqual(False, is_log_collection_allowed(), "Log collection should not have been enabled")
- # cgroups enabled, config flag false
- with _create_collect_logs_handler(cgroups_enabled=True, collect_logs_conf=False):
+ # Case 5:
+ # - Cgroups are disabled in the configurator
+ # - Cgroup v2 is not in use
+ # - Cgroup v2 resource limiting conf is True
+ # - collect logs config flag false
+ with _create_collect_logs_handler(cgroups_enabled=False, cgroup_version=CgroupVersions.V1, cgroupv2_resource_limiting_conf=True, collect_logs_conf=False):
self.assertEqual(False, is_log_collection_allowed(), "Log collection should not have been enabled")
- # cgroups not enabled, config flag true
- with _create_collect_logs_handler(cgroups_enabled=False, collect_logs_conf=True):
+ # Case 6:
+ # - Cgroups are disabled in the configurator
+ # - Cgroup v2 is not in use
+ # - Cgroup v2 resource limiting conf is False
+ # - collect logs config flag false
+ with _create_collect_logs_handler(cgroups_enabled=False, cgroup_version=CgroupVersions.V1, cgroupv2_resource_limiting_conf=False, collect_logs_conf=False):
self.assertEqual(False, is_log_collection_allowed(), "Log collection should not have been enabled")
- # cgroups enabled, config flag true
- with _create_collect_logs_handler(cgroups_enabled=True, collect_logs_conf=True):
+ # If collect logs is enabled in the configuration and cgroups are enabled in the configurator, then log collection should always be enabled
+
+ # Case 7:
+ # - Cgroups are enabled in the configurator
+ # - Cgroup v2 is not in use
+ # - Cgroup v2 resource limiting conf is True
+ # - collect logs config flag true
+ with _create_collect_logs_handler(cgroups_enabled=True, cgroup_version=CgroupVersions.V1, cgroupv2_resource_limiting_conf=True, collect_logs_conf=True):
self.assertEqual(True, is_log_collection_allowed(), "Log collection should have been enabled")
+ # Case 8:
+ # - Cgroups are enabled in the configurator
+ # - Cgroup v2 is not in use
+ # - Cgroup v2 resource limiting conf is False
+ # - collect logs config flag true
+ with _create_collect_logs_handler(cgroups_enabled=True, cgroup_version=CgroupVersions.V1, cgroupv2_resource_limiting_conf=False, collect_logs_conf=True):
+ self.assertEqual(True, is_log_collection_allowed(), "Log collection should have been enabled")
+
+ # If collect logs is enabled in the configuration and v2 is in use with the v2 resource limiting conf enabled, then log collection should always be enabled
+
+ # Case 9:
+ # - Cgroups are disabled in the configurator
+ # - Cgroup v2 is in use
+ # - Cgroup v2 resource limiting conf is True
+ # - collect logs config flag true
+ with _create_collect_logs_handler(cgroups_enabled=False, cgroup_version=CgroupVersions.V2, cgroupv2_resource_limiting_conf=True, collect_logs_conf=True):
+ self.assertEqual(True, is_log_collection_allowed(), "Log collection should have been enabled")
+
+ # If collect logs is enabled in the configuration and v2 is in use but the v2 resource limiting conf disabled, then log collection should always be disabled
+
+ # Case 10:
+ # - Cgroups are disabled in the configurator
+ # - Cgroup v2 is in use
+ # - Cgroup v2 resource limiting conf is False
+ # - collect logs config flag true
+ with _create_collect_logs_handler(cgroups_enabled=False, cgroup_version=CgroupVersions.V2, cgroupv2_resource_limiting_conf=False, collect_logs_conf=True):
+ self.assertEqual(False, is_log_collection_allowed(), "Log collection should not have been enabled")
+
+ # If collect logs is enabled in the configuration but cgroups are disabled in the configurator and v2 is not in use, then log collection should always be disabled
+
+ # Case 11:
+ # - Cgroups are disabled in the configurator
+ # - Cgroup v2 is not in use
+ # - Cgroup v2 resource limiting conf is True
+ # - collect logs config flag true
+ with _create_collect_logs_handler(cgroups_enabled=False, cgroup_version=CgroupVersions.V1, cgroupv2_resource_limiting_conf=True, collect_logs_conf=True):
+ self.assertEqual(False, is_log_collection_allowed(), "Log collection should not have been enabled")
+
+ # Case 12:
+ # - Cgroups are disabled in the configurator
+ # - Cgroup v2 is not in use
+ # - Cgroup v2 resource limiting conf is False
+ # - collect logs config flag true
+ with _create_collect_logs_handler(cgroups_enabled=False, cgroup_version=CgroupVersions.V1, cgroupv2_resource_limiting_conf=False, collect_logs_conf=True):
+ self.assertEqual(False, is_log_collection_allowed(), "Log collection should not have been enabled")
+
def test_it_uploads_logs_when_collection_is_successful(self):
archive_size = 42
@@ -168,7 +287,7 @@ def http_put_handler(url, _, **__):
@contextlib.contextmanager
-def _create_log_collector_monitor_handler(iterations=1):
+def _create_log_collector_monitor_handler(iterations=1, cgroup_version=CgroupVersions.V1):
"""
Creates an instance of LogCollectorMonitorHandler that
* Runs its main loop only the number of times given in the 'iterations' parameter, and
@@ -184,22 +303,40 @@ def _create_log_collector_monitor_handler(iterations=1):
original_read_file = fileutil.read_file
- def mock_read_file(filepath, **args):
+ def mock_read_file_v1(filepath, **args):
if filepath == "/proc/stat":
- filepath = os.path.join(data_dir, "cgroups", "proc_stat_t0")
+ filepath = os.path.join(data_dir, "cgroups", "v1", "proc_stat_t0")
elif filepath.endswith("/cpuacct.stat"):
- filepath = os.path.join(data_dir, "cgroups", "cpuacct.stat_t0")
+ filepath = os.path.join(data_dir, "cgroups", "v1", "cpuacct.stat_t0")
+ return original_read_file(filepath, **args)
+
+ def mock_read_file_v2(filepath, **args):
+ if filepath == "/proc/uptime":
+ filepath = os.path.join(data_dir, "cgroups", "v2", "proc_uptime_t0")
+ elif filepath.endswith("/cpu.stat"):
+ filepath = os.path.join(data_dir, "cgroups", "v2", "cpu.stat_t0")
return original_read_file(filepath, **args)
+ mock_read_file = None
+ cgroups = []
+ if cgroup_version == "v1":
+ mock_read_file = mock_read_file_v1
+ cgroups = [
+ CpuControllerV1("test", "dummy_cpu_path"),
+ MemoryControllerV1("test", "dummy_memory_path")
+ ]
+ else:
+ mock_read_file = mock_read_file_v2
+ cgroups = [
+ CpuControllerV2("test", "dummy_cpu_path"),
+ MemoryControllerV2("test", "dummy_memory_path")
+ ]
+
with patch("azurelinuxagent.common.utils.fileutil.read_file", side_effect=mock_read_file):
def run_and_wait():
monitor_log_collector.run()
monitor_log_collector.join()
- cgroups = [
- CpuCgroup("test", "dummy_cpu_path"),
- MemoryCgroup("test", "dummy_memory_path")
- ]
monitor_log_collector = get_log_collector_monitor_handler(cgroups)
monitor_log_collector.run_and_wait = run_and_wait
yield monitor_log_collector
@@ -207,33 +344,78 @@ def run_and_wait():
class TestLogCollectorMonitorHandler(AgentTestCase):
- @patch('azurelinuxagent.common.event.EventLogger.add_metric')
- @patch("azurelinuxagent.ga.collect_logs.LogCollectorMonitorHandler._poll_resource_usage")
- def test_send_extension_metrics_telemetry(self, patch_poll_resource_usage, patch_add_metric):
+ def test_get_max_recorded_metrics(self):
+ with _create_log_collector_monitor_handler(iterations=2) as log_collector_monitor_handler:
+ nonlocal_vars = {
+ 'cpu_iteration': 0,
+ 'mem_iteration': 0,
+ 'multiplier': 5
+ }
+
+ def get_different_cpu_metrics(**kwargs): # pylint: disable=W0613
+ metrics = [MetricValue("Process", MetricsCounter.PROCESSOR_PERCENT_TIME, "service", 4.5), MetricValue("Process", MetricsCounter.THROTTLED_TIME, "service", nonlocal_vars['cpu_iteration']*nonlocal_vars['multiplier'] + 10.000)]
+ nonlocal_vars['cpu_iteration'] += 1
+ return metrics
+
+ def get_different_memory_metrics(**kwargs): # pylint: disable=W0613
+ metrics = [MetricValue("Memory", MetricsCounter.TOTAL_MEM_USAGE, "service", 20),
+ MetricValue("Memory", MetricsCounter.ANON_MEM_USAGE, "service", 15),
+ MetricValue("Memory", MetricsCounter.CACHE_MEM_USAGE, "service", nonlocal_vars['mem_iteration']*nonlocal_vars['multiplier'] + 5),
+ MetricValue("Memory", MetricsCounter.MAX_MEM_USAGE, "service", 30),
+ MetricValue("Memory", MetricsCounter.SWAP_MEM_USAGE, "service", 0)]
+ nonlocal_vars['mem_iteration'] += 1
+ return metrics
+
+ with patch("azurelinuxagent.ga.cpucontroller._CpuController.get_tracked_metrics", side_effect=get_different_cpu_metrics):
+ with patch("azurelinuxagent.ga.memorycontroller._MemoryController.get_tracked_metrics", side_effect=get_different_memory_metrics):
+ log_collector_monitor_handler.run_and_wait()
+ max_recorded_metrics = log_collector_monitor_handler.get_max_recorded_metrics()
+ self.assertEqual(len(max_recorded_metrics), 7)
+ self.assertEqual(max_recorded_metrics[MetricsCounter.PROCESSOR_PERCENT_TIME], 4.5)
+ self.assertEqual(max_recorded_metrics[MetricsCounter.THROTTLED_TIME], 15.0)
+ self.assertEqual(max_recorded_metrics[MetricsCounter.TOTAL_MEM_USAGE], 20)
+ self.assertEqual(max_recorded_metrics[MetricsCounter.ANON_MEM_USAGE], 15)
+ self.assertEqual(max_recorded_metrics[MetricsCounter.CACHE_MEM_USAGE], 10)
+ self.assertEqual(max_recorded_metrics[MetricsCounter.MAX_MEM_USAGE], 30)
+ self.assertEqual(max_recorded_metrics[MetricsCounter.SWAP_MEM_USAGE], 0)
+
+ def test_verify_log_collector_memory_limit_exceeded(self):
with _create_log_collector_monitor_handler() as log_collector_monitor_handler:
- patch_poll_resource_usage.return_value = [MetricValue("Process", "% Processor Time", "service", 1),
- MetricValue("Process", "Throttled Time", "service", 1),
- MetricValue("Memory", "Total Memory Usage", "service", 1),
- MetricValue("Memory", "Max Memory Usage", "service", 1),
- MetricValue("Memory", "Swap Memory Usage", "service", 1)
- ]
- log_collector_monitor_handler.run_and_wait()
- self.assertEqual(1, patch_poll_resource_usage.call_count)
- self.assertEqual(5, patch_add_metric.call_count) # Five metrics being sent.
-
- @patch("os._exit", side_effect=Exception)
- @patch("azurelinuxagent.ga.collect_logs.LogCollectorMonitorHandler._poll_resource_usage")
- def test_verify_log_collector_memory_limit_exceeded(self, patch_poll_resource_usage, mock_exit):
+ cache_exceeded = [MetricValue("Process", MetricsCounter.PROCESSOR_PERCENT_TIME, "service", 4.5),
+ MetricValue("Process", MetricsCounter.THROTTLED_TIME, "service", 10.281),
+ MetricValue("Memory", MetricsCounter.TOTAL_MEM_USAGE, "service", 170 * 1024 ** 2),
+ MetricValue("Memory", MetricsCounter.ANON_MEM_USAGE, "service", 15 * 1024 ** 2),
+ MetricValue("Memory", MetricsCounter.CACHE_MEM_USAGE, "service", 160 * 1024 ** 2),
+ MetricValue("Memory", MetricsCounter.MAX_MEM_USAGE, "service", 171 * 1024 ** 2),
+ MetricValue("Memory", MetricsCounter.SWAP_MEM_USAGE, "service", 0)]
+ with patch("azurelinuxagent.ga.collect_logs.LogCollectorMonitorHandler._poll_resource_usage", return_value=cache_exceeded):
+ with patch("os._exit") as mock_exit:
+ log_collector_monitor_handler.run_and_wait()
+ self.assertEqual(mock_exit.call_count, 1)
+
with _create_log_collector_monitor_handler() as log_collector_monitor_handler:
- with patch("azurelinuxagent.ga.cgroupconfigurator.LOGCOLLECTOR_MEMORY_LIMIT", 8):
- patch_poll_resource_usage.return_value = [MetricValue("Process", "% Processor Time", "service", 1),
- MetricValue("Process", "Throttled Time", "service", 1),
- MetricValue("Memory", "Total Memory Usage", "service", 9),
- MetricValue("Memory", "Max Memory Usage", "service", 7),
- MetricValue("Memory", "Swap Memory Usage", "service", 0)
-
- ]
- try:
+ anon_exceeded = [MetricValue("Process", MetricsCounter.PROCESSOR_PERCENT_TIME, "service", 4.5),
+ MetricValue("Process", MetricsCounter.THROTTLED_TIME, "service", 10.281),
+ MetricValue("Memory", MetricsCounter.TOTAL_MEM_USAGE, "service", 170 * 1024 ** 2),
+ MetricValue("Memory", MetricsCounter.ANON_MEM_USAGE, "service", 30 * 1024 ** 2),
+ MetricValue("Memory", MetricsCounter.CACHE_MEM_USAGE, "service", 140 * 1024 ** 2),
+ MetricValue("Memory", MetricsCounter.MAX_MEM_USAGE, "service", 171 * 1024 ** 2),
+ MetricValue("Memory", MetricsCounter.SWAP_MEM_USAGE, "service", 0)]
+ with patch("azurelinuxagent.ga.collect_logs.LogCollectorMonitorHandler._poll_resource_usage", return_value=anon_exceeded):
+ with patch("os._exit") as mock_exit:
+ log_collector_monitor_handler.run_and_wait()
+ self.assertEqual(mock_exit.call_count, 1)
+
+ with _create_log_collector_monitor_handler(cgroup_version=CgroupVersions.V2) as log_collector_monitor_handler:
+ mem_throttled_exceeded = [MetricValue("Process", MetricsCounter.PROCESSOR_PERCENT_TIME, "service", 4.5),
+ MetricValue("Process", MetricsCounter.THROTTLED_TIME, "service", 10.281),
+ MetricValue("Memory", MetricsCounter.TOTAL_MEM_USAGE, "service", 170 * 1024 ** 2),
+ MetricValue("Memory", MetricsCounter.ANON_MEM_USAGE, "service", 15 * 1024 ** 2),
+ MetricValue("Memory", MetricsCounter.CACHE_MEM_USAGE, "service", 140 * 1024 ** 2),
+ MetricValue("Memory", MetricsCounter.MAX_MEM_USAGE, "service", 171 * 1024 ** 2),
+ MetricValue("Memory", MetricsCounter.SWAP_MEM_USAGE, "service", 0),
+ MetricValue("Memory", MetricsCounter.MEM_THROTTLED, "service", 11)]
+ with patch("azurelinuxagent.ga.collect_logs.LogCollectorMonitorHandler._poll_resource_usage", return_value=mem_throttled_exceeded):
+ with patch("os._exit") as mock_exit:
log_collector_monitor_handler.run_and_wait()
- except Exception:
self.assertEqual(mock_exit.call_count, 1)
diff --git a/tests/ga/test_cpucontroller.py b/tests/ga/test_cpucontroller.py
new file mode 100644
index 000000000..bc5fc4070
--- /dev/null
+++ b/tests/ga/test_cpucontroller.py
@@ -0,0 +1,313 @@
+# Copyright 2018 Microsoft Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Requires Python 2.4+ and Openssl 1.0+
+#
+
+from __future__ import print_function
+
+import errno
+import os
+import random
+import shutil
+
+from azurelinuxagent.ga.cgroupcontroller import MetricsCounter
+from azurelinuxagent.ga.cpucontroller import CpuControllerV1, CpuControllerV2
+from azurelinuxagent.common.exception import CGroupsException
+from azurelinuxagent.common.osutil import get_osutil
+from azurelinuxagent.common.utils import fileutil
+from tests.lib.tools import AgentTestCase, patch, data_dir
+
+
+def consume_cpu_time():
+ waste = 0
+ for x in range(1, 200000): # pylint: disable=unused-variable
+ waste += random.random()
+ return waste
+
+
+class TestCpuControllerV1(AgentTestCase):
+ @classmethod
+ def setUpClass(cls):
+ AgentTestCase.setUpClass()
+
+ original_read_file = fileutil.read_file
+
+ #
+ # Tests that need to mock the contents of /proc/stat or */cpuacct/stat can set this map from
+ # the file that needs to be mocked to the mock file (each test starts with an empty map). If
+ # an Exception is given instead of a path, the exception is raised
+ #
+ cls.mock_read_file_map = {}
+
+ def mock_read_file(filepath, **args):
+ if filepath in cls.mock_read_file_map:
+ mapped_value = cls.mock_read_file_map[filepath]
+ if isinstance(mapped_value, Exception):
+ raise mapped_value
+ filepath = mapped_value
+ return original_read_file(filepath, **args)
+
+ cls.mock_read_file = patch("azurelinuxagent.common.utils.fileutil.read_file", side_effect=mock_read_file)
+ cls.mock_read_file.start()
+
+ @classmethod
+ def tearDownClass(cls):
+ cls.mock_read_file.stop()
+ AgentTestCase.tearDownClass()
+
+ def setUp(self):
+ AgentTestCase.setUp(self)
+ TestCpuControllerV1.mock_read_file_map.clear()
+
+ def test_initialize_cpu_usage_v1_should_set_current_cpu_usage(self):
+ controller = CpuControllerV1("test", "/sys/fs/cgroup/cpu/system.slice/test")
+
+ TestCpuControllerV1.mock_read_file_map = {
+ "/proc/stat": os.path.join(data_dir, "cgroups", "v1", "proc_stat_t0"),
+ os.path.join(controller.path, "cpuacct.stat"): os.path.join(data_dir, "cgroups", "v1", "cpuacct.stat_t0")
+ }
+
+ controller.initialize_cpu_usage()
+
+ self.assertEqual(controller._current_cgroup_cpu, 63763)
+ self.assertEqual(controller._current_system_cpu, 5496872)
+
+ def test_get_cpu_usage_v1_should_return_the_cpu_usage_since_its_last_invocation(self):
+ osutil = get_osutil()
+
+ controller = CpuControllerV1("test", "/sys/fs/cgroup/cpu/system.slice/test")
+
+ TestCpuControllerV1.mock_read_file_map = {
+ "/proc/stat": os.path.join(data_dir, "cgroups", "v1", "proc_stat_t0"),
+ os.path.join(controller.path, "cpuacct.stat"): os.path.join(data_dir, "cgroups", "v1", "cpuacct.stat_t0")
+ }
+
+ controller.initialize_cpu_usage()
+
+ TestCpuControllerV1.mock_read_file_map = {
+ "/proc/stat": os.path.join(data_dir, "cgroups", "v1", "proc_stat_t1"),
+ os.path.join(controller.path, "cpuacct.stat"): os.path.join(data_dir, "cgroups", "v1", "cpuacct.stat_t1")
+ }
+
+ cpu_usage = controller.get_cpu_usage()
+
+ self.assertEqual(cpu_usage, round(100.0 * 0.000307697876885 * osutil.get_processor_cores(), 3))
+
+ TestCpuControllerV1.mock_read_file_map = {
+ "/proc/stat": os.path.join(data_dir, "cgroups", "v1", "proc_stat_t2"),
+ os.path.join(controller.path, "cpuacct.stat"): os.path.join(data_dir, "cgroups", "v1", "cpuacct.stat_t2")
+ }
+
+ cpu_usage = controller.get_cpu_usage()
+
+ self.assertEqual(cpu_usage, round(100.0 * 0.000445181085968 * osutil.get_processor_cores(), 3))
+
+ def test_initialize_cpu_usage_v1_should_set_the_cgroup_usage_to_0_when_the_cgroup_does_not_exist(self):
+ controller = CpuControllerV1("test", "/sys/fs/cgroup/cpu/system.slice/test")
+
+ io_error_2 = IOError()
+ io_error_2.errno = errno.ENOENT # "No such file or directory"
+
+ TestCpuControllerV1.mock_read_file_map = {
+ "/proc/stat": os.path.join(data_dir, "cgroups", "v1", "proc_stat_t0"),
+ os.path.join(controller.path, "cpuacct.stat"): io_error_2
+ }
+
+ controller.initialize_cpu_usage()
+
+ self.assertEqual(controller._current_cgroup_cpu, 0)
+ self.assertEqual(controller._current_system_cpu, 5496872) # check the system usage just for test sanity
+
+ def test_initialize_cpu_usage_v1_should_raise_an_exception_when_called_more_than_once(self):
+ controller = CpuControllerV1("test", "/sys/fs/cgroup/cpu/system.slice/test")
+
+ TestCpuControllerV1.mock_read_file_map = {
+ "/proc/stat": os.path.join(data_dir, "cgroups", "v1", "proc_stat_t0"),
+ os.path.join(controller.path, "cpuacct.stat"): os.path.join(data_dir, "cgroups", "v1", "cpuacct.stat_t0")
+ }
+
+ controller.initialize_cpu_usage()
+
+ with self.assertRaises(CGroupsException):
+ controller.initialize_cpu_usage()
+
+ def test_get_cpu_usage_v1_should_raise_an_exception_when_initialize_cpu_usage_has_not_been_invoked(self):
+ controller = CpuControllerV1("test", "/sys/fs/cgroup/cpu/system.slice/test")
+
+ with self.assertRaises(CGroupsException):
+ cpu_usage = controller.get_cpu_usage() # pylint: disable=unused-variable
+
+ def test_get_throttled_time_v1_should_return_the_value_since_its_last_invocation(self):
+ test_file = os.path.join(self.tmp_dir, "cpu.stat")
+ shutil.copyfile(os.path.join(data_dir, "cgroups", "v1", "cpu.stat_t0"), test_file) # throttled_time = 50
+ controller = CpuControllerV1("test", self.tmp_dir)
+ controller.initialize_cpu_usage()
+ shutil.copyfile(os.path.join(data_dir, "cgroups", "v1", "cpu.stat_t1"), test_file) # throttled_time = 2075541442327
+
+ throttled_time = controller.get_cpu_throttled_time()
+
+ self.assertEqual(throttled_time, round(float(2075541442327 - 50) / 1E9, 3), "The value of throttled_time is incorrect")
+
+ def test_get_tracked_metrics_v1_should_return_the_throttled_time(self):
+ controller = CpuControllerV1("test", os.path.join(data_dir, "cgroups", "v1"))
+ controller.initialize_cpu_usage()
+
+ def find_throttled_time(metrics):
+ return [m for m in metrics if m.counter == MetricsCounter.THROTTLED_TIME]
+
+ found = find_throttled_time(controller.get_tracked_metrics())
+ self.assertTrue(len(found) == 0, "get_tracked_metrics should not fetch the throttled time by default. Found: {0}".format(found))
+
+ found = find_throttled_time(controller.get_tracked_metrics(track_throttled_time=True))
+ self.assertTrue(len(found) == 1, "get_tracked_metrics should have fetched the throttled time by default. Found: {0}".format(found))
+
+
+class TestCpuControllerV2(AgentTestCase):
+ @classmethod
+ def setUpClass(cls):
+ AgentTestCase.setUpClass()
+
+ original_read_file = fileutil.read_file
+
+ #
+ # Tests that need to mock the contents of /proc/uptime or */cpu.stat can set this map from
+ # the file that needs to be mocked to the mock file (each test starts with an empty map). If
+ # an Exception is given instead of a path, the exception is raised
+ #
+ cls.mock_read_file_map = {}
+
+ def mock_read_file(filepath, **args):
+ if filepath in cls.mock_read_file_map:
+ mapped_value = cls.mock_read_file_map[filepath]
+ if isinstance(mapped_value, Exception):
+ raise mapped_value
+ filepath = mapped_value
+ return original_read_file(filepath, **args)
+
+ cls.mock_read_file = patch("azurelinuxagent.common.utils.fileutil.read_file", side_effect=mock_read_file)
+ cls.mock_read_file.start()
+
+ @classmethod
+ def tearDownClass(cls):
+ cls.mock_read_file.stop()
+ AgentTestCase.tearDownClass()
+
+ def setUp(self):
+ AgentTestCase.setUp(self)
+ TestCpuControllerV2.mock_read_file_map.clear()
+
+ def test_initialize_cpu_usage_v2_should_set_current_cpu_usage(self):
+ controller = CpuControllerV2("test", "/sys/fs/cgroup/cpu/system.slice/test")
+
+ TestCpuControllerV2.mock_read_file_map = {
+ "/proc/uptime": os.path.join(data_dir, "cgroups", "v2", "proc_uptime_t0"),
+ os.path.join(controller.path, "cpu.stat"): os.path.join(data_dir, "cgroups", "v2", "cpu.stat_t0")
+ }
+
+ controller.initialize_cpu_usage()
+
+ self.assertEqual(controller._current_cgroup_cpu, 817045397 / 1E6)
+ self.assertEqual(controller._current_system_cpu, 776968.02)
+
+ def test_get_cpu_usage_v2_should_return_the_cpu_usage_since_its_last_invocation(self):
+ controller = CpuControllerV2("test", "/sys/fs/cgroup/cpu/system.slice/test")
+
+ TestCpuControllerV2.mock_read_file_map = {
+ "/proc/uptime": os.path.join(data_dir, "cgroups", "v2", "proc_uptime_t0"),
+ os.path.join(controller.path, "cpu.stat"): os.path.join(data_dir, "cgroups", "v2", "cpu.stat_t0")
+ }
+
+ controller.initialize_cpu_usage()
+
+ TestCpuControllerV2.mock_read_file_map = {
+ "/proc/uptime": os.path.join(data_dir, "cgroups", "v2", "proc_uptime_t1"),
+ os.path.join(controller.path, "cpu.stat"): os.path.join(data_dir, "cgroups", "v2", "cpu.stat_t1")
+ }
+
+ cpu_usage = controller.get_cpu_usage()
+
+ cgroup_usage_delta = (819624087 / 1E6) - (817045397 / 1E6)
+ system_usage_delta = 777350.57 - 776968.02
+ self.assertEqual(cpu_usage, round(100.0 * cgroup_usage_delta/system_usage_delta, 3))
+
+ TestCpuControllerV2.mock_read_file_map = {
+ "/proc/uptime": os.path.join(data_dir, "cgroups", "v2", "proc_uptime_t2"),
+ os.path.join(controller.path, "cpu.stat"): os.path.join(data_dir, "cgroups", "v2", "cpu.stat_t2")
+ }
+
+ cpu_usage = controller.get_cpu_usage()
+
+ cgroup_usage_delta = (822052295 / 1E6) - (819624087 / 1E6)
+ system_usage_delta = 779218.68 - 777350.57
+ self.assertEqual(cpu_usage, round(100.0 * cgroup_usage_delta/system_usage_delta, 3))
+
+ def test_initialize_cpu_usage_v2_should_set_the_cgroup_usage_to_0_when_the_cgroup_does_not_exist(self):
+ controller = CpuControllerV2("test", "/sys/fs/cgroup/cpu/system.slice/test")
+
+ io_error_2 = IOError()
+ io_error_2.errno = errno.ENOENT # "No such file or directory"
+
+ TestCpuControllerV2.mock_read_file_map = {
+ "/proc/uptime": os.path.join(data_dir, "cgroups", "v2", "proc_uptime_t0"),
+ os.path.join(controller.path, "cpu.stat"): io_error_2
+ }
+
+ controller.initialize_cpu_usage()
+
+ self.assertEqual(controller._current_cgroup_cpu, 0)
+ self.assertEqual(controller._current_system_cpu, 776968.02) # check the system usage just for test sanity
+
+ def test_initialize_cpu_usage_v2_should_raise_an_exception_when_called_more_than_once(self):
+ controller = CpuControllerV2("test", "/sys/fs/cgroup/cpu/system.slice/test")
+
+ TestCpuControllerV2.mock_read_file_map = {
+ "/proc/uptime": os.path.join(data_dir, "cgroups", "v2", "proc_uptime_t0"),
+ os.path.join(controller.path, "cpu.stat"): os.path.join(data_dir, "cgroups", "v2", "cpu.stat_t0")
+ }
+
+ controller.initialize_cpu_usage()
+
+ with self.assertRaises(CGroupsException):
+ controller.initialize_cpu_usage()
+
+ def test_get_cpu_usage_v2_should_raise_an_exception_when_initialize_cpu_usage_has_not_been_invoked(self):
+ controller = CpuControllerV2("test", "/sys/fs/cgroup/cpu/system.slice/test")
+
+ with self.assertRaises(CGroupsException):
+ cpu_usage = controller.get_cpu_usage() # pylint: disable=unused-variable
+
+ def test_get_throttled_time_v2_should_return_the_value_since_its_last_invocation(self):
+ test_file = os.path.join(self.tmp_dir, "cpu.stat")
+ shutil.copyfile(os.path.join(data_dir, "cgroups", "v2", "cpu.stat_t0"), test_file) # throttled_time = 15735198706
+ controller = CpuControllerV2("test", self.tmp_dir)
+ controller.initialize_cpu_usage()
+ shutil.copyfile(os.path.join(data_dir, "cgroups", "v2", "cpu.stat_t1"), test_file) # throttled_usec = 15796563650
+
+ throttled_time = controller.get_cpu_throttled_time()
+
+ self.assertEqual(throttled_time, round(float(15796563650 - 15735198706) / 1E6, 3), "The value of throttled_time is incorrect")
+
+ def test_get_tracked_metrics_v2_should_return_the_throttled_time(self):
+ controller = CpuControllerV2("test", os.path.join(data_dir, "cgroups", "v2"))
+ controller.initialize_cpu_usage()
+
+ def find_throttled_time(metrics):
+ return [m for m in metrics if m.counter == MetricsCounter.THROTTLED_TIME]
+
+ found = find_throttled_time(controller.get_tracked_metrics())
+ self.assertTrue(len(found) == 0, "get_tracked_metrics should not fetch the throttled time by default. Found: {0}".format(found))
+
+ found = find_throttled_time(controller.get_tracked_metrics(track_throttled_time=True))
+ self.assertTrue(len(found) == 1, "get_tracked_metrics should have fetched the throttled time when track_throttled_time=True. Found: {0}".format(found))
diff --git a/tests/ga/test_extension.py b/tests/ga/test_extension.py
index 95b2427bc..0a4ac2be0 100644
--- a/tests/ga/test_extension.py
+++ b/tests/ga/test_extension.py
@@ -3436,6 +3436,76 @@ def mock_popen(cmd, *args, **kwargs):
self.assertIn('[stdout]\n{0}'.format(expected), message, "The extension's stdout was not redacted correctly")
self.assertIn('[stderr]\n{0}'.format(expected), message, "The extension's stderr was not redacted correctly")
+class TestExtensionHandlerManifest(AgentTestCase):
+
+ def setUp(self):
+ AgentTestCase.setUp(self)
+ self.ext_handler = Extension(name='foo')
+ self.ext_handler.version = "1.2.3"
+ self.ext_handler_instance = ExtHandlerInstance(ext_handler=self.ext_handler, protocol=WireProtocol("1.2.3.4"))
+ self.test_file = os.path.join(self.tmp_dir, "HandlerManifest.json")
+
+ def test_handler_manifest_parsed_correctly(self):
+ shutil.copyfile(os.path.join(data_dir, "ext", "handler_manifest", "valid_manifest.json"), self.test_file)
+
+ with patch("azurelinuxagent.ga.exthandlers.ExtHandlerInstance.get_manifest_file", return_value=self.test_file):
+ manifest = self.ext_handler_instance.load_manifest()
+ self.assertEqual(manifest.get_install_command(), "install_cmd")
+ self.assertEqual(manifest.get_enable_command(), "enable_cmd")
+ self.assertEqual(manifest.get_uninstall_command(), "uninstall_cmd")
+ self.assertEqual(manifest.get_update_command(), "update_cmd")
+ self.assertEqual(manifest.get_disable_command(), "disable_cmd")
+ self.assertTrue(manifest.is_continue_on_update_failure())
+ self.assertTrue(manifest.is_report_heartbeat())
+ self.assertTrue(manifest.supports_multiple_extensions())
+
+ def test_handler_manifest_defaults(self):
+ # Set only the required fields
+ shutil.copyfile(os.path.join(data_dir, "ext", "handler_manifest", "manifest_no_optional_fields.json"), self.test_file)
+ with patch("azurelinuxagent.ga.exthandlers.ExtHandlerInstance.get_manifest_file", return_value=self.test_file):
+ manifest = self.ext_handler_instance.load_manifest()
+ self.assertFalse(manifest.is_continue_on_update_failure())
+ self.assertFalse(manifest.is_report_heartbeat())
+ self.assertFalse(manifest.supports_multiple_extensions())
+
+ def test_handler_manifest_boolean_fields(self):
+ # Set the boolean fields to strings
+ shutil.copyfile(os.path.join(data_dir, "ext", "handler_manifest", "manifest_boolean_fields_strings.json"), self.test_file)
+ with patch("azurelinuxagent.ga.exthandlers.ExtHandlerInstance.get_manifest_file", return_value=self.test_file):
+ manifest = self.ext_handler_instance.load_manifest()
+ self.assertTrue(manifest.is_continue_on_update_failure())
+ self.assertTrue(manifest.is_report_heartbeat())
+ self.assertTrue(manifest.supports_multiple_extensions())
+
+ # set the boolean fields to invalid values
+ shutil.copyfile(os.path.join(data_dir, "ext", "handler_manifest", "manifest_boolean_fields_invalid.json"), self.test_file)
+ with patch("azurelinuxagent.ga.exthandlers.ExtHandlerInstance.get_manifest_file", return_value=self.test_file):
+ manifest = self.ext_handler_instance.load_manifest()
+ self.assertFalse(manifest.is_continue_on_update_failure())
+ self.assertFalse(manifest.is_report_heartbeat())
+ self.assertFalse(manifest.supports_multiple_extensions())
+
+ # set the boolean fields to 'false' string
+ shutil.copyfile(os.path.join(data_dir, "ext", "handler_manifest", "manifest_boolean_fields_false.json"), self.test_file)
+ with patch("azurelinuxagent.ga.exthandlers.ExtHandlerInstance.get_manifest_file", return_value=self.test_file):
+ manifest = self.ext_handler_instance.load_manifest()
+ self.assertFalse(manifest.is_continue_on_update_failure())
+ self.assertFalse(manifest.is_report_heartbeat())
+ self.assertFalse(manifest.supports_multiple_extensions())
+
+ def test_report_msg_if_handler_manifest_contains_invalid_values(self):
+ # Set the boolean fields to invalid values
+ shutil.copyfile(os.path.join(data_dir, "ext", "handler_manifest", "manifest_boolean_fields_invalid.json"), self.test_file)
+ with patch("azurelinuxagent.ga.exthandlers.ExtHandlerInstance.get_manifest_file", return_value=self.test_file):
+ with patch("azurelinuxagent.ga.exthandlers.add_event") as mock_add_event:
+ manifest = self.ext_handler_instance.load_manifest()
+ manifest.report_invalid_boolean_properties("test_ext")
+ kw_messages = [kw for _, kw in mock_add_event.call_args_list if kw.get('op') == 'ExtensionHandlerManifest']
+ self.assertEqual(3, len(kw_messages))
+ self.assertIn("'reportHeartbeat' has a non-boolean value", kw_messages[0]['message'])
+ self.assertIn("'continueOnUpdateFailure' has a non-boolean value", kw_messages[1]['message'])
+ self.assertIn("'supportsMultipleExtensions' has a non-boolean value", kw_messages[2]['message'])
+
if __name__ == '__main__':
unittest.main()
diff --git a/tests/ga/test_exthandlers.py b/tests/ga/test_exthandlers.py
index f56ebce14..3252dcb23 100644
--- a/tests/ga/test_exthandlers.py
+++ b/tests/ga/test_exthandlers.py
@@ -681,7 +681,7 @@ def test_it_should_read_only_the_head_of_large_outputs(self):
self.assertGreaterEqual(len(output), 1024)
self.assertLessEqual(len(output), TELEMETRY_MESSAGE_MAX_LEN)
- mock_format.assert_called_once()
+ self.assertEqual(1, mock_format.call_count, "format_stdout_stderr should be called once")
args, kwargs = mock_format.call_args # pylint: disable=unused-variable
stdout, stderr = args
diff --git a/tests/ga/test_exthandlers_download_extension.py b/tests/ga/test_exthandlers_download_extension.py
index b3ed96a89..9f56a0202 100644
--- a/tests/ga/test_exthandlers_download_extension.py
+++ b/tests/ga/test_exthandlers_download_extension.py
@@ -127,8 +127,8 @@ def stream(_, destination, **__):
self.ext_handler_instance.download()
# first download attempt should succeed
- mock_stream.assert_called_once()
- mock_report_event.assert_called_once()
+ self.assertEqual(1, mock_stream.call_count, "wireserver stream should be called once")
+ self.assertEqual(1, mock_report_event.call_count, "report_event should be called once")
self._assert_download_and_expand_succeeded()
@@ -154,7 +154,7 @@ def stream(_, destination, **__):
with DownloadExtensionTestCase.create_mock_stream(stream) as mock_stream:
self.ext_handler_instance.download()
- mock_stream.assert_called_once()
+ self.assertEqual(1, mock_stream.call_count, "wireserver stream should be called once")
self._assert_download_and_expand_succeeded()
@@ -179,7 +179,8 @@ def stream(_, destination, **__):
with DownloadExtensionTestCase.create_mock_stream(stream) as mock_stream:
self.ext_handler_instance.download()
- mock_stream.assert_called_once()
+ self.assertEqual(1, mock_stream.call_count, "wireserver stream should be called once")
+
self._assert_download_and_expand_succeeded()
self.assertEqual(self.ext_handler_instance.get_handler_state(), ExtHandlerState.NotInstalled,
"Ensure that the state is maintained for extension HandlerState")
diff --git a/tests/ga/test_exthandlers_exthandlerinstance.py b/tests/ga/test_exthandlers_exthandlerinstance.py
index 846bb89e9..5b98c9f41 100644
--- a/tests/ga/test_exthandlers_exthandlerinstance.py
+++ b/tests/ga/test_exthandlers_exthandlerinstance.py
@@ -117,7 +117,7 @@ def test_rm_ext_handler_dir_should_report_an_event_if_an_error_occurs_while_dele
def mock_remove(path, dir_fd=None): # pylint: disable=unused-argument
if path.endswith("extension_file2"):
- raise IOError("A mocked error")
+ raise IOError(999,"A mocked error","extension_file2")
original_remove_api(path)
with patch.object(shutil.os, remove_api_name, mock_remove):
diff --git a/tests/ga/test_logcollector.py b/tests/ga/test_logcollector.py
index cedf894b0..6a8be83af 100644
--- a/tests/ga/test_logcollector.py
+++ b/tests/ga/test_logcollector.py
@@ -79,9 +79,9 @@ def _mock_cgroup(cls):
def mock_read_file(filepath, **args):
if filepath == "/proc/stat":
- filepath = os.path.join(data_dir, "cgroups", "proc_stat_t0")
+ filepath = os.path.join(data_dir, "cgroups", "v1", "proc_stat_t0")
elif filepath.endswith("/cpuacct.stat"):
- filepath = os.path.join(data_dir, "cgroups", "cpuacct.stat_t0")
+ filepath = os.path.join(data_dir, "cgroups", "v1", "cpuacct.stat_t0")
return original_read_file(filepath, **args)
cls._mock_read_cpu_cgroup_file = patch("azurelinuxagent.common.utils.fileutil.read_file", side_effect=mock_read_file)
@@ -213,7 +213,7 @@ def test_log_collector_parses_commands_in_manifest(self):
with patch("azurelinuxagent.ga.logcollector.MANIFEST_NORMAL", manifest):
with patch('azurelinuxagent.ga.logcollector.LogCollector._initialize_telemetry'):
log_collector = LogCollector()
- archive = log_collector.collect_logs_and_get_archive()
+ archive, uncompressed_file_size = log_collector.collect_logs_and_get_archive()
with open(self.output_results_file_path, "r") as fh:
results = fh.readlines()
@@ -227,6 +227,7 @@ def test_log_collector_parses_commands_in_manifest(self):
# Assert copy was parsed
self._assert_archive_created(archive)
self._assert_files_are_in_archive(expected_files=[file_to_collect])
+ self.assertEqual(uncompressed_file_size, os.path.getsize(file_to_collect))
no_files = self._get_number_of_files_in_archive()
self.assertEqual(1, no_files, "Expected 1 file in archive, found {0}!".format(no_files))
@@ -242,10 +243,11 @@ def test_log_collector_uses_full_manifest_when_full_mode_enabled(self):
with patch("azurelinuxagent.ga.logcollector.MANIFEST_FULL", manifest):
with patch('azurelinuxagent.ga.logcollector.LogCollector._initialize_telemetry'):
log_collector = LogCollector(is_full_mode=True)
- archive = log_collector.collect_logs_and_get_archive()
+ archive, uncompressed_file_size = log_collector.collect_logs_and_get_archive()
self._assert_archive_created(archive)
self._assert_files_are_in_archive(expected_files=[file_to_collect])
+ self.assertEqual(uncompressed_file_size, os.path.getsize(file_to_collect))
no_files = self._get_number_of_files_in_archive()
self.assertEqual(1, no_files, "Expected 1 file in archive, found {0}!".format(no_files))
@@ -256,7 +258,7 @@ def test_log_collector_should_collect_all_files(self):
with patch('azurelinuxagent.ga.logcollector.LogCollector._initialize_telemetry'):
log_collector = LogCollector()
- archive = log_collector.collect_logs_and_get_archive()
+ archive, uncompressed_file_size = log_collector.collect_logs_and_get_archive()
self._assert_archive_created(archive)
@@ -269,6 +271,10 @@ def test_log_collector_should_collect_all_files(self):
os.path.join(self.root_collect_dir, "another_dir", "least_important_file")
]
self._assert_files_are_in_archive(expected_files)
+ expected_total_uncompressed_size = 0
+ for file in expected_files:
+ expected_total_uncompressed_size += os.path.getsize(file)
+ self.assertEqual(uncompressed_file_size, expected_total_uncompressed_size)
no_files = self._get_number_of_files_in_archive()
self.assertEqual(6, no_files, "Expected 6 files in archive, found {0}!".format(no_files))
@@ -278,7 +284,7 @@ def test_log_collector_should_truncate_large_text_files_and_ignore_large_binary_
with patch("azurelinuxagent.ga.logcollector._FILE_SIZE_LIMIT", SMALL_FILE_SIZE):
with patch('azurelinuxagent.ga.logcollector.LogCollector._initialize_telemetry'):
log_collector = LogCollector()
- archive = log_collector.collect_logs_and_get_archive()
+ archive, uncompressed_file_size = log_collector.collect_logs_and_get_archive()
self._assert_archive_created(archive)
@@ -294,6 +300,13 @@ def test_log_collector_should_truncate_large_text_files_and_ignore_large_binary_
]
self._assert_files_are_in_archive(expected_files)
self._assert_files_are_not_in_archive(unexpected_files)
+ total_uncompressed_file_size = 0
+ for file in expected_files:
+ if file.startswith("truncated_"):
+ total_uncompressed_file_size += SMALL_FILE_SIZE
+ else:
+ total_uncompressed_file_size += os.path.getsize(file)
+ self.assertEqual(total_uncompressed_file_size, uncompressed_file_size)
no_files = self._get_number_of_files_in_archive()
self.assertEqual(5, no_files, "Expected 5 files in archive, found {0}!".format(no_files))
@@ -312,7 +325,7 @@ def test_log_collector_should_prioritize_important_files_if_archive_too_big(self
with patch("azurelinuxagent.ga.logcollector._MUST_COLLECT_FILES", must_collect_files):
with patch('azurelinuxagent.ga.logcollector.LogCollector._initialize_telemetry'):
log_collector = LogCollector()
- archive = log_collector.collect_logs_and_get_archive()
+ archive, uncompressed_file_size = log_collector.collect_logs_and_get_archive()
self._assert_archive_created(archive)
@@ -328,6 +341,10 @@ def test_log_collector_should_prioritize_important_files_if_archive_too_big(self
]
self._assert_files_are_in_archive(expected_files)
self._assert_files_are_not_in_archive(unexpected_files)
+ expected_total_uncompressed_size = 0
+ for file in expected_files:
+ expected_total_uncompressed_size += os.path.getsize(file)
+ self.assertEqual(uncompressed_file_size, expected_total_uncompressed_size)
no_files = self._get_number_of_files_in_archive()
self.assertEqual(3, no_files, "Expected 3 files in archive, found {0}!".format(no_files))
@@ -338,7 +355,7 @@ def test_log_collector_should_prioritize_important_files_if_archive_too_big(self
with patch("azurelinuxagent.ga.logcollector._UNCOMPRESSED_ARCHIVE_SIZE_LIMIT", 10 * 1024 * 1024):
with patch("azurelinuxagent.ga.logcollector._MUST_COLLECT_FILES", must_collect_files):
- second_archive = log_collector.collect_logs_and_get_archive()
+ second_archive, second_uncompressed_file_size = log_collector.collect_logs_and_get_archive()
expected_files = [
os.path.join(self.root_collect_dir, "waagent.log"),
@@ -352,6 +369,10 @@ def test_log_collector_should_prioritize_important_files_if_archive_too_big(self
]
self._assert_files_are_in_archive(expected_files)
self._assert_files_are_not_in_archive(unexpected_files)
+ expected_total_uncompressed_size = 0
+ for file in expected_files:
+ expected_total_uncompressed_size += os.path.getsize(file)
+ self.assertEqual(second_uncompressed_file_size, expected_total_uncompressed_size)
self._assert_archive_created(second_archive)
@@ -363,7 +384,7 @@ def test_log_collector_should_update_archive_when_files_are_new_or_modified_or_d
# needs to be updated in the archive, deleted if removed from disk, and added if not previously seen.
with patch('azurelinuxagent.ga.logcollector.LogCollector._initialize_telemetry'):
log_collector = LogCollector()
- first_archive = log_collector.collect_logs_and_get_archive()
+ first_archive, first_uncompressed_file_size = log_collector.collect_logs_and_get_archive()
self._assert_archive_created(first_archive)
# Everything should be in the archive
@@ -376,6 +397,10 @@ def test_log_collector_should_update_archive_when_files_are_new_or_modified_or_d
os.path.join(self.root_collect_dir, "another_dir", "least_important_file")
]
self._assert_files_are_in_archive(expected_files)
+ expected_total_uncompressed_size = 0
+ for file in expected_files:
+ expected_total_uncompressed_size += os.path.getsize(file)
+ self.assertEqual(first_uncompressed_file_size, expected_total_uncompressed_size)
no_files = self._get_number_of_files_in_archive()
self.assertEqual(6, no_files, "Expected 6 files in archive, found {0}!".format(no_files))
@@ -392,7 +417,7 @@ def test_log_collector_should_update_archive_when_files_are_new_or_modified_or_d
LARGE_FILE_SIZE)
rm_files(os.path.join(self.root_collect_dir, "waagent.log.1"))
- second_archive = log_collector.collect_logs_and_get_archive()
+ second_archive, second_uncompressed_file_size = log_collector.collect_logs_and_get_archive()
self._assert_archive_created(second_archive)
expected_files = [
@@ -408,6 +433,10 @@ def test_log_collector_should_update_archive_when_files_are_new_or_modified_or_d
]
self._assert_files_are_in_archive(expected_files)
self._assert_files_are_not_in_archive(unexpected_files)
+ expected_total_uncompressed_size = 0
+ for file in expected_files:
+ expected_total_uncompressed_size += os.path.getsize(file)
+ self.assertEqual(second_uncompressed_file_size, expected_total_uncompressed_size)
file = os.path.join(self.root_collect_dir, "waagent.log") # pylint: disable=redefined-builtin
new_file_size = self._get_uncompressed_file_size(file)
@@ -434,7 +463,7 @@ def test_log_collector_should_clean_up_uncollected_truncated_files(self):
with patch("azurelinuxagent.ga.logcollector._FILE_SIZE_LIMIT", SMALL_FILE_SIZE):
with patch('azurelinuxagent.ga.logcollector.LogCollector._initialize_telemetry'):
log_collector = LogCollector()
- archive = log_collector.collect_logs_and_get_archive()
+ archive, uncompressed_file_size = log_collector.collect_logs_and_get_archive()
self._assert_archive_created(archive)
@@ -443,6 +472,13 @@ def test_log_collector_should_clean_up_uncollected_truncated_files(self):
self._truncated_path(os.path.join(self.root_collect_dir, "waagent.log.1")), # this file should be truncated
]
self._assert_files_are_in_archive(expected_files)
+ expected_total_uncompressed_size = 0
+ for file in expected_files:
+ if file.startswith("truncated_"):
+ expected_total_uncompressed_size += SMALL_FILE_SIZE
+ else:
+ expected_total_uncompressed_size += os.path.getsize(file)
+ self.assertEqual(uncompressed_file_size, expected_total_uncompressed_size)
no_files = self._get_number_of_files_in_archive()
self.assertEqual(2, no_files, "Expected 2 files in archive, found {0}!".format(no_files))
@@ -456,7 +492,7 @@ def test_log_collector_should_clean_up_uncollected_truncated_files(self):
with patch("azurelinuxagent.ga.logcollector._FILE_SIZE_LIMIT", SMALL_FILE_SIZE):
with patch('azurelinuxagent.ga.logcollector.LogCollector._initialize_telemetry'):
log_collector = LogCollector()
- second_archive = log_collector.collect_logs_and_get_archive()
+ second_archive, second_uncompressed_file_size = log_collector.collect_logs_and_get_archive()
expected_files = [
os.path.join(self.root_collect_dir, "waagent.log"),
@@ -467,6 +503,13 @@ def test_log_collector_should_clean_up_uncollected_truncated_files(self):
]
self._assert_files_are_in_archive(expected_files)
self._assert_files_are_not_in_archive(unexpected_files)
+ expected_total_uncompressed_size = 0
+ for file in expected_files:
+ if file.startswith("truncated_"):
+ expected_total_uncompressed_size += SMALL_FILE_SIZE
+ else:
+ expected_total_uncompressed_size += os.path.getsize(file)
+ self.assertEqual(second_uncompressed_file_size, expected_total_uncompressed_size)
self._assert_archive_created(second_archive)
diff --git a/tests/ga/test_memorycontroller.py b/tests/ga/test_memorycontroller.py
new file mode 100644
index 000000000..1beb9a33f
--- /dev/null
+++ b/tests/ga/test_memorycontroller.py
@@ -0,0 +1,124 @@
+# Copyright 2018 Microsoft Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Requires Python 2.4+ and Openssl 1.0+
+#
+
+from __future__ import print_function
+
+import errno
+import os
+import shutil
+
+from azurelinuxagent.ga.cgroupcontroller import CounterNotFound
+from azurelinuxagent.ga.memorycontroller import MemoryControllerV1, MemoryControllerV2
+from tests.lib.tools import AgentTestCase, data_dir
+
+
+class TestMemoryControllerV1(AgentTestCase):
+ def test_get_metrics_v1(self):
+ test_mem_controller = MemoryControllerV1("test_extension", os.path.join(data_dir, "cgroups", "v1"))
+
+ rss_memory_usage, cache_memory_usage = test_mem_controller.get_memory_usage()
+ self.assertEqual(100000, rss_memory_usage)
+ self.assertEqual(50000, cache_memory_usage)
+
+ max_memory_usage = test_mem_controller.get_max_memory_usage()
+ self.assertEqual(1000000, max_memory_usage)
+
+ swap_memory_usage = test_mem_controller.try_swap_memory_usage()
+ self.assertEqual(20000, swap_memory_usage)
+
+ def test_get_metrics_v1_when_files_not_present(self):
+ test_mem_controller = MemoryControllerV1("test_extension", os.path.join(data_dir, "cgroups"))
+
+ with self.assertRaises(IOError) as e:
+ test_mem_controller.get_memory_usage()
+
+ self.assertEqual(e.exception.errno, errno.ENOENT)
+
+ with self.assertRaises(IOError) as e:
+ test_mem_controller.get_max_memory_usage()
+
+ self.assertEqual(e.exception.errno, errno.ENOENT)
+
+ with self.assertRaises(IOError) as e:
+ test_mem_controller.try_swap_memory_usage()
+
+ self.assertEqual(e.exception.errno, errno.ENOENT)
+
+ def test_get_memory_usage_v1_counters_not_found(self):
+ test_file = os.path.join(self.tmp_dir, "memory.stat")
+ shutil.copyfile(os.path.join(data_dir, "cgroups", "v1", "memory.stat_missing"), test_file)
+ test_mem_controller = MemoryControllerV1("test_extension", self.tmp_dir)
+
+ with self.assertRaises(CounterNotFound):
+ test_mem_controller.get_memory_usage()
+
+ swap_memory_usage = test_mem_controller.try_swap_memory_usage()
+ self.assertEqual(0, swap_memory_usage)
+
+
+class TestMemoryControllerV2(AgentTestCase):
+ def test_get_metrics_v2(self):
+ test_mem_controller = MemoryControllerV2("test_extension", os.path.join(data_dir, "cgroups", "v2"))
+
+ anon_memory_usage, cache_memory_usage = test_mem_controller.get_memory_usage()
+ self.assertEqual(17589300, anon_memory_usage)
+ self.assertEqual(134553600, cache_memory_usage)
+
+ max_memory_usage = test_mem_controller.get_max_memory_usage()
+ self.assertEqual(194494464, max_memory_usage)
+
+ swap_memory_usage = test_mem_controller.try_swap_memory_usage()
+ self.assertEqual(20000, swap_memory_usage)
+
+ memory_throttled_events = test_mem_controller.get_memory_throttled_events()
+ self.assertEqual(9, memory_throttled_events)
+
+ def test_get_metrics_v2_when_files_not_present(self):
+ test_mem_controller = MemoryControllerV2("test_extension", os.path.join(data_dir, "cgroups"))
+
+ with self.assertRaises(IOError) as e:
+ test_mem_controller.get_memory_usage()
+
+ self.assertEqual(e.exception.errno, errno.ENOENT)
+
+ with self.assertRaises(IOError) as e:
+ test_mem_controller.get_max_memory_usage()
+
+ self.assertEqual(e.exception.errno, errno.ENOENT)
+
+ with self.assertRaises(IOError) as e:
+ test_mem_controller.try_swap_memory_usage()
+
+ self.assertEqual(e.exception.errno, errno.ENOENT)
+
+ with self.assertRaises(IOError) as e:
+ test_mem_controller.get_memory_throttled_events()
+
+ self.assertEqual(e.exception.errno, errno.ENOENT)
+
+    def test_get_memory_usage_v2_counters_not_found(self):
+ test_stat_file = os.path.join(self.tmp_dir, "memory.stat")
+ shutil.copyfile(os.path.join(data_dir, "cgroups", "v2", "memory.stat_missing"), test_stat_file)
+ test_events_file = os.path.join(self.tmp_dir, "memory.events")
+ shutil.copyfile(os.path.join(data_dir, "cgroups", "v2", "memory.stat_missing"), test_events_file)
+ test_mem_controller = MemoryControllerV2("test_extension", self.tmp_dir)
+
+ with self.assertRaises(CounterNotFound):
+ test_mem_controller.get_memory_usage()
+
+ with self.assertRaises(CounterNotFound):
+ test_mem_controller.get_memory_throttled_events()
diff --git a/tests/ga/test_monitor.py b/tests/ga/test_monitor.py
index 1dbec27c3..a2100cde5 100644
--- a/tests/ga/test_monitor.py
+++ b/tests/ga/test_monitor.py
@@ -21,12 +21,14 @@
import string
from azurelinuxagent.common import event, logger
-from azurelinuxagent.ga.cgroup import CpuCgroup, MemoryCgroup, MetricValue, _REPORT_EVERY_HOUR
+from azurelinuxagent.ga.cgroupcontroller import MetricValue, _REPORT_EVERY_HOUR
from azurelinuxagent.ga.cgroupstelemetry import CGroupsTelemetry
from azurelinuxagent.common.event import EVENTS_DIRECTORY
from azurelinuxagent.common.protocol.healthservice import HealthService
from azurelinuxagent.common.protocol.util import ProtocolUtil
from azurelinuxagent.common.protocol.wire import WireProtocol
+from azurelinuxagent.ga.cpucontroller import CpuControllerV1
+from azurelinuxagent.ga.memorycontroller import MemoryControllerV1
from azurelinuxagent.ga.monitor import get_monitor_handler, PeriodicOperation, SendImdsHeartbeat, \
ResetPeriodicLogMessages, SendHostPluginHeartbeat, PollResourceUsage, \
ReportNetworkErrors, ReportNetworkConfigurationChanges, PollSystemWideResourceUsage
@@ -222,23 +224,23 @@ def test_send_extension_metrics_telemetry_for_empty_cgroup(self, patch_poll_all_
self.assertEqual(0, patch_add_metric.call_count)
@patch('azurelinuxagent.common.event.EventLogger.add_metric')
- @patch("azurelinuxagent.ga.cgroup.MemoryCgroup.get_memory_usage")
+ @patch("azurelinuxagent.ga.memorycontroller.MemoryControllerV1.get_memory_usage")
@patch('azurelinuxagent.common.logger.Logger.periodic_warn')
def test_send_extension_metrics_telemetry_handling_memory_cgroup_exceptions_errno2(self, patch_periodic_warn, # pylint: disable=unused-argument
- patch_get_memory_usage,
+ get_memory_usage,
patch_add_metric, *args):
ioerror = IOError()
ioerror.errno = 2
- patch_get_memory_usage.side_effect = ioerror
+ get_memory_usage.side_effect = ioerror
- CGroupsTelemetry._tracked["/test/path"] = MemoryCgroup("cgroup_name", "/test/path")
+ CGroupsTelemetry._tracked["/test/path"] = MemoryControllerV1("_cgroup_name", "/test/path")
PollResourceUsage().run()
self.assertEqual(0, patch_periodic_warn.call_count)
self.assertEqual(0, patch_add_metric.call_count) # No metrics should be sent.
@patch('azurelinuxagent.common.event.EventLogger.add_metric')
- @patch("azurelinuxagent.ga.cgroup.CpuCgroup.get_cpu_usage")
+ @patch("azurelinuxagent.ga.cpucontroller.CpuControllerV1.get_cpu_usage")
@patch('azurelinuxagent.common.logger.Logger.periodic_warn')
def test_send_extension_metrics_telemetry_handling_cpu_cgroup_exceptions_errno2(self, patch_periodic_warn, # pylint: disable=unused-argument
patch_cpu_usage, patch_add_metric,
@@ -247,7 +249,7 @@ def test_send_extension_metrics_telemetry_handling_cpu_cgroup_exceptions_errno2(
ioerror.errno = 2
patch_cpu_usage.side_effect = ioerror
- CGroupsTelemetry._tracked["/test/path"] = CpuCgroup("cgroup_name", "/test/path")
+ CGroupsTelemetry._tracked["/test/path"] = CpuControllerV1("_cgroup_name", "/test/path")
PollResourceUsage().run()
self.assertEqual(0, patch_periodic_warn.call_count)
diff --git a/tests/ga/test_persist_firewall_rules.py b/tests/ga/test_persist_firewall_rules.py
index adcf43b75..7754f1efb 100644
--- a/tests/ga/test_persist_firewall_rules.py
+++ b/tests/ga/test_persist_firewall_rules.py
@@ -127,13 +127,6 @@ def __assert_systemctl_called(self, cmd="enable", validate_command_called=True):
else:
self.assertNotIn(systemctl_command, self.__executed_commands, "Systemctl command {0} found".format(cmd))
- def __assert_systemctl_reloaded(self, validate_command_called=True):
- systemctl_reload = ["systemctl", "daemon-reload"]
- if validate_command_called:
- self.assertIn(systemctl_reload, self.__executed_commands, "Systemctl config not reloaded")
- else:
- self.assertNotIn(systemctl_reload, self.__executed_commands, "Systemctl config reloaded")
-
def __assert_firewall_cmd_running_called(self, validate_command_called=True):
cmd = PersistFirewallRulesHandler._FIREWALLD_RUNNING_CMD
if validate_command_called:
@@ -144,7 +137,6 @@ def __assert_firewall_cmd_running_called(self, validate_command_called=True):
def __assert_network_service_setup_properly(self):
self.__assert_systemctl_called(cmd="is-enabled", validate_command_called=True)
self.__assert_systemctl_called(cmd="enable", validate_command_called=True)
- self.__assert_systemctl_reloaded()
self.__assert_firewall_called(cmd=FirewallCmdDirectCommands.PassThrough, validate_command_called=False)
self.assertTrue(os.path.exists(self._network_service_unit_file), "Service unit file should be there")
self.assertTrue(os.path.exists(self._binary_file), "Binary file should be there")
@@ -200,7 +192,6 @@ def __setup_and_assert_network_service_setup_scenario(self, handler, mock_popen=
self.__assert_systemctl_called(cmd="is-enabled", validate_command_called=True)
self.__assert_systemctl_called(cmd="enable", validate_command_called=True)
- self.__assert_systemctl_reloaded(validate_command_called=True)
self.__assert_firewall_cmd_running_called(validate_command_called=True)
self.__assert_firewall_called(cmd=FirewallCmdDirectCommands.QueryPassThrough, validate_command_called=False)
self.__assert_firewall_called(cmd=FirewallCmdDirectCommands.RemovePassThrough, validate_command_called=False)
@@ -234,7 +225,6 @@ def test_it_should_skip_setup_if_agent_network_setup_service_already_enabled_and
self.__assert_systemctl_called(cmd="is-enabled", validate_command_called=True)
self.__assert_systemctl_called(cmd="enable", validate_command_called=False)
- self.__assert_systemctl_reloaded(validate_command_called=False)
self.__assert_firewall_cmd_running_called(validate_command_called=True)
self.__assert_firewall_called(cmd=FirewallCmdDirectCommands.QueryPassThrough, validate_command_called=False)
self.__assert_firewall_called(cmd=FirewallCmdDirectCommands.RemovePassThrough, validate_command_called=False)
@@ -396,7 +386,6 @@ def test_it_should_delete_custom_service_files_if_firewalld_enabled(self):
self.__assert_firewall_called(cmd=FirewallCmdDirectCommands.PassThrough, validate_command_called=True)
self.__assert_systemctl_called(cmd="is-enabled", validate_command_called=False)
self.__assert_systemctl_called(cmd="enable", validate_command_called=False)
- self.__assert_systemctl_reloaded(validate_command_called=False)
self.assertFalse(os.path.exists(handler.get_service_file_path()), "Service unit file found")
self.assertFalse(os.path.exists(os.path.join(conf.get_lib_dir(), handler.BINARY_FILE_NAME)), "Binary file found")
diff --git a/tests/ga/test_policy_engine.py b/tests/ga/test_policy_engine.py
new file mode 100644
index 000000000..7f9a8bcc7
--- /dev/null
+++ b/tests/ga/test_policy_engine.py
@@ -0,0 +1,34 @@
+# Copyright 2018 Microsoft Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Requires Python 2.4+ and Openssl 1.0+
+#
+
+from tests.lib.tools import AgentTestCase
+from azurelinuxagent.ga.policy.policy_engine import PolicyEngine
+from tests.lib.tools import patch
+
+
+class TestPolicyEngine(AgentTestCase):
+ def test_policy_enforcement_should_be_enabled(self):
+ with patch('azurelinuxagent.ga.policy.policy_engine.conf.get_extension_policy_enabled', return_value=True):
+ engine = PolicyEngine()
+ self.assertTrue(engine.is_policy_enforcement_enabled(),
+ msg="Conf flag is set to true so policy enforcement should be enabled.")
+
+ def test_policy_enforcement_should_be_disabled(self):
+ engine = PolicyEngine()
+ self.assertFalse(engine.is_policy_enforcement_enabled(),
+ msg="Conf flag is set to false so policy enforcement should be disabled.")
+
diff --git a/tests/ga/test_update.py b/tests/ga/test_update.py
index 9b20737cb..d5242f969 100644
--- a/tests/ga/test_update.py
+++ b/tests/ga/test_update.py
@@ -20,6 +20,8 @@
from datetime import datetime, timedelta
from threading import current_thread
+
+from azurelinuxagent.ga.agent_update_handler import INITIAL_UPDATE_STATE_FILE
from azurelinuxagent.ga.guestagent import GuestAgent, GuestAgentError, \
AGENT_ERROR_FILE
from tests.common.osutil.test_default import TestOSUtil
@@ -52,7 +54,7 @@
from tests.lib.mock_update_handler import mock_update_handler
from tests.lib.mock_wire_protocol import mock_wire_protocol, MockHttpResponse
from tests.lib.wire_protocol_data import DATA_FILE, DATA_FILE_MULTIPLE_EXT, DATA_FILE_VM_SETTINGS
-from tests.lib.tools import AgentTestCase, AgentTestCaseWithGetVmSizeMock, data_dir, DEFAULT, patch, load_bin_data, Mock, MagicMock, \
+from tests.lib.tools import AgentTestCase, data_dir, DEFAULT, patch, load_bin_data, Mock, MagicMock, \
clear_singleton_instances, is_python_version_26_or_34, skip_if_predicate_true
from tests.lib import wire_protocol_data
from tests.lib.http_request_predicates import HttpRequestPredicates
@@ -118,7 +120,7 @@ def _get_update_handler(iterations=1, test_data=None, protocol=None, autoupdate_
yield update_handler, protocol
-class UpdateTestCase(AgentTestCaseWithGetVmSizeMock):
+class UpdateTestCase(AgentTestCase):
_test_suite_tmp_dir = None
_agent_zip_dir = None
@@ -1281,6 +1283,9 @@ def update_goal_state_and_run_handler(autoupdate_enabled=True):
protocol.set_http_handlers(http_get_handler=get_handler, http_put_handler=put_handler)
+ # mocking first agent update attempted
+ open(os.path.join(conf.get_lib_dir(), INITIAL_UPDATE_STATE_FILE), "a").close()
+
# Case 1: rsm version missing in GS when vm opt-in for rsm upgrades; report missing rsm version error
protocol.mock_wire_data.set_extension_config("wire/ext_conf_version_missing_in_agent_family.xml")
update_goal_state_and_run_handler()
@@ -1480,7 +1485,10 @@ def create_conf_mocks(self, autoupdate_frequency, hotfix_frequency, normal_frequ
@contextlib.contextmanager
def __get_update_handler(self, iterations=1, test_data=None,
- reload_conf=None, autoupdate_frequency=0.001, hotfix_frequency=1.0, normal_frequency=2.0):
+ reload_conf=None, autoupdate_frequency=0.001, hotfix_frequency=1.0, normal_frequency=2.0, initial_update_attempted=True):
+
+ if initial_update_attempted:
+ open(os.path.join(conf.get_lib_dir(), INITIAL_UPDATE_STATE_FILE), "a").close()
test_data = DATA_FILE if test_data is None else test_data
# In _get_update_handler() contextmanager, yield is used inside an if-else block and that's creating a false positive pylint warning
@@ -1927,7 +1935,7 @@ def reload_conf(url, protocol):
@patch('azurelinuxagent.ga.update.get_collect_logs_handler')
@patch('azurelinuxagent.ga.update.get_monitor_handler')
@patch('azurelinuxagent.ga.update.get_env_handler')
-class MonitorThreadTest(AgentTestCaseWithGetVmSizeMock):
+class MonitorThreadTest(AgentTestCase):
def setUp(self):
super(MonitorThreadTest, self).setUp()
self.event_patch = patch('azurelinuxagent.common.event.add_event')
@@ -2440,11 +2448,11 @@ def test_telemetry_heartbeat_creates_event(self, patch_add_event, patch_info, *_
with mock_wire_protocol(wire_protocol_data.DATA_FILE) as mock_protocol:
update_handler = get_update_handler()
-
+ agent_update_handler = Mock()
update_handler.last_telemetry_heartbeat = datetime.utcnow() - timedelta(hours=1)
- update_handler._send_heartbeat_telemetry(mock_protocol)
+ update_handler._send_heartbeat_telemetry(mock_protocol, agent_update_handler)
self.assertEqual(1, patch_add_event.call_count)
- self.assertTrue(any(call_args[0] == "[HEARTBEAT] Agent {0} is running as the goal state agent {1}"
+ self.assertTrue(any(call_args[0] == "[HEARTBEAT] Agent {0} is running as the goal state agent [DEBUG {1}]"
for call_args in patch_info.call_args), "The heartbeat was not written to the agent's log")
diff --git a/tests/lib/mock_cgroup_environment.py b/tests/lib/mock_cgroup_environment.py
index d9f79cb6a..a8f5fa9a3 100644
--- a/tests/lib/mock_cgroup_environment.py
+++ b/tests/lib/mock_cgroup_environment.py
@@ -122,7 +122,9 @@
_MOCKED_FILES_V1 = [
("/proc/self/cgroup", os.path.join(data_dir, 'cgroups', 'v1', 'proc_self_cgroup')),
- (r"/proc/[0-9]+/cgroup", os.path.join(data_dir, 'cgroups', 'v1', 'proc_pid_cgroup'))
+ (r"/proc/[0-9]+/cgroup", os.path.join(data_dir, 'cgroups', 'v1', 'proc_pid_cgroup')),
+ (r"/sys/fs/cgroup/cpu,cpuacct/system.slice/walinuxagent.service/cgroup.procs", os.path.join(data_dir, 'cgroups', 'cgroup.procs')),
+ (r"/sys/fs/cgroup/memory/system.slice/walinuxagent.service/cgroup.procs", os.path.join(data_dir, 'cgroups', 'cgroup.procs'))
]
_MOCKED_FILES_V2 = [
@@ -130,7 +132,8 @@
(r"/proc/[0-9]+/cgroup", os.path.join(data_dir, 'cgroups', 'v2', 'proc_pid_cgroup')),
("/sys/fs/cgroup/cgroup.subtree_control", os.path.join(data_dir, 'cgroups', 'v2', 'sys_fs_cgroup_cgroup.subtree_control')),
("/sys/fs/cgroup/azure.slice/cgroup.subtree_control", os.path.join(data_dir, 'cgroups', 'v2', 'sys_fs_cgroup_cgroup.subtree_control')),
- ("/sys/fs/cgroup/azure.slice/walinuxagent.service/cgroup.subtree_control", os.path.join(data_dir, 'cgroups', 'v2', 'sys_fs_cgroup_cgroup.subtree_control_empty'))
+ ("/sys/fs/cgroup/azure.slice/walinuxagent.service/cgroup.subtree_control", os.path.join(data_dir, 'cgroups', 'v2', 'sys_fs_cgroup_cgroup.subtree_control_empty')),
+ (r"/sys/fs/cgroup/system.slice/walinuxagent.service/cgroup.procs", os.path.join(data_dir, 'cgroups', 'cgroup.procs'))
]
_MOCKED_FILES_HYBRID = [
diff --git a/tests/lib/mock_command.py b/tests/lib/mock_command.py
index e181d26d9..83509c3d3 100755
--- a/tests/lib/mock_command.py
+++ b/tests/lib/mock_command.py
@@ -2,12 +2,18 @@
import os
import sys
-if len(sys.argv) != 4:
+if len(sys.argv) < 4:
sys.stderr.write("usage: {0} ".format(os.path.basename(__file__)))
# W0632: Possible unbalanced tuple unpacking with sequence: left side has 3 label(s), right side has 0 value(s) (unbalanced-tuple-unpacking)
# Disabled: Unpacking is balanced: there is a check for the length on line 5
-stdout, return_value, stderr = sys.argv[1:] # pylint: disable=W0632
+
+# This script is used to mock cgroups commands in tests: when Popen is invoked, this script runs instead of the actual command.
+# The stdout, return value, and stderr of the mocked command are passed as arguments; the script prints them to stdout/stderr and exits with the given return value,
+# so that Popen observes the output of the mocked command. Ideally sys.argv has exactly 4 entries: the script name followed by the 3 command-output values.
+# However, when the tests are run from PyCharm, extra arguments are inserted after the script name, so we read the last 3 arguments instead.
+# ex: /home/nag/Documents/repos/WALinuxAgent/tests/lib/mock_command.py /snap/pycharm-professional/412/plugins/python-ce/helpers/py... +BLKID +ELFUTILS +KMOD -IDN2 +IDN -PCRE2 default-hierarchy=hybrid\n 0
+stdout, return_value, stderr = sys.argv[-3:] # pylint: disable=W0632
if stdout != '':
sys.stdout.write(stdout)
diff --git a/tests/lib/mock_environment.py b/tests/lib/mock_environment.py
index 8f5682cf8..5b7209358 100644
--- a/tests/lib/mock_environment.py
+++ b/tests/lib/mock_environment.py
@@ -76,12 +76,14 @@ def __init__(self, tmp_dir, commands=None, paths=None, files=None, data_files=No
self._original_popen = subprocess.Popen
self._original_mkdir = fileutil.mkdir
self._original_path_exists = os.path.exists
+ self._original_os_remove = os.remove
self._original_open = open
self.patchers = [
patch_builtin("open", side_effect=self._mock_open),
patch("subprocess.Popen", side_effect=self._mock_popen),
patch("os.path.exists", side_effect=self._mock_path_exists),
+ patch("os.remove", side_effect=self._mock_os_remove),
patch("azurelinuxagent.common.utils.fileutil.mkdir", side_effect=self._mock_mkdir)
]
@@ -166,3 +168,6 @@ def _mock_open(self, path, *args, **kwargs):
def _mock_path_exists(self, path):
return self._original_path_exists(self.get_mapped_path(path))
+ def _mock_os_remove(self, path):
+ return self._original_os_remove(self.get_mapped_path(path))
+
diff --git a/tests/lib/mock_wire_protocol.py b/tests/lib/mock_wire_protocol.py
index 78cbc59e2..2cf2b10e0 100644
--- a/tests/lib/mock_wire_protocol.py
+++ b/tests/lib/mock_wire_protocol.py
@@ -22,7 +22,7 @@
@contextlib.contextmanager
-def mock_wire_protocol(mock_wire_data_file, http_get_handler=None, http_post_handler=None, http_put_handler=None, do_not_mock=lambda method, url: False, fail_on_unknown_request=True, save_to_history=False):
+def mock_wire_protocol(mock_wire_data_file, http_get_handler=None, http_post_handler=None, http_put_handler=None, do_not_mock=lambda method, url: False, fail_on_unknown_request=True, save_to_history=False, detect_protocol=True):
"""
Creates a WireProtocol object that handles requests to the WireServer, the Host GA Plugin, and some requests to storage (requests that provide mock data
in wire_protocol_data.py).
@@ -149,7 +149,8 @@ def stop():
# go do it
try:
protocol.start()
- protocol.detect(save_to_history=save_to_history)
+ if detect_protocol:
+ protocol.detect(save_to_history=save_to_history)
yield protocol
finally:
protocol.stop()
diff --git a/tests/lib/tools.py b/tests/lib/tools.py
index dd0d96172..fc1f72150 100644
--- a/tests/lib/tools.py
+++ b/tests/lib/tools.py
@@ -42,9 +42,6 @@
try:
from unittest.mock import Mock, patch, MagicMock, ANY, DEFAULT, call, PropertyMock # pylint: disable=unused-import
-
- # Import mock module for Python2 and Python3
- from bin.waagent2 import Agent # pylint: disable=unused-import
except ImportError:
from mock import Mock, patch, MagicMock, ANY, DEFAULT, call, PropertyMock
@@ -447,22 +444,6 @@ def create_script(script_file, contents):
os.chmod(script_file, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
-class AgentTestCaseWithGetVmSizeMock(AgentTestCase):
-
- def setUp(self):
-
- self._get_vm_size_patch = patch('azurelinuxagent.ga.update.UpdateHandler._get_vm_size', return_value="unknown")
- self._get_vm_size_patch.start()
-
- super(AgentTestCaseWithGetVmSizeMock, self).setUp()
-
- def tearDown(self):
-
- if self._get_vm_size_patch:
- self._get_vm_size_patch.stop()
-
- super(AgentTestCaseWithGetVmSizeMock, self).tearDown()
-
def load_data(name):
"""Load test data"""
path = os.path.join(data_dir, name)
diff --git a/tests/test_agent.py b/tests/test_agent.py
index 4b643ca36..15e7b05bf 100644
--- a/tests/test_agent.py
+++ b/tests/test_agent.py
@@ -24,9 +24,10 @@
from azurelinuxagent.common.exception import CGroupsException
from azurelinuxagent.ga import logcollector, cgroupconfigurator
from azurelinuxagent.common.utils import fileutil
-from azurelinuxagent.ga.cgroupapi import get_cgroup_api, InvalidCgroupMountpointException
+from azurelinuxagent.ga.cgroupapi import InvalidCgroupMountpointException, CgroupV1, CgroupV2
from azurelinuxagent.ga.collect_logs import CollectLogsHandler
-from tests.lib.mock_cgroup_environment import mock_cgroup_v1_environment
+from azurelinuxagent.ga.cgroupcontroller import AGENT_LOG_COLLECTOR
+from tests.lib.mock_cgroup_environment import mock_cgroup_v1_environment, mock_cgroup_v2_environment
from tests.lib.tools import AgentTestCase, data_dir, Mock, patch
EXPECTED_CONFIGURATION = \
@@ -44,13 +45,14 @@
Debug.CgroupDisableOnProcessCheckFailure = True
Debug.CgroupDisableOnQuotaCheckFailure = True
Debug.CgroupLogMetrics = False
-Debug.CgroupMonitorExpiryTime = 2022-03-31
-Debug.CgroupMonitorExtensionName = Microsoft.Azure.Monitor.AzureMonitorLinuxAgent
Debug.EnableAgentMemoryUsageCheck = False
+Debug.EnableCgroupV2ResourceLimiting = False
+Debug.EnableExtensionPolicy = False
Debug.EnableFastTrack = True
Debug.EnableGAVersioning = True
Debug.EtpCollectionPeriod = 300
Debug.FirewallRulesLogPeriod = 86400
+Debug.LogCollectorInitialDelay = 300
DetectScvmmEnv = False
EnableOverProvisioning = True
Extension.LogDir = /var/log/azure
@@ -231,7 +233,7 @@ def test_rejects_invalid_log_collector_mode(self, mock_exit, mock_stderr): # py
@patch("azurelinuxagent.agent.LogCollector")
def test_calls_collect_logs_with_proper_mode(self, mock_log_collector, *args): # pylint: disable=unused-argument
agent = Agent(False, conf_file_path=os.path.join(data_dir, "test_waagent.conf"))
- mock_log_collector.run = Mock()
+ mock_log_collector.return_value.collect_logs_and_get_archive.return_value = (Mock(), Mock()) # LogCollector.collect_logs_and_get_archive returns a tuple
agent.collect_logs(is_full_mode=True)
full_mode = mock_log_collector.call_args_list[0][0][0]
@@ -245,22 +247,57 @@ def test_calls_collect_logs_with_proper_mode(self, mock_log_collector, *args):
def test_calls_collect_logs_on_valid_cgroups_v1(self, mock_log_collector):
try:
CollectLogsHandler.enable_monitor_cgroups_check()
- mock_log_collector.run = Mock()
-
- # Mock cgroup paths so process is in the log collector slice
- def mock_cgroup_paths(*args, **kwargs):
- if args and args[0] == "self":
- relative_path = "{0}/{1}".format(cgroupconfigurator.LOGCOLLECTOR_SLICE, logcollector.CGROUPS_UNIT)
- return (relative_path, relative_path)
- return get_cgroup_api().get_process_cgroup_relative_paths(*args, **kwargs)
+ mock_log_collector.return_value.collect_logs_and_get_archive.return_value = (Mock(), Mock()) # LogCollector.collect_logs_and_get_archive returns a tuple
+
+ # Mock cgroup so process is in the log collector slice
+ def mock_cgroup(*args, **kwargs): # pylint: disable=W0613
+ relative_path = "{0}/{1}".format(cgroupconfigurator.LOGCOLLECTOR_SLICE, logcollector.CGROUPS_UNIT)
+ return CgroupV1(
+ cgroup_name=AGENT_LOG_COLLECTOR,
+ controller_mountpoints={
+ 'cpu,cpuacct':"/sys/fs/cgroup/cpu,cpuacct",
+ 'memory':"/sys/fs/cgroup/memory"
+ },
+ controller_paths={
+ 'cpu,cpuacct':"/sys/fs/cgroup/cpu,cpuacct/{0}".format(relative_path),
+ 'memory':"/sys/fs/cgroup/memory/{0}".format(relative_path)
+ }
+ )
with mock_cgroup_v1_environment(self.tmp_dir):
- with patch("azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv1.get_process_cgroup_paths",
- side_effect=mock_cgroup_paths):
+ with patch("azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv1.get_process_cgroup",
+ side_effect=mock_cgroup):
+ agent = Agent(False, conf_file_path=os.path.join(data_dir, "test_waagent.conf"))
+ agent.collect_logs(is_full_mode=True)
+
+ self.assertEqual(1, mock_log_collector.call_count, "LogCollector should be called once")
+
+ finally:
+ CollectLogsHandler.disable_monitor_cgroups_check()
+
+ @patch("azurelinuxagent.agent.LogCollector")
+ def test_calls_collect_logs_on_valid_cgroups_v2(self, mock_log_collector):
+ try:
+ CollectLogsHandler.enable_monitor_cgroups_check()
+ mock_log_collector.return_value.collect_logs_and_get_archive.return_value = (
+ Mock(), Mock()) # LogCollector.collect_logs_and_get_archive returns a tuple
+
+ # Mock cgroup so process is in the log collector slice
+ def mock_cgroup(*args, **kwargs): # pylint: disable=W0613
+ relative_path = "{0}/{1}".format(cgroupconfigurator.LOGCOLLECTOR_SLICE, logcollector.CGROUPS_UNIT)
+ return CgroupV2(
+ cgroup_name=AGENT_LOG_COLLECTOR,
+ root_cgroup_path="/sys/fs/cgroup",
+ cgroup_path="/sys/fs/cgroup/{0}".format(relative_path),
+ enabled_controllers=["cpu", "memory"]
+ )
+
+ with mock_cgroup_v2_environment(self.tmp_dir):
+ with patch("azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv2.get_process_cgroup", side_effect=mock_cgroup):
agent = Agent(False, conf_file_path=os.path.join(data_dir, "test_waagent.conf"))
agent.collect_logs(is_full_mode=True)
- mock_log_collector.assert_called_once()
+ self.assertEqual(1, mock_log_collector.call_count, "LogCollector should be called once")
finally:
CollectLogsHandler.disable_monitor_cgroups_check()
@@ -296,17 +333,59 @@ def test_doesnt_call_collect_logs_on_invalid_cgroups_v1(self, mock_log_collector
CollectLogsHandler.enable_monitor_cgroups_check()
mock_log_collector.run = Mock()
- # Mock cgroup paths so process is in incorrect slice
- def mock_cgroup_paths(*args, **kwargs):
- if args and args[0] == "self":
- return ("NOT_THE_CORRECT_PATH", "NOT_THE_CORRECT_PATH")
- return get_cgroup_api().get_process_cgroup_relative_paths(*args, **kwargs)
+ # Mock cgroup so process is in incorrect slice
+ def mock_cgroup(*args, **kwargs): # pylint: disable=W0613
+ relative_path = "NOT_THE_CORRECT_PATH"
+ return CgroupV1(
+ cgroup_name=AGENT_LOG_COLLECTOR,
+ controller_mountpoints={
+ 'cpu,cpuacct': "/sys/fs/cgroup/cpu,cpuacct",
+ 'memory': "/sys/fs/cgroup/memory"
+ },
+ controller_paths={
+ 'cpu,cpuacct': "/sys/fs/cgroup/cpu,cpuacct/{0}".format(relative_path),
+ 'memory': "/sys/fs/cgroup/memory/{0}".format(relative_path)
+ }
+ )
def raise_on_sys_exit(*args):
raise RuntimeError(args[0] if args else "Exiting")
with mock_cgroup_v1_environment(self.tmp_dir):
- with patch("azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv1.get_process_cgroup_paths", side_effect=mock_cgroup_paths):
+ with patch("azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv1.get_process_cgroup", side_effect=mock_cgroup):
+ agent = Agent(False, conf_file_path=os.path.join(data_dir, "test_waagent.conf"))
+
+ with patch("sys.exit", side_effect=raise_on_sys_exit) as mock_exit:
+ try:
+ agent.collect_logs(is_full_mode=True)
+ except RuntimeError as re:
+ self.assertEqual(logcollector.INVALID_CGROUPS_ERRCODE, re.args[0])
+ mock_exit.assert_called_once_with(logcollector.INVALID_CGROUPS_ERRCODE)
+ finally:
+ CollectLogsHandler.disable_monitor_cgroups_check()
+
+ @patch("azurelinuxagent.agent.LogCollector")
+ def test_doesnt_call_collect_logs_on_invalid_cgroups_v2(self, mock_log_collector):
+ try:
+ CollectLogsHandler.enable_monitor_cgroups_check()
+ mock_log_collector.run = Mock()
+
+ # Mock cgroup so process is in incorrect slice
+ def mock_cgroup(*args, **kwargs): # pylint: disable=W0613
+ relative_path = "NOT_THE_CORRECT_PATH"
+ return CgroupV2(
+ cgroup_name=AGENT_LOG_COLLECTOR,
+ root_cgroup_path="/sys/fs/cgroup",
+ cgroup_path="/sys/fs/cgroup/{0}".format(relative_path),
+ enabled_controllers=["cpu", "memory"]
+ )
+
+ def raise_on_sys_exit(*args):
+ raise RuntimeError(args[0] if args else "Exiting")
+
+ with mock_cgroup_v2_environment(self.tmp_dir):
+ with patch("azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv2.get_process_cgroup",
+ side_effect=mock_cgroup):
agent = Agent(False, conf_file_path=os.path.join(data_dir, "test_waagent.conf"))
with patch("sys.exit", side_effect=raise_on_sys_exit) as mock_exit:
@@ -346,19 +425,25 @@ def test_doesnt_call_collect_logs_if_either_controller_not_mounted(self, mock_lo
CollectLogsHandler.enable_monitor_cgroups_check()
mock_log_collector.run = Mock()
- # Mock cgroup paths so process is in the log collector slice and cpu is not mounted
- def mock_cgroup_paths(*args, **kwargs):
- if args and args[0] == "self":
- relative_path = "{0}/{1}".format(cgroupconfigurator.LOGCOLLECTOR_SLICE, logcollector.CGROUPS_UNIT)
- return (None, relative_path)
- return get_cgroup_api().get_process_cgroup_relative_paths(*args, **kwargs)
+ # Mock cgroup so process is in the log collector slice and cpu is not mounted
+ def mock_cgroup(*args, **kwargs): # pylint: disable=W0613
+ relative_path = "{0}/{1}".format(cgroupconfigurator.LOGCOLLECTOR_SLICE, logcollector.CGROUPS_UNIT)
+ return CgroupV1(
+ cgroup_name=AGENT_LOG_COLLECTOR,
+ controller_mountpoints={
+ 'memory': "/sys/fs/cgroup/memory"
+ },
+ controller_paths={
+ 'memory': "/sys/fs/cgroup/memory/{0}".format(relative_path)
+ }
+ )
def raise_on_sys_exit(*args):
raise RuntimeError(args[0] if args else "Exiting")
with mock_cgroup_v1_environment(self.tmp_dir):
- with patch("azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv1.get_process_cgroup_paths",
- side_effect=mock_cgroup_paths):
+ with patch("azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv1.get_process_cgroup",
+ side_effect=mock_cgroup):
agent = Agent(False, conf_file_path=os.path.join(data_dir, "test_waagent.conf"))
with patch("sys.exit", side_effect=raise_on_sys_exit) as mock_exit:
@@ -369,7 +454,41 @@ def raise_on_sys_exit(*args):
mock_exit.assert_called_once_with(logcollector.INVALID_CGROUPS_ERRCODE)
finally:
CollectLogsHandler.disable_monitor_cgroups_check()
-
+
+ @patch("azurelinuxagent.agent.LogCollector")
+ @patch("azurelinuxagent.ga.collect_logs.LogCollectorMonitorHandler.get_max_recorded_metrics")
+ def test_collect_log_should_output_resource_usage_summary(self, mock_get_max_recorded_metrics, mock_log_collector):
+ try:
+ CollectLogsHandler.enable_monitor_cgroups_check()
+ mock_log_collector.return_value.collect_logs_and_get_archive.return_value = (Mock(), Mock()) # LogCollector.collect_logs_and_get_archive returns a tuple
+ mock_get_max_recorded_metrics.return_value = {}
+
+ # Mock cgroup so process is in the log collector slice
+ def mock_cgroup(*args, **kwargs): # pylint: disable=W0613
+ relative_path = "{0}/{1}".format(cgroupconfigurator.LOGCOLLECTOR_SLICE, logcollector.CGROUPS_UNIT)
+ return CgroupV1(
+ cgroup_name=AGENT_LOG_COLLECTOR,
+ controller_mountpoints={
+ 'cpu,cpuacct': "/sys/fs/cgroup/cpu,cpuacct",
+ 'memory': "/sys/fs/cgroup/memory"
+ },
+ controller_paths={
+ 'cpu,cpuacct': "/sys/fs/cgroup/cpu,cpuacct/{0}".format(relative_path),
+ 'memory': "/sys/fs/cgroup/memory/{0}".format(relative_path)
+ }
+ )
+
+ with mock_cgroup_v1_environment(self.tmp_dir):
+ with patch("azurelinuxagent.ga.cgroupapi.SystemdCgroupApiv1.get_process_cgroup", side_effect=mock_cgroup):
+ agent = Agent(False, conf_file_path=os.path.join(data_dir, "test_waagent.conf"))
+ agent.collect_logs(is_full_mode=True)
+
+ self.assertEqual(1, mock_log_collector.call_count, "LogCollector should be called once")
+ self.assertEqual(1, mock_get_max_recorded_metrics.call_count, "get_max_recorded_metrics should be called once")
+
+ finally:
+ CollectLogsHandler.disable_monitor_cgroups_check()
+
def test_it_should_parse_setup_firewall_properly(self):
test_firewall_meta = {
diff --git a/tests_e2e/orchestrator/docker/Dockerfile b/tests_e2e/orchestrator/docker/Dockerfile
index 219c9b869..f71d6c02e 100644
--- a/tests_e2e/orchestrator/docker/Dockerfile
+++ b/tests_e2e/orchestrator/docker/Dockerfile
@@ -67,7 +67,7 @@ RUN \
cd $HOME && \
git clone https://github.com/microsoft/lisa.git && \
cd lisa && \
- git checkout 95c09ff7d5b6e71d1642a628607ac9bb441c69f5 && \
+ git checkout 0e37ed07304b74362cfb3d3c55ac932d3bdc660c && \
\
python3 -m pip install --upgrade pip && \
python3 -m pip install --editable .[azure,libvirt] --config-settings editable_mode=compat && \
diff --git a/tests_e2e/orchestrator/lib/agent_test_loader.py b/tests_e2e/orchestrator/lib/agent_test_loader.py
index f952f1160..ba54f0b59 100644
--- a/tests_e2e/orchestrator/lib/agent_test_loader.py
+++ b/tests_e2e/orchestrator/lib/agent_test_loader.py
@@ -83,6 +83,28 @@ class VmImageInfo(object):
def __str__(self):
return self.urn
+class CustomImage(object):
+
+ # Images from a gallery are given as "//".
+ _IMAGE_FROM_GALLERY = re.compile(r"(?P[^/]+)/(?P[^/]+)/(?P[^/]+)")
+
+ @staticmethod
+ def _is_image_from_gallery(image: str) -> bool:
+ """
+ Verifies if image is from shared gallery
+ """
+ return CustomImage._IMAGE_FROM_GALLERY.match(image) is not None
+
+ @staticmethod
+ def _get_name_of_image_from_gallery(image: str) -> str:
+ """
+ Get image name from shared gallery
+ """
+ match = CustomImage._IMAGE_FROM_GALLERY.match(image)
+ if match is None:
+ raise Exception(f"Invalid image from gallery: {image}")
+ return match.group('image')
+
class AgentTestLoader(object):
"""
@@ -134,6 +156,7 @@ def _validate(self):
"""
Performs some basic validations on the data loaded from the YAML description files
"""
+
def _parse_image(image: str) -> str:
"""
Parses a reference to an image or image set and returns the name of the image or image set
@@ -147,8 +170,11 @@ def _parse_image(image: str) -> str:
# Validate that the images the suite must run on are in images.yml
for image in suite.images:
image = _parse_image(image)
+ # skip validation if the suite image is from a shared gallery
+ if CustomImage._is_image_from_gallery(image):
+ continue
if image not in self.images:
- raise Exception(f"Invalid image reference in test suite {suite.name}: Can't find {image} in images.yml")
+ raise Exception(f"Invalid image reference in test suite {suite.name}: Can't find {image} in images.yml or image from a shared gallery")
# If the suite specifies a cloud and it's location, validate that location string is start with and then validate that the images it uses are available in that location
for suite_location in suite.locations:
@@ -158,6 +184,9 @@ def _parse_image(image: str) -> str:
continue
for suite_image in suite.images:
suite_image = _parse_image(suite_image)
+ # skip validation if suite image from gallery image
+ if CustomImage._is_image_from_gallery(suite_image):
+ continue
for image in self.images[suite_image]:
# If the image has a location restriction, validate that it is available on the location the suite must run on
if image.locations:
@@ -208,8 +237,8 @@ def _load_test_suite(description_file: Path) -> TestSuiteInfo:
rest of the tests in the suite will not be executed). By default, a failure on a test does not stop execution of
the test suite.
* images - A string, or a list of strings, specifying the images on which the test suite must be executed. Each value
- can be the name of a single image (e.g."ubuntu_2004"), or the name of an image set (e.g. "endorsed"). The
- names for images and image sets are defined in WALinuxAgent/tests_e2e/tests_suites/images.yml.
+ can be the name of a single image (e.g. "ubuntu_2004"), the name of an image set (e.g. "endorsed"), or a shared gallery image (e.g. "gallery/wait-cloud-init/1.0.2").
+ The names for images and image sets are defined in WALinuxAgent/tests_e2e/tests_suites/images.yml.
* locations - [Optional; string or list of strings] If given, the test suite must be executed on that cloud location(e.g. "AzureCloud:eastus2euap").
If not specified, or set to an empty string, the test suite will be executed in the default location. This is useful
for test suites that exercise a feature that is enabled only in certain regions.
diff --git a/tests_e2e/orchestrator/lib/agent_test_suite_combinator.py b/tests_e2e/orchestrator/lib/agent_test_suite_combinator.py
index 1450398c8..07bb36632 100644
--- a/tests_e2e/orchestrator/lib/agent_test_suite_combinator.py
+++ b/tests_e2e/orchestrator/lib/agent_test_suite_combinator.py
@@ -22,7 +22,7 @@
from lisa.messages import TestStatus, TestResultMessage # pylint: disable=E0401
from lisa.util import field_metadata # pylint: disable=E0401
-from tests_e2e.orchestrator.lib.agent_test_loader import AgentTestLoader, VmImageInfo, TestSuiteInfo
+from tests_e2e.orchestrator.lib.agent_test_loader import AgentTestLoader, VmImageInfo, TestSuiteInfo, CustomImage
from tests_e2e.tests.lib.logging import set_thread_name
from tests_e2e.tests.lib.virtual_machine_client import VirtualMachineClient
from tests_e2e.tests.lib.virtual_machine_scale_set_client import VirtualMachineScaleSetClient
@@ -171,10 +171,10 @@ def create_environment_list(self, test_suites: List[str]) -> List[Dict[str, Any]
vhd = image.urn
image_name = urllib.parse.urlparse(vhd).path.split('/')[-1] # take the last fragment of the URL's path (e.g. "RHEL_8_Standard-8.3.202006170423.vhd")
shared_gallery = ""
- elif self._is_image_from_gallery(image.urn):
+ elif CustomImage._is_image_from_gallery(image.urn):
marketplace_image = ""
vhd = ""
- image_name = self._get_name_of_image_from_gallery(image.urn)
+ image_name = CustomImage._get_name_of_image_from_gallery(image.urn)
shared_gallery = image.urn
else:
marketplace_image = image.urn
@@ -451,7 +451,7 @@ def _get_runbook_images(self, loader: AgentTestLoader) -> List[VmImageInfo]:
return images
# If it is not image or image set, it must be a URN, VHD, or an image from a gallery
- if not self._is_urn(self.runbook.image) and not self._is_vhd(self.runbook.image) and not self._is_image_from_gallery(self.runbook.image):
+ if not self._is_urn(self.runbook.image) and not self._is_vhd(self.runbook.image) and not CustomImage._is_image_from_gallery(self.runbook.image):
raise Exception(f"The 'image' parameter must be an image, image set name, urn, vhd, or an image from a shared gallery: {self.runbook.image}")
i = VmImageInfo()
@@ -472,7 +472,15 @@ def _get_test_suite_images(suite: TestSuiteInfo, loader: AgentTestLoader) -> Lis
for image in suite.images:
match = AgentTestLoader.RANDOM_IMAGES_RE.match(image)
if match is None:
- image_list = loader.images[image]
+ # Added this condition for gallery images, as they don't have a definition in images.yml
+ if CustomImage._is_image_from_gallery(image):
+ i = VmImageInfo()
+ i.urn = image
+ i.locations = []
+ i.vm_sizes = []
+ image_list = [i]
+ else:
+ image_list = loader.images[image]
else:
count = match.group('count')
if count is None:
@@ -566,20 +574,6 @@ def _is_vhd(vhd: str) -> bool:
parsed = urllib.parse.urlparse(vhd)
return parsed.scheme == 'https' and parsed.netloc != "" and parsed.path != ""
- # Images from a gallery are given as "//".
- _IMAGE_FROM_GALLERY = re.compile(r"(?P[^/]+)/(?P[^/]+)/(?P[^/]+)")
-
- @staticmethod
- def _is_image_from_gallery(image: str) -> bool:
- return AgentTestSuitesCombinator._IMAGE_FROM_GALLERY.match(image) is not None
-
- @staticmethod
- def _get_name_of_image_from_gallery(image: str) -> bool:
- match = AgentTestSuitesCombinator._IMAGE_FROM_GALLERY.match(image)
- if match is None:
- raise Exception(f"Invalid image from gallery: {image}")
- return match.group('image')
-
@staticmethod
def _report_test_result(
suite_name: str,
diff --git a/tests_e2e/orchestrator/runbook.yml b/tests_e2e/orchestrator/runbook.yml
index dfed709af..b96cc5107 100644
--- a/tests_e2e/orchestrator/runbook.yml
+++ b/tests_e2e/orchestrator/runbook.yml
@@ -54,6 +54,7 @@ variable:
- publish_hostname
- recover_network_interface
- cgroup_v2_disabled
+ - log_collector
#
# Additional arguments pass to the test suites
diff --git a/tests_e2e/test_suites/agent_persist_firewall.yml b/tests_e2e/test_suites/agent_persist_firewall.yml
index f749046a2..57c8f6b5b 100644
--- a/tests_e2e/test_suites/agent_persist_firewall.yml
+++ b/tests_e2e/test_suites/agent_persist_firewall.yml
@@ -14,6 +14,9 @@ owns_vm: true # This vm cannot be shared with other tests because it modifies t
# so skipping the test run on flatcar distro.
# (2023-11-14T19:04:13.738695Z ERROR ExtHandler ExtHandler Unable to setup the persistent firewall rules: [Errno 30] Read-only file system: '/lib/systemd/system/waagent-network-setup.service)
skip_on_images:
+ - "azure-linux_3" # TODO: the test is unstable on Azure Linux 3; skipping for now
+ - "azure-linux_3_fips" # TODO: the test is unstable on Azure Linux 3; skipping for now
+ - "azure-linux_3_arm64" # TODO: the test is unstable on Azure Linux 3; skipping for now
- "debian_9" # TODO: Reboot is slow on debian_9. Need to investigate further.
- "flatcar"
- "flatcar_arm64"
diff --git a/tests_e2e/test_suites/agent_wait_for_cloud_init.yml b/tests_e2e/test_suites/agent_wait_for_cloud_init.yml
index 727803811..154e18349 100644
--- a/tests_e2e/test_suites/agent_wait_for_cloud_init.yml
+++ b/tests_e2e/test_suites/agent_wait_for_cloud_init.yml
@@ -2,12 +2,11 @@
# This test verifies that the Agent waits for cloud-init to complete before it starts processing extensions.
#
# NOTE: This test is not fully automated. It requires a custom image where the test Agent has been installed and Extensions.WaitForCloudInit is enabled in waagent.conf.
-# To execute it manually, create a custom image and use the 'image' runbook parameter, for example: "-v: image:gallery/wait-cloud-init/1.0.1".
+# To execute it manually, create a custom image and use the 'image' runbook parameter, for example: "-v: image:gallery/wait-cloud-init/1.0.2".
#
name: "AgentWaitForCloudInit"
tests:
- "agent_wait_for_cloud_init/agent_wait_for_cloud_init.py"
template: "agent_wait_for_cloud_init/add_cloud_init_script.py"
install_test_agent: false
-# Dummy image, since the parameter is required. The actual image needs to be passed as a parameter to the runbook.
-images: "ubuntu_2204"
+images: "gallery/wait-cloud-init/1.0.2"
diff --git a/tests_e2e/test_suites/ext_sequencing.yml b/tests_e2e/test_suites/ext_sequencing.yml
index 1976a8502..f710a45d4 100644
--- a/tests_e2e/test_suites/ext_sequencing.yml
+++ b/tests_e2e/test_suites/ext_sequencing.yml
@@ -7,4 +7,4 @@ tests:
- "ext_sequencing/ext_sequencing.py"
images: "endorsed"
# This scenario is executed on instances of a scaleset created by the agent test suite.
-executes_on_scale_set: true
\ No newline at end of file
+executes_on_scale_set: true
diff --git a/tests_e2e/test_suites/images.yml b/tests_e2e/test_suites/images.yml
index fb6cc7f1f..ef0fa434c 100644
--- a/tests_e2e/test_suites/images.yml
+++ b/tests_e2e/test_suites/images.yml
@@ -17,8 +17,9 @@ image-sets:
- "debian_11"
- "flatcar"
- "suse_12"
- - "mariner_1"
- "mariner_2"
+ - "azure-linux_3"
+ - "azure-linux_3_fips"
- "suse_15"
- "rhel_79"
- "rhel_82"
@@ -37,8 +38,10 @@ image-sets:
- "debian_11_arm64"
- "flatcar_arm64"
- "mariner_2_arm64"
+ - "azure-linux_3_arm64"
- "rhel_90_arm64"
- "ubuntu_2204_arm64"
+ - "ubuntu_2404_arm64"
# As of today agent only support and enabled resource governance feature on following distros
cgroups-endorsed:
@@ -54,6 +57,20 @@ image-sets:
- "oracle_610"
- "rhel_610"
+ # These are the distros which have periodic log collector support.
+ log-collector-endorsed:
+ - "centos_82"
+ - "rhel_82"
+ - "ubuntu_1604"
+ - "ubuntu_1804"
+ - "ubuntu_2004"
+ - "ubuntu_2204"
+ - "ubuntu_2204_minimal"
+ - "ubuntu_2204_arm64"
+ - "ubuntu_2404"
+ - "ubuntu_2404_minimal"
+ - "ubuntu_2404_arm64"
+
#
# An image can be specified by a string giving its urn, as in
#
@@ -87,6 +104,21 @@ images:
urn: "almalinux almalinux 9-gen2 latest"
locations:
AzureChinaCloud: []
+ azure-linux_3:
+ urn: "microsoftcblmariner azure-linux-3 azure-linux-3 latest"
+ locations:
+ AzureUSGovernment: []
+ AzureChinaCloud: []
+ azure-linux_3_fips:
+ urn: "microsoftcblmariner azure-linux-3 azure-linux-3-gen2-fips latest"
+ locations:
+ AzureUSGovernment: []
+ AzureChinaCloud: []
+ azure-linux_3_arm64:
+ urn: "microsoftcblmariner azure-linux-3 azure-linux-3-arm64 latest"
+ locations:
+ AzureUSGovernment: []
+ AzureChinaCloud: []
centos_610: "OpenLogic CentOS 6.10 latest"
centos_75: "OpenLogic CentOS 7.5 latest"
centos_79: "OpenLogic CentOS 7_9 latest"
@@ -181,9 +213,9 @@ images:
AzureChinaCloud: []
AzureUSGovernment: []
ubuntu_2204_minimal: "Canonical 0001-com-ubuntu-minimal-jammy minimal-22_04-lts-gen2 latest"
- ubuntu_2404:
- # TODO: Currently using the daily build, update to the release build once it is available
- urn: "Canonical 0001-com-ubuntu-server-noble-daily 24_04-daily-lts-gen2 latest"
+ ubuntu_2404: "Canonical ubuntu-24_04-lts server latest"
+ ubuntu_2404_arm64:
+ urn: "Canonical ubuntu-24_04-lts server-arm64 latest"
locations:
AzureChinaCloud: []
AzureUSGovernment: []
diff --git a/tests_e2e/test_suites/initial_agent_update.yml b/tests_e2e/test_suites/initial_agent_update.yml
new file mode 100644
index 000000000..6dc039d62
--- /dev/null
+++ b/tests_e2e/test_suites/initial_agent_update.yml
@@ -0,0 +1,13 @@
+#
+# This test verifies that the Agent performs the initial update on the very first goal state, before it starts processing extensions, for new VMs that are enrolled into RSM.
+#
+# NOTE: This test suite is not fully automated. It requires a custom image where a pre-installed Agent with version 2.8.9.9 has been installed. Creation of custom images is not automated currently.
+# But the daily run is automated, and the test suite will pass a shared gallery custom image reference in the images list
+#
+#
+name: "InitialAgentUpdate"
+tests:
+ - "initial_agent_update/initial_agent_update.py"
+install_test_agent: false
+images: "gallery/initial-agent-update/1.0.0"
+locations: "AzureCloud:eastus2euap"
diff --git a/tests_e2e/test_suites/log_collector.yml b/tests_e2e/test_suites/log_collector.yml
new file mode 100644
index 000000000..496198f92
--- /dev/null
+++ b/tests_e2e/test_suites/log_collector.yml
@@ -0,0 +1,8 @@
+#
+# This test is used to verify that the log collector logs the expected behavior on periodic runs.
+#
+name: "LogCollector"
+tests:
+ - "log_collector/log_collector.py"
+images:
+ - "random(log-collector-endorsed, 1)"
diff --git a/tests_e2e/test_suites/multi_config_ext.yml b/tests_e2e/test_suites/multi_config_ext.yml
index 24bdaa736..1856a4d06 100644
--- a/tests_e2e/test_suites/multi_config_ext.yml
+++ b/tests_e2e/test_suites/multi_config_ext.yml
@@ -7,3 +7,6 @@ tests:
- "multi_config_ext/multi_config_ext.py"
images:
- "endorsed"
+# TODO: This test has been failing due to issues in the RC2 extension on AzureCloud. Re-enable once the extension has been fixed.
+skip_on_clouds:
+ - "AzureCloud"
diff --git a/tests_e2e/tests/agent_publish/agent_publish.py b/tests_e2e/tests/agent_publish/agent_publish.py
index 83c3f7160..957dfd4fc 100644
--- a/tests_e2e/tests/agent_publish/agent_publish.py
+++ b/tests_e2e/tests/agent_publish/agent_publish.py
@@ -1,5 +1,4 @@
#!/usr/bin/env python3
-
# Microsoft Azure Linux Agent
#
# Copyright 2018 Microsoft Corporation
@@ -21,6 +20,7 @@
from assertpy import fail
+from tests_e2e.tests.lib.agent_setup_helpers import wait_for_agent_to_complete_provisioning
from tests_e2e.tests.lib.agent_test import AgentVmTest
from tests_e2e.tests.lib.agent_test_context import AgentVmTestContext
from tests_e2e.tests.lib.agent_update_helpers import request_rsm_update
@@ -49,6 +49,8 @@ def run(self):
3. Check for agent update from the log and waagent version
4. Ensure CSE is working
"""
+ # Since we skip the install_agent setup, wait here for the Agent to complete provisioning before starting the test
+ wait_for_agent_to_complete_provisioning(self._ssh_client)
self._get_agent_info()
log.info("Testing rsm update flow....")
diff --git a/tests_e2e/tests/agent_wait_for_cloud_init/add_cloud_init_script.py b/tests_e2e/tests/agent_wait_for_cloud_init/add_cloud_init_script.py
index 14f2cdeca..0c1a6611b 100755
--- a/tests_e2e/tests/agent_wait_for_cloud_init/add_cloud_init_script.py
+++ b/tests_e2e/tests/agent_wait_for_cloud_init/add_cloud_init_script.py
@@ -35,10 +35,10 @@ def update(self, template: Dict[str, Any], is_lisa_template: bool) -> None:
#
# cloud-init configuration needs to be added in the osProfile.customData property as a base64-encoded string.
#
- # LISA uses the getOSProfile function to generate the value for osProfile; add customData to its output, checking that we do not
+ # LISA uses the generateOsProfile function to generate the value for osProfile; add customData to its output, checking that we do not
# override any existing value (the current LISA template does not have any).
#
- # "getOSProfile": {
+ # "generateOsProfile": {
# "parameters": [
# ...
# ],
@@ -55,7 +55,7 @@ def update(self, template: Dict[str, Any], is_lisa_template: bool) -> None:
#
encoded_script = base64.b64encode(AgentWaitForCloudInit.CloudInitScript.encode('utf-8')).decode('utf-8')
- get_os_profile = self.get_lisa_function(template, 'getOsProfile')
+ get_os_profile = self.get_lisa_function(template, 'generateOsProfile')
output = self.get_function_output(get_os_profile)
if output.get('customData') is not None:
raise Exception(f"The getOSProfile function already has a 'customData'. Won't override it. Definition: {get_os_profile}")
diff --git a/tests_e2e/tests/ext_cgroups/install_extensions.py b/tests_e2e/tests/ext_cgroups/install_extensions.py
index aebc6e3c0..b54be86ec 100644
--- a/tests_e2e/tests/ext_cgroups/install_extensions.py
+++ b/tests_e2e/tests/ext_cgroups/install_extensions.py
@@ -16,7 +16,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-from datetime import datetime, timedelta
from pathlib import Path
from tests_e2e.tests.lib.agent_test_context import AgentVmTestContext
@@ -35,7 +34,6 @@ def __init__(self, context: AgentVmTestContext):
self._ssh_client = self._context.create_ssh_client()
def run(self):
- self._prepare_agent()
# Install the GATest extension to test service cgroups
self._install_gatest_extension()
# Install the Azure Monitor Agent to test long running process cgroup
@@ -45,18 +43,6 @@ def run(self):
# Install the CSE extension to test extension cgroup
self._install_cse()
- def _prepare_agent(self):
- log.info("=====Executing update-waagent-conf remote script to update monitoring deadline flag for tracking azuremonitoragent service")
- future_date = datetime.utcnow() + timedelta(days=2)
- expiry_time = future_date.date().strftime("%Y-%m-%d")
- # Agent needs extension info and it's services info in the handlermanifest.xml to monitor and limit the resource usage.
- # As part of pilot testing , agent hardcoded azuremonitoragent service name to monitor it for sometime in production without need of manifest update from extesnion side.
- # So that they can get sense of resource usage for their extensions. This we did for few months and now we no logner monitoring it in production.
- # But we are changing the config flag expiry time to future date in this test. So that test agent will start track the cgroups that is used by the service.
- result = self._ssh_client.run_command(f"update-waagent-conf Debug.CgroupMonitorExpiryTime={expiry_time}", use_sudo=True)
- log.info(result)
- log.info("Updated agent cgroups config(CgroupMonitorExpiryTime)")
-
def _install_ama(self):
ama_extension = VirtualMachineExtensionClient(
self._context.vm, VmExtensionIds.AzureMonitorLinuxAgent,
diff --git a/tests_e2e/tests/ext_sequencing/ext_sequencing.py b/tests_e2e/tests/ext_sequencing/ext_sequencing.py
index b2b3b9a70..aa24c13b0 100644
--- a/tests_e2e/tests/ext_sequencing/ext_sequencing.py
+++ b/tests_e2e/tests/ext_sequencing/ext_sequencing.py
@@ -22,6 +22,7 @@
# validates they are enabled in order of dependencies.
#
import copy
+import json
import random
import re
import uuid
@@ -225,13 +226,42 @@ def run(self):
# We only expect to catch an exception during deployment if we are forcing one of the extensions to
# fail. We know an extension should fail if "failing" is in the case name. Otherwise, report the
# failure.
- deployment_failure_pattern = r"[\s\S]*\"details\": [\s\S]* \"code\": \"(?P.*)\"[\s\S]* \"message\": \"(?P.*)\"[\s\S]*"
- msg_pattern = r"Multiple VM extensions failed to be provisioned on the VM. Please see the VM extension instance view for other failures. The first extension failed due to the error: VM Extension '.*' is marked as failed since it depends upon the VM Extension 'CustomScript' which has failed."
- deployment_failure_match = re.match(deployment_failure_pattern, str(e))
+ #
+ # Example deployment failure:
+ # (DeploymentFailed) At least one resource deployment operation failed. Please list deployment operations for details. Please see https://aka.ms/arm-deployment-operations for usage details.
+ # Code: DeploymentFailed
+ # Message: At least one resource deployment operation failed. Please list deployment operations for details. Please see https://aka.ms/arm-deployment-operations for usage details.
+ # Exception Details: (Conflict) {
+ # "status": "Failed",
+ # "error": {
+ # "code": "ResourceDeploymentFailure",
+ # "message": "The resource write operation failed to complete successfully, because it reached terminal provisioning state 'Failed'.",
+ # "details": [
+ # {
+ # "code": "VMExtensionProvisioningError",
+ # "target": "0",
+ # "message": "Multiple VM extensions failed to be provisioned on the VM. The Extensions failed due to the errors: \n[Extension Name: 'AzureMonitorLinuxAgent'\nError Message: VM Extension 'AzureMonitorLinuxAgent' is marked as failed since it depends upon the VM Extension 'CustomScript' which has failed.]\n\n[Extension Name: 'CustomScript'\nError Message: VM has reported a failure when processing extension 'CustomScript' (publisher 'Microsoft.Azure.Extensions' and type 'CustomScript'). Error message: 'Enable failed: failed to execute command: command terminated with exit status=1\n[stdout]\n\n[stderr]\n'. More information on troubleshooting is available at https://aka.ms/VMExtensionCSELinuxTroubleshoot. ]\n"
+ # }
+ # ]
+ # }
+ # }
if "failing" not in case.__name__:
fail("Extension template deployment unexpectedly failed: {0}".format(e))
- elif not deployment_failure_match or deployment_failure_match.group("code") != "VMExtensionProvisioningError" or not re.match(msg_pattern, deployment_failure_match.group("msg")):
- fail("Extension template deployment failed as expected, but with an unexpected error: {0}".format(e))
+ else:
+ deployment_failure_pattern = r"[\s\S]*\"code\":\s*\"ResourceDeploymentFailure\"[\s\S]*\"details\":\s*\[\s*(?P[\s\S]*)\]"
+ deployment_failure_match = re.match(deployment_failure_pattern, str(e))
+ try:
+ if deployment_failure_match is None:
+ raise Exception("Unable to match a ResourceDeploymentFailure")
+ error_json = json.loads(deployment_failure_match.group("error"))
+ error_code = error_json['code']
+ error_message = error_json['message']
+ except Exception as parse_exc:
+ fail("Extension template deployment failed as expected, but there was an error in parsing the failure. Parsing failure: {0}\nDeployment Failure: {1}".format(parse_exc, e))
+
+ msg_pattern = r"Multiple VM extensions failed to be provisioned on the VM[\s\S]*VM Extension '.*' is marked as failed since it depends upon the VM Extension 'CustomScript' which has failed."
+ if error_code != "VMExtensionProvisioningError" or re.match(msg_pattern, error_message) is None:
+ fail("Extension template deployment failed as expected, but with an unexpected error: {0}".format(e))
# Get the extensions on the VMSS from the instance view
log.info("")
diff --git a/tests_e2e/tests/initial_agent_update/initial_agent_update.py b/tests_e2e/tests/initial_agent_update/initial_agent_update.py
new file mode 100644
index 000000000..455dcd3ee
--- /dev/null
+++ b/tests_e2e/tests/initial_agent_update/initial_agent_update.py
@@ -0,0 +1,82 @@
+#!/usr/bin/env python3
+
+# Microsoft Azure Linux Agent
+#
+# Copyright 2018 Microsoft Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from assertpy import fail
+
+from tests_e2e.tests.lib.agent_test import AgentVmTest
+from tests_e2e.tests.lib.agent_test_context import AgentVmTestContext
+from tests_e2e.tests.lib.logging import log
+from tests_e2e.tests.lib.retry import retry_if_false
+
+
+class InitialAgentUpdate(AgentVmTest):
+ """
+ This test verifies that the Agent performs the initial update on the very first goal state, before it starts processing extensions, for new VMs that are enrolled into RSM
+ """
+ def __init__(self, context: AgentVmTestContext):
+ super().__init__(context)
+ self._ssh_client = self._context.create_ssh_client()
+ self._test_version = "2.8.9.9"
+
+ def run(self):
+
+ log.info("Testing initial agent update for new vms that are enrolled into RSM")
+
+ log.info("Retrieving latest version from goal state to verify initial agent update")
+ latest_version: str = self._ssh_client.run_command("agent_update-self_update_latest_version.py --family_type Prod",
+ use_sudo=True).rstrip()
+ log.info("Latest Version: %s", latest_version)
+ self._verify_agent_updated_to_latest_version(latest_version)
+ self._verify_agent_updated_before_processing_goal_state(latest_version)
+
+ def _verify_agent_updated_to_latest_version(self, latest_version: str) -> None:
+ """
+ Verifies the agent updated to latest version from custom image test version.
+ """
+ log.info("Verifying agent updated to latest version: {0} from custom image test version: {1}".format(latest_version, self._test_version))
+ self._verify_guest_agent_update(latest_version)
+
+ def _verify_guest_agent_update(self, latest_version: str) -> None:
+ """
+ Verify current agent version running on latest version
+ """
+
+ def _check_agent_version(latest_version: str) -> bool:
+ waagent_version: str = self._ssh_client.run_command("waagent-version", use_sudo=True)
+ expected_version = f"Goal state agent: {latest_version}"
+ if expected_version in waagent_version:
+ return True
+ else:
+ return False
+
+ log.info("Running waagent --version and checking Goal state agent version")
+ success: bool = retry_if_false(lambda: _check_agent_version(latest_version), delay=60)
+ waagent_version: str = self._ssh_client.run_command("waagent-version", use_sudo=True)
+ if not success:
+ fail("Guest agent didn't update to latest version {0} but found \n {1}".format(
+ latest_version, waagent_version))
+ log.info(
+ f"Successfully verified agent updated to latest version. Current agent version running:\n {waagent_version}")
+
+ def _verify_agent_updated_before_processing_goal_state(self, latest_version) -> None:
+ log.info("Checking agent log if agent does initial update with self-update before processing goal state")
+
+ output = self._ssh_client.run_command(
+ "initial_agent_update-agent_update_check_from_log.py --current_version {0} --latest_version {1}".format(self._test_version, latest_version))
+ log.info(output)
diff --git a/tests_e2e/tests/lib/agent_log.py b/tests_e2e/tests/lib/agent_log.py
index 83f77b1ea..5e31973da 100644
--- a/tests_e2e/tests/lib/agent_log.py
+++ b/tests_e2e/tests/lib/agent_log.py
@@ -312,7 +312,7 @@ def get_errors(self) -> List[AgentLogRecord]:
# Reasons (first 5 errors): [ProtocolError] [Wireserver Exception] [ProtocolError] [Wireserver Failed] URI http://168.63.129.16/machine?comp=telemetrydata [HTTP Failed] Status Code 400: Traceback (most recent call last):
#
{
- 'message': r"(?s)\[ProtocolError\].*http://168.63.129.16/machine\?comp=telemetrydata.*Status Code 400",
+ 'message': r"(?s)\[ProtocolError\].*http:\/\/168.63.129.16\/machine\?comp=telemetrydata.*Status Code 400",
'if': lambda r: r.thread == 'SendTelemetryHandler' and self._increment_counter("SendTelemetryHandler-telemetrydata-Status Code 400") < 2 # ignore unless there are 2 or more instances
},
#
@@ -380,6 +380,24 @@ def get_errors(self) -> List[AgentLogRecord]:
'message': r"Unable to determine version of iptables: \[Errno 2\] No such file or directory: 'iptables'",
'if': lambda r: DISTRO_NAME == 'ubuntu'
},
+ #
+ # TODO: The Daemon has not been updated on Azure Linux 3; remove this message when it is.
+ #
+ # 2024-08-05T14:36:48.004865Z WARNING Daemon Daemon Unable to load distro implementation for azurelinux. Using default distro implementation instead.
+ #
+ {
+ 'message': r"Unable to load distro implementation for azurelinux. Using default distro implementation instead.",
+ 'if': lambda r: DISTRO_NAME == 'azurelinux' and r.prefix == 'Daemon' and r.level == 'WARNING'
+ },
+ #
+ # TODO: The OMS extension does not support Azure Linux 3; remove this message when it does.
+ #
+ # 2024-08-12T17:40:48.375193Z ERROR ExtHandler ExtHandler Event: name=Microsoft.EnterpriseCloud.Monitoring.OmsAgentForLinux, op=Install, message=[ExtensionOperationError] Non-zero exit code: 51, /var/lib/waagent/Microsoft.EnterpriseCloud.Monitoring.OmsAgentForLinux-1.19.0/omsagent_shim.sh -install
+ #
+ {
+ 'message': r"name=Microsoft\.EnterpriseCloud\.Monitoring\.OmsAgentForLinux.+Non-zero exit code: 51",
+ 'if': lambda r: DISTRO_NAME == 'azurelinux' and DISTRO_VERSION == '3.0'
+ },
]
def is_error(r: AgentLogRecord) -> bool:
diff --git a/tests_e2e/tests/lib/agent_setup_helpers.py b/tests_e2e/tests/lib/agent_setup_helpers.py
new file mode 100644
index 000000000..bfd6cff4e
--- /dev/null
+++ b/tests_e2e/tests/lib/agent_setup_helpers.py
@@ -0,0 +1,39 @@
+#
+# Copyright 2018 Microsoft Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Common helper functions for agent setup used by the tests
+#
+import time
+
+from tests_e2e.tests.lib.logging import log
+from tests_e2e.tests.lib.shell import CommandError
+from tests_e2e.tests.lib.ssh_client import SshClient
+
+
+def wait_for_agent_to_complete_provisioning(ssh_client: SshClient):
+ """
+ Wait for the agent to complete provisioning
+ """
+ log.info("Checking for the Agent to complete provisioning before starting the test validation")
+ for _ in range(5):
+ time.sleep(30)
+ try:
+ ssh_client.run_command("[ -f /var/lib/waagent/provisioned ] && exit 0 || exit 1", use_sudo=True)
+ break
+ except CommandError:
+ log.info("Waiting for agent to complete provisioning, will check again after a short delay")
+
+ else:
+ raise Exception("Timeout while waiting for the Agent to complete provisioning")
diff --git a/tests_e2e/tests/lib/cgroup_helpers.py b/tests_e2e/tests/lib/cgroup_helpers.py
index 1fe21c329..7ebb71ec1 100644
--- a/tests_e2e/tests/lib/cgroup_helpers.py
+++ b/tests_e2e/tests/lib/cgroup_helpers.py
@@ -7,7 +7,7 @@
from azurelinuxagent.common.osutil import systemd
from azurelinuxagent.common.utils import shellutil
from azurelinuxagent.common.version import DISTRO_NAME, DISTRO_VERSION
-from azurelinuxagent.ga.cgroupapi import get_cgroup_api
+from azurelinuxagent.ga.cgroupapi import get_cgroup_api, SystemdCgroupApiv1
from tests_e2e.tests.lib.agent_log import AgentLog
from tests_e2e.tests.lib.logging import log
from tests_e2e.tests.lib.retry import retry_if_false
@@ -21,11 +21,9 @@
CGROUP_TRACKED_PATTERN = re.compile(r'Started tracking cgroup ([^\s]+)\s+\[(?P[^\s]+)\]')
GATESTEXT_FULL_NAME = "Microsoft.Azure.Extensions.Edp.GATestExtGo"
-GATESTEXT_SERVICE = "gatestext.service"
+GATESTEXT_SERVICE = "gatestext"
AZUREMONITOREXT_FULL_NAME = "Microsoft.Azure.Monitor.AzureMonitorLinuxAgent"
-AZUREMONITORAGENT_SERVICE = "azuremonitoragent.service"
-MDSD_SERVICE = "mdsd.service"
-
+AZUREMONITORAGENT_SERVICE = "azuremonitoragent"
def verify_if_distro_supports_cgroup():
"""
@@ -164,9 +162,14 @@ def check_log_message(message, after_timestamp=datetime.datetime.min):
return False
-def get_unit_cgroup_paths(unit_name):
+def get_unit_cgroup_proc_path(unit_name, controller):
"""
- Returns the cgroup paths for the given unit
+ Returns the cgroup.procs path for the given unit and controller.
"""
cgroups_api = get_cgroup_api()
- return cgroups_api.get_unit_cgroup_paths(unit_name)
+ unit_cgroup = cgroups_api.get_unit_cgroup(unit_name=unit_name, cgroup_name="test cgroup")
+ if isinstance(cgroups_api, SystemdCgroupApiv1):
+ return unit_cgroup.get_controller_procs_path(controller=controller)
+ else:
+ return unit_cgroup.get_procs_path()
+
diff --git a/tests_e2e/tests/lib/virtual_machine_runcommand_client.py b/tests_e2e/tests/lib/virtual_machine_runcommand_client.py
new file mode 100644
index 000000000..7858c6fc9
--- /dev/null
+++ b/tests_e2e/tests/lib/virtual_machine_runcommand_client.py
@@ -0,0 +1,130 @@
+# Microsoft Azure Linux Agent
+#
+# Copyright 2018 Microsoft Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+#
+# This module includes facilities to execute VM extension runcommand operations (enable, remove, etc).
+#
+import json
+from typing import Any, Dict, Callable
+from assertpy import soft_assertions, assert_that
+
+from azure.mgmt.compute import ComputeManagementClient
+from azure.mgmt.compute.models import VirtualMachineRunCommand, VirtualMachineRunCommandScriptSource, VirtualMachineRunCommandInstanceView
+
+from tests_e2e.tests.lib.azure_sdk_client import AzureSdkClient
+from tests_e2e.tests.lib.logging import log
+from tests_e2e.tests.lib.retry import execute_with_retry
+from tests_e2e.tests.lib.virtual_machine_client import VirtualMachineClient
+from tests_e2e.tests.lib.vm_extension_identifier import VmExtensionIdentifier
+
+
+class VirtualMachineRunCommandClient(AzureSdkClient):
+ """
+    Client for operations on virtual machine RunCommand extensions.
+ """
+ def __init__(self, vm: VirtualMachineClient, extension: VmExtensionIdentifier, resource_name: str = None):
+ super().__init__()
+ self._vm: VirtualMachineClient = vm
+ self._identifier = extension
+ self._resource_name = resource_name or extension.type
+ self._compute_client: ComputeManagementClient = AzureSdkClient.create_client(ComputeManagementClient, self._vm.cloud, self._vm.subscription)
+
+ def get_instance_view(self) -> VirtualMachineRunCommandInstanceView:
+ """
+ Retrieves the instance view of the run command extension
+ """
+ log.info("Retrieving instance view for %s...", self._identifier)
+
+ return execute_with_retry(lambda: self._compute_client.virtual_machine_run_commands.get_by_virtual_machine(
+ resource_group_name=self._vm.resource_group,
+ vm_name=self._vm.name,
+ run_command_name=self._resource_name,
+ expand="instanceView"
+ ).instance_view)
+
+ def enable(
+ self,
+ settings: Dict[str, Any] = None,
+ timeout: int = AzureSdkClient._DEFAULT_TIMEOUT
+ ) -> None:
+ """
+ Performs an enable operation on the run command extension.
+ """
+ run_command_parameters = VirtualMachineRunCommand(
+ location=self._vm.location,
+ source=VirtualMachineRunCommandScriptSource(
+ script=settings.get("source") if settings is not None else settings
+ )
+ )
+
+ log.info("Enabling %s", self._identifier)
+ log.info("%s", run_command_parameters)
+
+ result: VirtualMachineRunCommand = self._execute_async_operation(
+ lambda: self._compute_client.virtual_machine_run_commands.begin_create_or_update(
+ self._vm.resource_group,
+ self._vm.name,
+ self._resource_name,
+ run_command_parameters),
+ operation_name=f"Enable {self._identifier}",
+ timeout=timeout)
+
+ log.info("Provisioning state: %s", result.provisioning_state)
+
+ def delete(self, timeout: int = AzureSdkClient._DEFAULT_TIMEOUT) -> None:
+ """
+ Performs a delete operation on the run command extension
+ """
+ self._execute_async_operation(
+ lambda: self._compute_client.virtual_machine_run_commands.begin_delete(
+ self._vm.resource_group,
+ self._vm.name,
+ self._resource_name),
+ operation_name=f"Delete {self._identifier}",
+ timeout=timeout)
+
+ def assert_instance_view(
+ self,
+ expected_status_code: str = "Succeeded",
+ expected_exit_code: int = 0,
+ expected_message: str = None,
+ assert_function: Callable[[VirtualMachineRunCommandInstanceView], None] = None
+ ) -> None:
+ """
+ Asserts that the run command's instance view matches the given expected values. If 'expected_message' is
+ omitted, it is not validated.
+
+ If 'assert_function' is provided, it is invoked passing as parameter the instance view. This function can be used to perform
+ additional validations.
+ """
+ instance_view = self.get_instance_view()
+ log.info("Instance view:\n%s", json.dumps(instance_view.serialize(), indent=4))
+
+ with soft_assertions():
+ if expected_message is not None:
+ assert_that(expected_message in instance_view.output).described_as(f"{expected_message} should be in the InstanceView message ({instance_view.output})").is_true()
+
+ assert_that(instance_view.execution_state).described_as("InstanceView execution state").is_equal_to(expected_status_code)
+ assert_that(instance_view.exit_code).described_as("InstanceView exit code").is_equal_to(expected_exit_code)
+
+ if assert_function is not None:
+ assert_function(instance_view)
+
+ log.info("The instance view matches the expected values")
+
+ def __str__(self):
+ return f"{self._identifier}"
diff --git a/tests_e2e/tests/log_collector/log_collector.py b/tests_e2e/tests/log_collector/log_collector.py
new file mode 100755
index 000000000..945d69c15
--- /dev/null
+++ b/tests_e2e/tests/log_collector/log_collector.py
@@ -0,0 +1,91 @@
+#!/usr/bin/env python3
+
+# Microsoft Azure Linux Agent
+#
+# Copyright 2018 Microsoft Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import re
+import time
+
+from assertpy import fail
+
+from azurelinuxagent.common.utils.shellutil import CommandError
+from tests_e2e.tests.lib.agent_test import AgentVmTest
+from tests_e2e.tests.lib.logging import log
+
+
+class LogCollector(AgentVmTest):
+ """
+ Tests that the log collector logs the expected behavior on periodic runs.
+ """
+ def run(self):
+ ssh_client = self._context.create_ssh_client()
+
+ # Rename the agent log file so that the test does not pick up any incomplete log collector runs that started
+ # before the config is updated
+        # Enable Cgroup v2 resource limiting and reduce log collector initial delay via config
+ log.info("Renaming agent log file and modifying log collector conf flags")
+ setup_script = ("agent-service stop && mv /var/log/waagent.log /var/log/waagent.$(date --iso-8601=seconds).log && "
+ "update-waagent-conf Logs.Collect=y Debug.EnableCgroupV2ResourceLimiting=y Debug.LogCollectorInitialDelay=60")
+ ssh_client.run_command(f"sh -c '{setup_script}'", use_sudo=True)
+ log.info('Renamed log file and updated log collector config flags')
+
+ # Wait for log collector to finish uploading logs
+ for _ in range(3):
+ time.sleep(90)
+ try:
+ ssh_client.run_command("grep 'Successfully uploaded logs' /var/log/waagent.log")
+ break
+ except CommandError:
+ log.info("The Agent has not finished log collection, will check again after a short delay")
+ else:
+ raise Exception("Timeout while waiting for the Agent to finish log collection")
+
+ # Get any agent logs between log collector start and finish
+ try:
+ # We match the first full log collector run in the agent log (this test just needs to validate any full log collector run, does not matter if it's the first or last)
+ lc_start_pattern = "INFO CollectLogsHandler ExtHandler Starting log collection"
+ lc_end_pattern = "INFO CollectLogsHandler ExtHandler Successfully uploaded logs"
+ output = ssh_client.run_command("sed -n '/{0}/,/{1}/{{p;/{1}/q}}' /var/log/waagent.log".format(lc_start_pattern, lc_end_pattern)).rstrip().splitlines()
+ except Exception as e:
+ raise Exception("Unable to get log collector logs from waagent.log: {0}".format(e))
+
+ # These logs indicate a successful log collector run with resource enforcement and monitoring
+ expected = [
+ r'.*Starting log collection',
+ r'.*Using cgroup v\d for resource enforcement and monitoring',
+ r'.*cpu(,cpuacct)? controller for cgroup: azure-walinuxagent-logcollector \[\/sys\/fs\/cgroup(\/cpu,cpuacct)?\/azure.slice\/azure-walinuxagent.slice\/azure-walinuxagent\-logcollector.slice\/collect\-logs.scope\]',
+ r'.*memory controller for cgroup: azure-walinuxagent-logcollector \[\/sys\/fs\/cgroup(\/memory)?\/azure.slice\/azure-walinuxagent.slice\/azure-walinuxagent\-logcollector.slice\/collect\-logs.scope\]',
+ r'.*Log collection successfully completed',
+ r'.*Successfully collected logs',
+ r'.*Successfully uploaded logs'
+ ]
+
+ # Filter output to only include relevant log collector logs
+ lc_logs = [log for log in output if len([pattern for pattern in expected if re.match(pattern, log)]) > 0]
+
+ # Check that all expected logs exist and are in the correct order
+ indent = lambda lines: "\n".join([f" {ln}" for ln in lines])
+ if len(lc_logs) == len(expected) and all([re.match(expected[i], lc_logs[i]) is not None for i in range(len(expected))]):
+ log.info("The log collector run completed as expected.\nLog messages:\n%s", indent(lc_logs))
+ else:
+ fail(f"The log collector run did not complete as expected.\nExpected:\n{indent(expected)}\nActual:\n{indent(lc_logs)}")
+
+ ssh_client.run_command("update-waagent-conf Debug.EnableCgroupV2ResourceLimiting=n Debug.LogCollectorInitialDelay=5*60",
+ use_sudo=True)
+
+
+if __name__ == "__main__":
+ LogCollector.run_from_command_line()
diff --git a/tests_e2e/tests/multi_config_ext/multi_config_ext.py b/tests_e2e/tests/multi_config_ext/multi_config_ext.py
index 4df75fd2b..d9315dea5 100644
--- a/tests_e2e/tests/multi_config_ext/multi_config_ext.py
+++ b/tests_e2e/tests/multi_config_ext/multi_config_ext.py
@@ -28,6 +28,8 @@
from azure.mgmt.compute.models import VirtualMachineInstanceView
from tests_e2e.tests.lib.agent_test import AgentVmTest
+from tests_e2e.tests.lib.azure_sdk_client import AzureSdkClient
+from tests_e2e.tests.lib.virtual_machine_runcommand_client import VirtualMachineRunCommandClient
from tests_e2e.tests.lib.vm_extension_identifier import VmExtensionIds
from tests_e2e.tests.lib.logging import log
from tests_e2e.tests.lib.virtual_machine_client import VirtualMachineClient
@@ -36,7 +38,7 @@
class MultiConfigExt(AgentVmTest):
class TestCase:
- def __init__(self, extension: VirtualMachineExtensionClient, get_settings: Callable[[str], Dict[str, str]]):
+ def __init__(self, extension: AzureSdkClient, get_settings: Callable[[str], Dict[str, str]]):
self.extension = extension
self.get_settings = get_settings
self.test_guid: str = str(uuid.uuid4())
@@ -89,19 +91,18 @@ def run(self):
# Create 3 different RCv2 extensions and a single config extension (CSE) and assign each a unique guid. Each
# extension will have settings that echo its assigned guid. We will use this guid to verify the extension
# statuses later.
- mc_settings: Callable[[Any], Dict[str, Dict[str, str]]] = lambda s: {
- "source": {"script": f"echo {s}"}}
+ mc_settings: Callable[[Any], Dict[str, str]] = lambda s: {"source": f"echo {s}"}
sc_settings: Callable[[Any], Dict[str, str]] = lambda s: {'commandToExecute': f"echo {s}"}
test_cases: Dict[str, MultiConfigExt.TestCase] = {
"MCExt1": MultiConfigExt.TestCase(
- VirtualMachineExtensionClient(self._context.vm, VmExtensionIds.RunCommandHandler,
+ VirtualMachineRunCommandClient(self._context.vm, VmExtensionIds.RunCommandHandler,
resource_name="MCExt1"), mc_settings),
"MCExt2": MultiConfigExt.TestCase(
- VirtualMachineExtensionClient(self._context.vm, VmExtensionIds.RunCommandHandler,
+ VirtualMachineRunCommandClient(self._context.vm, VmExtensionIds.RunCommandHandler,
resource_name="MCExt2"), mc_settings),
"MCExt3": MultiConfigExt.TestCase(
- VirtualMachineExtensionClient(self._context.vm, VmExtensionIds.RunCommandHandler,
+ VirtualMachineRunCommandClient(self._context.vm, VmExtensionIds.RunCommandHandler,
resource_name="MCExt3"), mc_settings),
"CSE": MultiConfigExt.TestCase(
VirtualMachineExtensionClient(self._context.vm, VmExtensionIds.CustomScript), sc_settings)
@@ -116,10 +117,10 @@ def run(self):
# Update MCExt3 and CSE with new guids and add a new instance of RCv2 to the VM
updated_test_cases: Dict[str, MultiConfigExt.TestCase] = {
"MCExt3": MultiConfigExt.TestCase(
- VirtualMachineExtensionClient(self._context.vm, VmExtensionIds.RunCommandHandler,
+ VirtualMachineRunCommandClient(self._context.vm, VmExtensionIds.RunCommandHandler,
resource_name="MCExt3"), mc_settings),
"MCExt4": MultiConfigExt.TestCase(
- VirtualMachineExtensionClient(self._context.vm, VmExtensionIds.RunCommandHandler,
+ VirtualMachineRunCommandClient(self._context.vm, VmExtensionIds.RunCommandHandler,
resource_name="MCExt4"), mc_settings),
"CSE": MultiConfigExt.TestCase(
VirtualMachineExtensionClient(self._context.vm, VmExtensionIds.CustomScript), sc_settings)
@@ -138,10 +139,10 @@ def run(self):
log.info("Add only multi-config extensions to the VM...")
mc_test_cases: Dict[str, MultiConfigExt.TestCase] = {
"MCExt5": MultiConfigExt.TestCase(
- VirtualMachineExtensionClient(self._context.vm, VmExtensionIds.RunCommandHandler,
+ VirtualMachineRunCommandClient(self._context.vm, VmExtensionIds.RunCommandHandler,
resource_name="MCExt5"), mc_settings),
"MCExt6": MultiConfigExt.TestCase(
- VirtualMachineExtensionClient(self._context.vm, VmExtensionIds.RunCommandHandler,
+ VirtualMachineRunCommandClient(self._context.vm, VmExtensionIds.RunCommandHandler,
resource_name="MCExt6"), mc_settings)
}
self.enable_and_assert_test_cases(cases_to_enable=mc_test_cases, cases_to_assert=mc_test_cases,
diff --git a/tests_e2e/tests/scripts/agent_cgroups_process_check-unknown_process_check.py b/tests_e2e/tests/scripts/agent_cgroups_process_check-unknown_process_check.py
index d1b3014a0..fff5746cc 100755
--- a/tests_e2e/tests/scripts/agent_cgroups_process_check-unknown_process_check.py
+++ b/tests_e2e/tests/scripts/agent_cgroups_process_check-unknown_process_check.py
@@ -18,14 +18,13 @@
# This script forces the process check by putting unknown process in the agent's cgroup
-import os
import subprocess
import datetime
from assertpy import fail
from azurelinuxagent.common.utils import shellutil
-from tests_e2e.tests.lib.cgroup_helpers import check_agent_quota_disabled, check_log_message, get_unit_cgroup_paths, AGENT_SERVICE_NAME
+from tests_e2e.tests.lib.cgroup_helpers import check_agent_quota_disabled, check_log_message, get_unit_cgroup_proc_path, AGENT_SERVICE_NAME
from tests_e2e.tests.lib.logging import log
from tests_e2e.tests.lib.retry import retry_if_false
@@ -62,8 +61,8 @@ def disable_agent_cgroups_with_unknown_process(pid):
Note: System may kick the added process out of the cgroups, keeps adding until agent detect that process
"""
- def unknown_process_found(cpu_cgroup):
- cgroup_procs_path = os.path.join(cpu_cgroup, "cgroup.procs")
+ def unknown_process_found():
+ cgroup_procs_path = get_unit_cgroup_proc_path(AGENT_SERVICE_NAME, 'cpu,cpuacct')
log.info("Adding dummy process %s to cgroup.procs file %s", pid, cgroup_procs_path)
try:
with open(cgroup_procs_path, 'a') as f:
@@ -81,9 +80,7 @@ def unknown_process_found(cpu_cgroup):
pid)), attempts=3)
return found and retry_if_false(check_agent_quota_disabled, attempts=3)
- cpu_cgroup, _ = get_unit_cgroup_paths(AGENT_SERVICE_NAME)
-
- found: bool = retry_if_false(lambda: unknown_process_found(cpu_cgroup), attempts=3)
+ found: bool = retry_if_false(unknown_process_found, attempts=3)
if not found:
fail("The agent did not detect unknown process: {0}".format(pid))
diff --git a/tests_e2e/tests/scripts/agent_cpu_quota-check_agent_cpu_quota.py b/tests_e2e/tests/scripts/agent_cpu_quota-check_agent_cpu_quota.py
index 5dfc55be8..29758d02b 100755
--- a/tests_e2e/tests/scripts/agent_cpu_quota-check_agent_cpu_quota.py
+++ b/tests_e2e/tests/scripts/agent_cpu_quota-check_agent_cpu_quota.py
@@ -115,7 +115,7 @@ def check_agent_log_for_metrics() -> bool:
if match is not None:
processor_time.append(float(match.group(1)))
else:
- match = re.search(r"Throttled Time\s*\[walinuxagent.service\]\s*=\s*([0-9.]+)", record.message)
+ match = re.search(r"Throttled Time \(s\)\s*\[walinuxagent.service\]\s*=\s*([0-9.]+)", record.message)
if match is not None:
throttled_time.append(float(match.group(1)))
if len(processor_time) < 1 or len(throttled_time) < 1:
diff --git a/tests_e2e/tests/scripts/agent_update-self_update_latest_version.py b/tests_e2e/tests/scripts/agent_update-self_update_latest_version.py
index 4be0f0dc3..004011dec 100755
--- a/tests_e2e/tests/scripts/agent_update-self_update_latest_version.py
+++ b/tests_e2e/tests/scripts/agent_update-self_update_latest_version.py
@@ -19,20 +19,22 @@
# returns the agent latest version published
#
+import argparse
+
from azurelinuxagent.common.protocol.goal_state import GoalStateProperties
from azurelinuxagent.common.protocol.util import get_protocol_util
from azurelinuxagent.common.utils.flexible_version import FlexibleVersion
from tests_e2e.tests.lib.retry import retry
-def get_agent_family_manifest(goal_state):
+def get_agent_family_manifest(goal_state, family_type):
"""
- Get the agent_family from last GS for Test Family
+ Get the agent_family from last GS for given Family
"""
agent_families = goal_state.extensions_goal_state.agent_families
agent_family_manifests = []
for m in agent_families:
- if m.name == 'Test':
+ if m.name == family_type:
if len(m.uris) > 0:
agent_family_manifests.append(m)
return agent_family_manifests[0]
@@ -53,11 +55,14 @@ def get_largest_version(agent_manifest):
def main():
try:
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--family_type', dest="family_type", default="Test")
+ args = parser.parse_args()
protocol = get_protocol_util().get_protocol(init_goal_state=False)
retry(lambda: protocol.client.reset_goal_state(
goal_state_properties=GoalStateProperties.ExtensionsGoalState))
goal_state = protocol.client.get_goal_state()
- agent_family = get_agent_family_manifest(goal_state)
+ agent_family = get_agent_family_manifest(goal_state, args.family_type)
agent_manifest = goal_state.fetch_agent_manifest(agent_family.name, agent_family.uris)
largest_version = get_largest_version(agent_manifest)
print(str(largest_version))
diff --git a/tests_e2e/tests/scripts/agent_update-wait_for_rsm_gs.py b/tests_e2e/tests/scripts/agent_update-wait_for_rsm_gs.py
index 832e0fd64..f46ee5c59 100755
--- a/tests_e2e/tests/scripts/agent_update-wait_for_rsm_gs.py
+++ b/tests_e2e/tests/scripts/agent_update-wait_for_rsm_gs.py
@@ -60,7 +60,8 @@ def main():
retry(lambda: protocol.client.reset_goal_state(
goal_state_properties=GoalStateProperties.ExtensionsGoalState))
- found: bool = retry_if_false(lambda: verify_rsm_requested_version(protocol, args.version))
+ # whole pipeline can take some time to update the goal state with the requested version, so increasing the timeout
+ found: bool = retry_if_false(lambda: verify_rsm_requested_version(protocol, args.version), delay=60)
if not found:
raise Exception("The latest goal state didn't contain requested version after we submit the rsm request for: {0}.".format(args.version))
diff --git a/tests_e2e/tests/scripts/ext_cgroups-check_cgroups_extensions.py b/tests_e2e/tests/scripts/ext_cgroups-check_cgroups_extensions.py
index 8d97da3f7..2cd6d9492 100755
--- a/tests_e2e/tests/scripts/ext_cgroups-check_cgroups_extensions.py
+++ b/tests_e2e/tests/scripts/ext_cgroups-check_cgroups_extensions.py
@@ -24,11 +24,10 @@
from tests_e2e.tests.lib.agent_log import AgentLog
from tests_e2e.tests.lib.cgroup_helpers import verify_if_distro_supports_cgroup, \
verify_agent_cgroup_assigned_correctly, BASE_CGROUP, EXT_CONTROLLERS, get_unit_cgroup_mount_path, \
- GATESTEXT_SERVICE, AZUREMONITORAGENT_SERVICE, MDSD_SERVICE, check_agent_quota_disabled, \
+ GATESTEXT_SERVICE, AZUREMONITORAGENT_SERVICE, check_agent_quota_disabled, \
check_cgroup_disabled_with_unknown_process, CGROUP_TRACKED_PATTERN, AZUREMONITOREXT_FULL_NAME, GATESTEXT_FULL_NAME, \
print_cgroups
from tests_e2e.tests.lib.logging import log
-from tests_e2e.tests.lib.remote_test import run_remote_test
from tests_e2e.tests.lib.retry import retry_if_false
@@ -119,10 +118,6 @@ def verify_extension_service_cgroup_created_on_file_system():
# Azure Monitor Extension Service
azuremonitoragent_cgroup_mount_path = get_unit_cgroup_mount_path(AZUREMONITORAGENT_SERVICE)
azuremonitoragent_service_name = AZUREMONITORAGENT_SERVICE
- # Old versions of AMA extension has different service name
- if azuremonitoragent_cgroup_mount_path is None:
- azuremonitoragent_cgroup_mount_path = get_unit_cgroup_mount_path(MDSD_SERVICE)
- azuremonitoragent_service_name = MDSD_SERVICE
verify_extension_service_cgroup_created(azuremonitoragent_service_name, azuremonitoragent_cgroup_mount_path)
log.info('Verified all extension service cgroup paths created in file system .\n')
@@ -178,7 +173,7 @@ def verify_ext_cgroups_tracked():
azuremonitoragent_cgroups_tracked = True
elif name.startswith(GATESTEXT_SERVICE):
gatestext_service_cgroups_tracked = True
- elif name.startswith(AZUREMONITORAGENT_SERVICE) or name.startswith(MDSD_SERVICE):
+ elif name.startswith(AZUREMONITORAGENT_SERVICE):
azuremonitoragent_service_cgroups_tracked = True
cgroups_added_for_telemetry.append((name, path))
@@ -216,10 +211,10 @@ def main():
try:
- run_remote_test(main)
+ main()
except Exception as e:
# It is possible that agent cgroup can be disabled due to UNKNOWN process or throttled before we run this check, in that case, we should ignore the validation
- if check_cgroup_disabled_with_unknown_process() and retry_if_false(check_agent_quota_disabled()):
+ if check_cgroup_disabled_with_unknown_process() and retry_if_false(check_agent_quota_disabled):
log.info("Cgroup is disabled due to UNKNOWN process, ignoring ext cgroups validations")
else:
raise
diff --git a/tests_e2e/tests/scripts/initial_agent_update-agent_update_check_from_log.py b/tests_e2e/tests/scripts/initial_agent_update-agent_update_check_from_log.py
new file mode 100755
index 000000000..3ae62fb30
--- /dev/null
+++ b/tests_e2e/tests/scripts/initial_agent_update-agent_update_check_from_log.py
@@ -0,0 +1,62 @@
+#!/usr/bin/env pypy3
+
+# Microsoft Azure Linux Agent
+#
+# Copyright 2018 Microsoft Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Checks that the initial agent update happens with self-update before processing goal state from the agent log
+
+import argparse
+import datetime
+import re
+
+from assertpy import fail
+
+from tests_e2e.tests.lib.agent_log import AgentLog
+from tests_e2e.tests.lib.logging import log
+
+
+def main():
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--current_version", dest='current_version', required=True)
+ parser.add_argument("--latest_version", dest='latest_version', required=True)
+ args = parser.parse_args()
+
+ agentlog = AgentLog()
+ patterns = {
+ "goal_state": "ProcessExtensionsGoalState started",
+ "self_update": f"Self-update is ready to upgrade the new agent: {args.latest_version} now before processing the goal state",
+ "exit_process": f"Current Agent {args.current_version} completed all update checks, exiting current process to upgrade to the new Agent version {args.latest_version}"
+ }
+ first_occurrence_times = {"goal_state": datetime.time.min, "self_update": datetime.time.min, "exit_process": datetime.time.min}
+
+ for record in agentlog.read():
+ for key, pattern in patterns.items():
+ # Skip if we already found the first occurrence of the pattern
+ if first_occurrence_times[key] != datetime.time.min:
+ continue
+ if re.search(pattern, record.message, flags=re.DOTALL):
+ log.info(f"Found data: {record} in agent log")
+ first_occurrence_times[key] = record.when
+ break
+
+ if first_occurrence_times["self_update"] < first_occurrence_times["goal_state"] and first_occurrence_times["exit_process"] < first_occurrence_times["goal_state"]:
+ log.info("Verified initial agent update happened before processing goal state")
+ else:
+ fail(f"Agent initial update didn't happen before processing goal state and first_occurrence_times for patterns: {patterns} are: {first_occurrence_times}")
+
+
+if __name__ == '__main__':
+ main()