From 8c8aa8c7023a68033825925fea3a1376eee30987 Mon Sep 17 00:00:00 2001 From: Daniel Ellis Date: Fri, 23 Aug 2024 15:44:24 +0100 Subject: [PATCH] update --- .github/ISSUE_TEMPLATE/add-Institution.md | 35 --- .github/ISSUE_TEMPLATE/add-consortium.md | 39 ---- .github/ISSUE_TEMPLATE/consortium.md | 49 ++++ .github/ISSUE_TEMPLATE/default.md | 17 -- .github/ISSUE_TEMPLATE/institution.md | 46 ++++ .github/libs/action_functions.py | 109 --------- .github/libs/add/Consortium.py | 50 ----- .github/libs/add/Institution.py | 48 ---- .github/libs/checks/__init__.py | 2 - .github/libs/checks/institution.py | 25 --- .github/libs/checks/schema.py | 33 --- .github/libs/checksum_tools.py | 100 --------- .github/libs/mergeTables.py | 127 ----------- .github/libs/misc/_functions.py | 126 ----------- .github/libs/misc/model_components.py | 60 ----- .github/libs/parse/Consortium.py | 62 ------ .github/libs/parse/Institution.py | 177 --------------- .github/libs/shell/reset_versions.sh | 21 -- .github/libs/version.py | 210 ------------------ .../workflows/{ => legacy}/dispatch_all.yml | 0 .../dispatch_consortium.yml | 0 .../dispatch_model_component.yml | 0 .github/workflows/legacy/libs.zip | Bin 0 -> 18177 bytes .../workflows/{ => legacy}/version_update.yml | 0 .github/workflows/new_issue_submission.yml | 56 +++++ .github/workflows/new_x_from_issue.yml | 116 ---------- 26 files changed, 151 insertions(+), 1357 deletions(-) delete mode 100644 .github/ISSUE_TEMPLATE/add-Institution.md delete mode 100644 .github/ISSUE_TEMPLATE/add-consortium.md create mode 100644 .github/ISSUE_TEMPLATE/consortium.md delete mode 100644 .github/ISSUE_TEMPLATE/default.md create mode 100644 .github/ISSUE_TEMPLATE/institution.md delete mode 100644 .github/libs/action_functions.py delete mode 100644 .github/libs/add/Consortium.py delete mode 100644 .github/libs/add/Institution.py delete mode 100644 .github/libs/checks/__init__.py delete mode 100644 .github/libs/checks/institution.py delete mode 100644 .github/libs/checks/schema.py delete mode 100644 .github/libs/checksum_tools.py delete mode 100644 .github/libs/mergeTables.py delete mode 100644 .github/libs/misc/_functions.py delete mode 100644 .github/libs/misc/model_components.py delete mode 100644 .github/libs/parse/Consortium.py delete mode 100644 .github/libs/parse/Institution.py delete mode 100644 .github/libs/shell/reset_versions.sh delete mode 100644 .github/libs/version.py rename .github/workflows/{ => legacy}/dispatch_all.yml (100%) rename .github/workflows/{untitled folder => legacy}/dispatch_consortium.yml (100%) rename .github/workflows/{untitled folder => legacy}/dispatch_model_component.yml (100%) create mode 100644 .github/workflows/legacy/libs.zip rename .github/workflows/{ => legacy}/version_update.yml (100%) create mode 100644 .github/workflows/new_issue_submission.yml delete mode 100644 .github/workflows/new_x_from_issue.yml diff --git a/.github/ISSUE_TEMPLATE/add-Institution.md b/.github/ISSUE_TEMPLATE/add-Institution.md deleted file mode 100644 index d15511e1a..000000000 --- a/.github/ISSUE_TEMPLATE/add-Institution.md +++ /dev/null @@ -1,35 +0,0 @@ ---- -name: Add Institution -about: Adding a new institution -title: 'New Institution' -labels: 'add_institution' -assignees: '' - ---- - -# New Institution Request - -To request a new item, please amend the following template below to reflect the items you are interested in. 
-Conditions on naming conventions and permissible items can be found on the [WIKI](https://wiki.mipcvs.dev/CMIP6Plus/Rules/institution/) and relevant GitHub page
-
-
-
-## Content
-
- **NOTE** Please review the rules provided in https://wiki.mipcvs.dev/CMIP6Plus/Rules/institution/ before completing this form. Submission of this is an acceptance of any terms outlined in the referenced document at the time.
-
-To get an institution's ROR code, we can see if it exists on ror.org. For the entry below, we have the page: https://ror.org/000fg4e24
-
-``` configfile
-
-[institution]
-    Acronym = "CMIP-IPO"
-    Full_Name = "Coupled Model Intercomparison Project: International Project Office"
-    ROR = "000fg4e24"
-
-```
-
-
diff --git a/.github/ISSUE_TEMPLATE/add-consortium.md b/.github/ISSUE_TEMPLATE/add-consortium.md
deleted file mode 100644
index d1de0416c..000000000
--- a/.github/ISSUE_TEMPLATE/add-consortium.md
+++ /dev/null
@@ -1,39 +0,0 @@
----
-name: Add Consortium
-about: Adding a new consortium
-title: 'New Consortium'
-labels: 'add_consortium'
-assignees: ''
-
----
-
-# Add Consortium Template
-
-To request a new item please ammend the following template below to reflect the items you are interested in.
-Conditions on naming conventions and permissable items can be found on the WIKI and relevant github pages (links to be added. )
-
-
-
-## Contents (what we wish to add)
-
-
-``` configfile
-
-[consortium]
-    Acronym = "CMIP"
-    Name = "Coupled Model Intercomparison Project"
-
-    [institutions]
-    cmip6_acronyms = [
-        "CMIP-IPO",
-        "WCRP"
-    ]
-
-```
-
-
diff --git a/.github/ISSUE_TEMPLATE/consortium.md b/.github/ISSUE_TEMPLATE/consortium.md
new file mode 100644
index 000000000..68ec01b39
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/consortium.md
@@ -0,0 +1,49 @@
+---
+name: Consortium
+about: Adding a new, or updating an existing, consortium
+title: 'Review request for change in consortium'
+labels: 'consortium'
+assignees: ''
+
+---
+
+# Add Consortium Template
+
+To request a new item, please amend the template below to reflect the items you are interested in.
+
+Relevant conditions and naming conventions for this item can be found using the wiki pages [here]().
+
+## Amending Information on an Existing Item
+
+If you wish to amend an item, please supply only the fields you are interested in and ensure that you change the *action* field to *update*.
+
+``` action = update ```
+
+
+
+## Contents (What We Wish to Add)
+
+
+
+``` configfile
+
+
+[consortium]
+    Acronym = "CMIP"
+    Name = "Coupled Model Intercomparison Project"
+
+    [institutions]
+    cmip6_acronyms = [
+        "CMIP-IPO",
+        "WCRP"
+    ]
+    # nest institutions here, using the CMIP acronyms with which they have been registered.
+
+
+```
+
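For orientation, the `parse_md` helper that this patch deletes from `.github/libs/action_functions.py` read `configfile` blocks like the one above with `configparser` and `ast.literal_eval`. A minimal runnable sketch of that parsing step; the payload text is illustrative, and the section headers are left-aligned so the stock `ConfigParser` accepts them:

```python
import ast
import configparser
from io import StringIO

# Illustrative excerpt of an issue body; real submissions wrap this in a
# "``` configfile ... ```" fence that parse_md extracted with a regex.
config_str = """
[consortium]
Acronym = "CMIP"
Name = "Coupled Model Intercomparison Project"

[institutions]
cmip6_acronyms = [
    "CMIP-IPO",
    "WCRP"
    ]
"""

config = configparser.ConfigParser()
config.read_file(StringIO(config_str))

# Each value is stored as a quoted Python literal, so literal_eval turns
# '"CMIP"' into 'CMIP' and the indented bracket block into a real list.
parsed = {
    section: {
        option: ast.literal_eval(config.get(section, option))
        for option in config.options(section)
    }
    for section in config.sections()
}

print(parsed["consortium"]["acronym"])           # -> CMIP (options are lowercased)
print(parsed["institutions"]["cmip6_acronyms"])  # -> ['CMIP-IPO', 'WCRP']
```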
diff --git a/.github/ISSUE_TEMPLATE/default.md b/.github/ISSUE_TEMPLATE/default.md
deleted file mode 100644
index 0e62f0bb4..000000000
--- a/.github/ISSUE_TEMPLATE/default.md
+++ /dev/null
@@ -1,17 +0,0 @@
----
-name: Other
-about: Creating a general issue
-title: 'Add a descriptive summary here.'
-labels: ''
-assignees: ''
-
----
-
-# General Issue
-
-Please provided a detailed outline of the problem, discussion or suggestion.
-
-Make sure to provide the following where it might apply.
-
-- [ ] Any relevant files or code snippets
-- [ ] Where this occurs
diff --git a/.github/ISSUE_TEMPLATE/institution.md b/.github/ISSUE_TEMPLATE/institution.md
new file mode 100644
index 000000000..0455c7188
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/institution.md
@@ -0,0 +1,46 @@
+---
+name: Institution
+about: Adding a new, or updating an existing, institution
+title: 'Review request for change in institution'
+labels: 'institution'
+assignees: ''
+
+---
+
+# Add Institution Template
+
+To request a new item, please amend the template below to reflect the items you are interested in.
+
+Relevant conditions and naming conventions for this item can be found using the wiki pages [here]().
+
+## Amending Information on an Existing Item
+
+If you wish to amend an item, please supply only the fields you are interested in and ensure that you change the *action* field to *update*.
+
+``` action = update ```
+
+
+
+## Contents (What We Wish to Add)
+
+
+
+``` configfile
+
+
+[institution]
+    Acronym = "CMIP-IPO"
+    Full_Name = "Coupled Model Intercomparison Project: International Project Office"
+    ROR = "000fg4e24"
+
+    # only change the item below to "update" if you are submitting a correction.
+    action = "new"
+
+
+```
+
diff --git a/.github/libs/action_functions.py b/.github/libs/action_functions.py
deleted file mode 100644
index 2e6c188dc..000000000
--- a/.github/libs/action_functions.py
+++ /dev/null
@@ -1,109 +0,0 @@
-
-import os,sys,json,ast
-import re,configparser
-from io import StringIO
-
-
-def parse_md(body):
-    # remove comments
-    pattern = r'<!--.*?-->'
-
-    # Remove comments using re.sub
-    body = re.sub(r'/r/n',r'/n', re.sub(pattern, '', body, flags=re.DOTALL))
-
-
-    config_str = re.search(r'```\sconfigfile(.*?)```',body, re.DOTALL).group(1)
-    print(config_str)
-
-    # Create a file-like object from the string
-    config_file = StringIO(config_str)
-
-    # Create a ConfigParser object
-    config = configparser.ConfigParser()
-
-    # Read configuration from the file-like object
-    config.read_file(config_file)
-
-    # Initialize an empty dictionary to hold the configuration data
-    config_dict = {}
-
-    # Iterate over sections and options
-    for section in config.sections():
-        config_dict[section] = {}
-        for option in config.options(section):
-            config_dict[section][option] = ast.literal_eval(config.get(section, option))
-
-    return config_dict
-
-
-def dispatch(token,payload,repo):
-
-    import json
-    from urllib import request
-
-    # Construct the request headers
-    headers = {
-        "Accept": "application/vnd.github.everest-preview+json",
-        "Authorization": f"token {token}",
-        "Content-Type": "application/json"
-    }
-
-    # Encode the payload
-    datapayload = json.dumps(payload).encode('utf-8')
-
-    # Make the POST request
-    req = request.Request(f"{repo}/dispatches", data=datapayload, headers=headers, method='POST')
-
-    # Perform the request
-    try:
-        with request.urlopen(req) as response:
-            if response.getcode() == 204:
-                print("\n\nDispatch event triggered successfully.")
-            else:
-                print(f"Failed to trigger dispatch event. Status code: {response.getcode()}")
-                print(response.read().decode('utf-8'))
-    except Exception as e:
-        print(f"Error: {e}")
-
-
-def update_issue_title (issue_number,kind,payload):
-    if issue_number < 0:
-        print('\033[91m\n\nUpdating: ',payload["client_payload"]["name"],'\033[0m')
-        return 0
-    # change issue name to reflect contents.
- print(os.popen(f'gh issue edit {issue_number} --title "Add {kind}: {payload["client_payload"]["name"]}"').read()) - - -def update_issue(issue_number,comment,err=True): - - - if issue_number < 0: - print('\033[91m\n\n',comment,'\033[0m') - return 0 - - out = os.popen(f'gh issue comment {issue_number} --body "{comment}"') - - if err: - print(out) - sys.exit(comment) - -def close_issue(issue_number, comment,err=True): - if issue_number < 0: - print('\033[91m\n\n',comment,'\033[0m') - return 0 - print(os.popen(f'gh issue close {issue_number} -c "{comment}"')) - if err: sys.exit(comment) - -def jr(file): - return json.load(open(file,'r')) - -def jw(data,file): - return json.dump(data,open(file,'w'), indent=4) - -def getfile(fileend): - import glob - return glob.glob(f'*{fileend}.json') - -def pp(js): - import pprint - pprint.pprint(js) \ No newline at end of file diff --git a/.github/libs/add/Consortium.py b/.github/libs/add/Consortium.py deleted file mode 100644 index b03e6c73a..000000000 --- a/.github/libs/add/Consortium.py +++ /dev/null @@ -1,50 +0,0 @@ - -import json,sys,os,re - -# Add the current directory to the Python path -# current_dir = os.path.dirname(os.path.realpath(__file__)) -# sys.path.append(current_dir) - -# Get the parent directory of the current file -parent_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) -sys.path.append(parent_dir) - -from action_functions import parse_md, dispatch, update_issue_title - - -issue_number = os.environ.get('ISSUE_NUMBER') -issue_title = os.environ.get('ISSUE_TITLE') -issue_body = os.environ.get('ISSUE_BODY') -issue_submitter = os.environ.get('ISSUE_SUBMITTER') -repo = os.environ.get('REPO').replace('https://github.com','https://api.github.com/repos') -token = os.environ.get('GH_TOKEN') - - -parsed = parse_md(issue_body) - - -''' -Lets submit the data to a dispatch event -''' - - -data = parsed['consortium'] -data['institutions'] = parsed['institutions']['cmip6_acronyms'] - - -kind = __file__.split('/')[-1].replace('.py','') - -payload = { - "event_type": kind, - "client_payload": { - "name": data['acronym'], # we need this to define the pull request - "issue": issue_number, - "author" : issue_submitter, - "data" : json.dumps(data) - } -} - -update_issue_title(issue_number,kind,payload) - -dispatch(token,payload,repo) - diff --git a/.github/libs/add/Institution.py b/.github/libs/add/Institution.py deleted file mode 100644 index ed8a9b7e6..000000000 --- a/.github/libs/add/Institution.py +++ /dev/null @@ -1,48 +0,0 @@ - -import json,sys,os,re - -# Add the current directory to the Python path -# current_dir = os.path.dirname(os.path.realpath(__file__)) -# sys.path.append(current_dir) - -# Get the parent directory of the current file -parent_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) -sys.path.append(parent_dir) - -from action_functions import parse_md, dispatch, update_issue_title - -# generic -issue_number = os.environ.get('ISSUE_NUMBER') -issue_title = os.environ.get('ISSUE_TITLE') -issue_body = os.environ.get('ISSUE_BODY') -issue_submitter = os.environ.get('ISSUE_SUBMITTER') -repo = os.environ.get('REPO').replace('https://github.com','https://api.github.com/repos') -token = os.environ.get('GH_TOKEN') - -# get content. 
-parsed = parse_md(issue_body) - - -''' -Lets submit the data to a dispatch event -''' - -data = parsed['institution'] - - -kind = __file__.split('/')[-1].replace('.py','') - -payload = { - "event_type": kind, - "client_payload": { - "name": data['acronym'], # we need this to define the pull request - "issue": issue_number, - "author" : issue_submitter, - "data" : json.dumps(data) - } -} - -update_issue_title(issue_number,kind,payload) - -dispatch(token,payload,repo) - diff --git a/.github/libs/checks/__init__.py b/.github/libs/checks/__init__.py deleted file mode 100644 index 9b60830ff..000000000 --- a/.github/libs/checks/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from . import schema -from . import institution \ No newline at end of file diff --git a/.github/libs/checks/institution.py b/.github/libs/checks/institution.py deleted file mode 100644 index 125230b9d..000000000 --- a/.github/libs/checks/institution.py +++ /dev/null @@ -1,25 +0,0 @@ -import os, json, sys, glob - -def validate(jsn,iloc): - path = os.path.dirname(jsn['@id']).split(':')[-1] - - errors = [] - close = [] - - # assert os.path.exists(loc), f"Path does not exist: {loc}" - # existing = [os.path.splitext(os.path.basename(f))[0] for f in glob.glob(f"{loc}/*.json")] - - if os.path.exists(iloc): - close.append(f"Current institution already exists:\n see {iloc}") - - - if path not in iloc: - errors.append(f"@id / location do not match:\n {path} || {jsn['@id']}") - - - graph = open(os.path.dirname(iloc)+"/graph.json",'r').read() - if jsn['institution:ror'] in graph: - close.append(f"ROR entry already exists in graph. \n EXITING: {jsn['institution:ror']}") - - - return close,errors \ No newline at end of file diff --git a/.github/libs/checks/schema.py b/.github/libs/checks/schema.py deleted file mode 100644 index 466e2cedc..000000000 --- a/.github/libs/checks/schema.py +++ /dev/null @@ -1,33 +0,0 @@ - -import os, json -import jsonschema -from jsonschema import validate - - -def rdjsn(f): - return json.load(open(f,'r')) - -def validate_json(jsn): - - if not isinstance(jsn, dict): - # if we do not give a file, read this - jsn = rdjsn(jsn) - name = os.path.basename(jsn['@id']) - - schema_url = os.path.dirname(jsn['@id']).split(':')[-1] - toplevel = os.popen('git rev-parse --show-toplevel').read().strip() - - schema_loc = f"{toplevel}/JSONLD/{schema_url}/schema.json" - # outfile guarantees that we must run this - - schema = rdjsn(schema_loc) - - try: - validate(instance=jsn, schema=schema) - print(f"Validation succeeded: {name}") - return True, f"Validation succeeded: {name}" - except jsonschema.exceptions.ValidationError as err: - print("Validation error:", err.message, name) - return False, "Validation error:\n {err.message}\n RelevantFile: {jsn['@id']}", False - - \ No newline at end of file diff --git a/.github/libs/checksum_tools.py b/.github/libs/checksum_tools.py deleted file mode 100644 index b20f55b7b..000000000 --- a/.github/libs/checksum_tools.py +++ /dev/null @@ -1,100 +0,0 @@ -# checksum_tools.py - -from copy import deepcopy -import hashlib -import json - -def calculate_checksum(dictionary, overwrite=True, checksum_location='version_metadata',nest = None,update=False): - """ - Calculate the checksum for dictionary and add it to the Header - - Parameters - ---------- - dictionary: dict - The dictionary to set the checksum for. - overwrite: bool - Overwrite the existing checksum (default True). - checksum_location: str - sub-dictionary to look for in /add the checksum to. 
- - Raises - ------ - RuntimeError - If the ``checksum`` key already exists and ``overwrite`` is - False. - """ - - - if 'checksum' in dictionary[checksum_location] : - if not overwrite: - raise RuntimeError('Checksum already exists.') - # del dictionary[checksum_location]['checksum'] - # blank the checksum rather than deleting it. This keeps the order. - if nest: - dictionary[checksum_location][nest]['checksum'] = '' - else: - dictionary[checksum_location]['checksum'] = '' - - cfrom = dictionary.copy() - del cfrom[checksum_location] - - checksum = _checksum(cfrom) - from pprint import pprint - - if update: - - if nest: - dictionary[checksum_location][nest]['checksum'] = checksum - else: - dictionary[checksum_location]['checksum'] = checksum - - return dictionary - - -def validate_checksum(dictionary, checksum_location='version_metadata',error = False): - """ - Validate the checksum in the ``dictionary``. - - Parameters - ---------- - dictionary: dict - The dictionary containing the ``checksum`` to validate. - checksum_location: str - sub-dictionary to look for in /add the checksum to. - - Raises - ------ - KeyError - If the ``checksum`` key does not exist. - RuntimeError - If the ``checksum`` value is invalid. - """ - - - - if ('checksum' not in dictionary[checksum_location]['file']): - raise KeyError('No checksum to validate') - - - dictionary_copy = deepcopy(dictionary) - - try: - del dictionary_copy[checksum_location] - # ['file']['checksum'] - except:... - checksum = _checksum(dictionary_copy) - if dictionary[checksum_location]['file'].get('checksum','no_checksum') != checksum: - msg = ('Expected checksum "{}"\n' - 'Calculated checksum "{}"').format(dictionary[checksum_location]['file']['checksum'],checksum) - print(msg) - if error: - raise RuntimeError(msg) - else: return False - return True - - -def _checksum(obj): - obj_str = json.dumps(obj, sort_keys=True) - checksum_hex = hashlib.md5(obj_str.encode('utf8')).hexdigest() - return 'md5: {}'.format(checksum_hex) - diff --git a/.github/libs/mergeTables.py b/.github/libs/mergeTables.py deleted file mode 100644 index cf5da539a..000000000 --- a/.github/libs/mergeTables.py +++ /dev/null @@ -1,127 +0,0 @@ -''' -A script to generate the searchable index and aggregate miptables for display. 
- -Contact: daniel.ellis@wcrp-cmip.org - -Installation: - https://pypi.org/project/lunr/ - pip install lunr -''' - - -import json -import glob -from collections import OrderedDict - -keys = [ - 'table', - 'out_name', - 'comment', - 'dimensions', - 'frequency', - 'long_name', - 'modeling_realm', - # 'ok_max_mean_abs', - # 'ok_min_mean_abs', - 'positive', - 'standard_name', - 'type', - 'units', - # 'valid_max', - # 'valid_min', - 'cell_measures', - 'cell_methods', - 'commit'] - -index = [ - 'table', - 'out_name', - 'comment', - 'dimensions', - # 'frequency', this is in the table name - 'long_name', - 'modeling_realm', - # 'ok_max_mean_abs', - # 'ok_min_mean_abs', - # 'positive', - 'standard_name', - # 'type', - # 'units', - # 'valid_max', - # 'valid_min', - # 'cell_measures', - # 'cell_methods', - # 'commit' - ] - - -def process_table(t): - with open(t, 'r') as file: - data = json.load(file) - - header = data['Header'] - table = header['table_id'] - commit = '' - - entries = data["variable_entry"] - for k in entries: - - entries[k]['table'] = table - entries[k]['commit'] = commit - # entries[k]['id'] = f"{table}_{entries[k]['out_name']}" - - - entries[k]= OrderedDict((key, entries[k][key]) for key in keys) - - return list(entries.values()) - - - -def main(): - - tables = glob.glob('Tables/*.json') - merged = [] - - for t in tables: - merged.extend(process_table(t)) - - with open('.github/mip-cmor-tables.json', 'w') as outfile: - for i,k in enumerate(merged): - k['id'] = i - outfile.write(f'{json.dumps(k)}\n') - - - # lets create the index and save it. - from lunr import lunr - - idx = lunr( - ref='id', fields=(index), documents=merged - ) - - serialized_idx = idx.serialize() - with open('.github/idx.json', 'w') as outfile: - json.dump(serialized_idx, outfile) - -if __name__ == "__main__": - main() - - -# Search all | field in main document. 
- - -''' -# To reverse the process - -from lunr.index import Index -with open("idx.json") as fd: - serialized_idx = json.loads(fd.read()) - -idx = Index.load(serialized_idx) - - - # note we can boost documents in the results list - # }, { 'boost': 10 } - -''' - - diff --git a/.github/libs/misc/_functions.py b/.github/libs/misc/_functions.py deleted file mode 100644 index 49e312f55..000000000 --- a/.github/libs/misc/_functions.py +++ /dev/null @@ -1,126 +0,0 @@ - - -import urllib.request -import json -from collections import OrderedDict -import sys,os - -import os,json -from typing import Dict,Any -from urllib import request - - -def sort_dict_recursive(input_dict): - if isinstance(input_dict, dict): - return OrderedDict((key, sort_dict_recursive(value)) for key, value in sorted(input_dict.items())) - elif isinstance(input_dict, list): - return [sort_dict_recursive(item) for item in input_dict] - else: - return input_dict - -def airtable_request(base_id,table_name,view_name,api_key): - url = f'https://api.airtable.com/v0/{base_id}/{table_name}?view={view_name}' - # Set up headers with your Airtable API key - headers = { - 'Authorization': f'Bearer {api_key}', - 'Content-Type': 'application/json', - } - # Create a request with headers - request = urllib.request.Request(url, headers=headers) - return request - - - -def fetch_json_from_github(username, repo, path): - try: - # Construct the raw JSON URL - raw_json_url = f"https://raw.githubusercontent.com/{username}/{repo}/master/{path}" - - # Make a GET request to fetch the raw content - with urllib.request.urlopen(raw_json_url) as response: - # Check if the request was successful (status code 200) - if response.getcode() == 200: - # Read the content - raw_data = response.read().decode('utf-8') - - # Parse the JSON content - json_data = json.loads(raw_data) - - # Return the parsed JSON data - return json_data - else: - # Print an error message if the request was not successful - print(f"Failed to fetch data. Status code: {response.getcode()}") - except Exception as e: - # Handle any exceptions that might occur during the request or parsing - print(f"An error occurred: {e}") - - - - -def get_latest_commit(repo_owner: str, repo_name: str, github_token: str) -> Dict[str,Any]: - """ - Retrieve information about the latest commit of a GitHub repository using GITHUB_TOKEN. - - Args: - repo_owner (str): Owner of the repository. - repo_name (str): Repository name. - - Returns: - dict: Dictionary containing details of the latest commit. 
- """ - try: - # Access the GITHUB_TOKEN directly within the GitHub Actions environment - # github_token = api - if not github_token: - print("GITHUB_TOKEN not available.") - # return {} - headers = {} - else: - headers = {"Authorization": f"Bearer {github_token}"} - - api_url_commits = f'https://api.github.com/repos/{repo_owner}/{repo_name}/commits' - - - api_url = f'https://www.github.com/repos/{repo_owner}/{repo_name}' - - with request.urlopen(request.Request(api_url_commits, headers=headers)) as response: - if response.getcode() == 200: - commits_data = json.loads(response.read().decode('utf-8')) - if commits_data: - latest_commit = commits_data[0] - commit_info = { - "SHA": latest_commit["sha"], - "Message": latest_commit["commit"]["message"], - "Author": f"{latest_commit['commit']['author']['name']} <{latest_commit['commit']['author']['email']}>", - "Committer": f"{latest_commit['commit']['committer']['name']} <{latest_commit['commit']['committer']['email']}>", - "Date": latest_commit["commit"]["author"]["date"] - } - else: - commit_info = {} - else: - print(f"Failed to retrieve commit data. Status code: {response.getcode()}") - return {} - - api_url_releases = f'https://api.github.com/repos/{repo_owner}/{repo_name}/releases/latest' - with request.urlopen(request.Request(api_url_releases, headers=headers)) as response: - if response.getcode() == 200: - release_data = json.loads(response.read().decode('utf-8')) - if release_data: - tag_info = { - "TagName": release_data["tag_name"], - "ReleaseName": release_data.get("name", ""), - "ReleaseBody": release_data.get("body", ""), - "ReleaseDate": release_data["published_at"] - } - else: - tag_info = {} - else: - print(f"Failed to retrieve release data. Status code: {response.getcode()}") - return {} - - return {**commit_info,**tag_info,**{'url':api_url}} - - except Exception as e: - print(f"An error occurred: {e}") - return {} diff --git a/.github/libs/misc/model_components.py b/.github/libs/misc/model_components.py deleted file mode 100644 index 0f1651984..000000000 --- a/.github/libs/misc/model_components.py +++ /dev/null @@ -1,60 +0,0 @@ -import urllib.request -import json -from collections import OrderedDict -import sys,os -from _functions import sort_dict_recursive,airtable_request - -base_id,table_name,view_name = 'appaZflpqbFjA6pwV/tblD5m3Bxsph5VjZ0/viwxN1LyTlEA2TZ5W'.split('/') - -# read from action -api_key = sys.argv[1] # Replace with your actual Airtable API key - -request = airtable_request(base_id,table_name,view_name,api_key) - -model_components = {} - -# Make the API request to get data in JSON format -with urllib.request.urlopen(request) as response: - # Process the response as needed - if response.status == 200: - data = json.loads(response.read().decode('utf-8')) - records = data.get('records', []) - for record in records: - fields = record.get('fields', {}) - realm = fields['realm'] - description = fields['description'] - resolution = fields['resolution']+' km' - if realm not in model_components: - model_components[realm] = {} - if description not in model_components[realm]: - model_components[realm][description] = {'description' : description, 'native_nominal_resolutions':[]} - - if resolution not in model_components[realm][description]['native_nominal_resolutions']: - model_components[realm][description]['native_nominal_resolutions'].append(resolution) - - - - else: - print(f"Failed to retrieve data. 
Status code: {response.status}") - - - - -model_components = sort_dict_recursive(model_components) - -if __name__ == '__main__': - - file_path = 'Auxillary_files/MIP_model_components.json' - - # Write the dictionary to the JSON file - with open(file_path, 'w') as json_file: - json.dump({'model_components':model_components}, json_file, indent=4) - - - import version - - tag = os.environ['GH_TOKEN'] - new_contents = version.process_files([file_path],tag=tag,write=False) - - with open(file_path, 'w') as json_file: - json.dump(new_contents, json_file, indent=4) \ No newline at end of file diff --git a/.github/libs/parse/Consortium.py b/.github/libs/parse/Consortium.py deleted file mode 100644 index d1bf8af12..000000000 --- a/.github/libs/parse/Consortium.py +++ /dev/null @@ -1,62 +0,0 @@ -import json, os, sys -from collections import OrderedDict - -# Get the parent directory of the current file -parent_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) -sys.path.append(parent_dir) - -import cmipld -from cmipld.action_functions import update_issue,jr,jw,getfile,close_issue - - - -# data -issue_number = os.environ['ISSUE'] -data = os.environ['PAYLOAD_DATA'] -data = json.loads(str(data)) - - -# Load Existing -consortiums = jr(getfile('consortiums')[0]) -institutions = jr(getfile('institutions')[0])['institutions'] - -# Add new value and sort -conly = consortiums["consortiums"] - - -if data['acronym'] in conly: - close_issue(issue_number,f'# Closing issue. \n {data["acronym"]} already exists in the consortium list. \n\n Please review request and resubmit.') - -error = '' -inst = {} -for i in data['institutions']: - if i not in institutions: - error += f' - Institution [{i}] does not exists in the institutions file. Please add this to proceed.\n' - else: - inst[i] = f"{i} [{institutions[i]['identifiers']['ror']} - {institutions[i]['identifiers']['institution_name']}]" - -if error: - error = '#Error: \n Pausing submission. Please edit the initial config (above) addressing the issues below to try again. 
\n\n ' + error - update_issue(issue_number,error) - - - -conly[data['acronym']] = {"name": data['name'], "contains": sorted(list(inst.values()))} - -sorted_consortiums = OrderedDict(sorted(conly.items())) - - -# Update data -data["consortiums"] = sorted_consortiums - -# Serialize back to JSON -new_json_data = jw(data, getfile('consortiums')[0]) - - - - - - - - - diff --git a/.github/libs/parse/Institution.py b/.github/libs/parse/Institution.py deleted file mode 100644 index 7c76fdef6..000000000 --- a/.github/libs/parse/Institution.py +++ /dev/null @@ -1,177 +0,0 @@ -import json, os, sys, glob -from collections import OrderedDict - - -from cmipld.git.repo_info import ldpath,commit, commit_override_author,addfile -from cmipld.utils.io import read_url -from cmipld.action_functions import update_issue,jr,jw,getfile,close_issue,pp - -path = f'organisations/institutions' -loc = ldpath(path) - -# Get the parent directory of the current file -parent_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) -sys.path.append(parent_dir) -import checks -# from checks import schema,institution - -# data -issue_number = int(os.environ['ISSUE']) -data = os.environ['PAYLOAD_DATA'] -data = json.loads(str(data)) - -data['acronym'] = data['acronym'].replace(' ','') -print(data['acronym']) - -''' -Functions -''' - -URL_TEMPLATE = 'https://api.ror.org/organizations/{}' - - - -def get_ror_data(name): - """Get ROR data for a given institution name.""" - url = URL_TEMPLATE.format(name) - return read_url(url) - - - -def parse_ror_data(cmip_acronym,ror_data): - """Parse ROR data and return relevant information.""" - if ror_data: - - return { - "@id": f"mip-cmor-tables:organisations/institutions/{cmip_acronym.lower()}", - "@type": "cmip:institution", - "cmip_acronym": cmip_acronym, - "ror": ror_data['id'].split('/')[-1], - "name": ror_data['name'], - "url": ror_data.get('links', []) , - "established": ror_data.get('established'), - "type": ror_data.get('types', [])[0] if ror_data.get('types') else None, - "labels": [i['label'] for i in ror_data.get('lables', [])], - "aliases": ror_data.get('aliases', []), - "acronyms": ror_data.get('acronyms', []), - "location": { - "@id": f"mip-cmor-tables:organisations/institutions/location/{ror_data['id'].split('/')[-1]}", - "@type": "location", - "@nest": { - "lat": ror_data['addresses'][0].get('lat') if ror_data.get('addresses') else None, - "lon": ror_data['addresses'][0].get('lat') if ror_data.get('addresses') else None, - "city": ror_data['addresses'][0].get('city') if ror_data.get('addresses') else None, - "country": list(ror_data['country'].values()) if ror_data.get('country') else None - } - } - # can reverse match consortiums or members from here. - - } - else: - return None - - - -''' -Get the Data -''' - - -dta = get_ror_data(data['ror']) -new_entry = parse_ror_data(data['acronym'],dta) - - -outfile = f"{loc}{data['acronym'].lower()}.json" - -close,errors = checks.institution.validate(new_entry,outfile) - - -for error in close: - update_issue(issue_number,f'# Closing issue. \n {error} \n\n Please review request and resubmit.') - -for error in errors: - update_issue(issue_number,f'# {error} \n\n Please update (edit) the entry above.') - - -valid,validation_message = checks.schema.validate_json(new_entry) - -if valid: - update_issue(issue_number,validation_message,False) -else: - error = f"Schema Failed.\n\n Please update the entry above. {validation_message}" - # this exists the script. 
- update_issue(issue_number,error,err=True) - - - - - -update_issue(issue_number,f"# Sanity Check: \n Is '{data['full_name']}' the same as '{new_entry['name']}'",False) - -# print for pull request -pp( {data['acronym'] : new_entry }) - -jsn_ordered = OrderedDict(sorted(new_entry.items(), key=lambda item: item[0])) - - -if 'SUBMIT' in os.environ: - if len(close): - sys.exit(' skipping the submission.' ) - if os.environ['SUBMIT'] == 'none': - sys.exit(' skipping the submission.' ) - elif os.environ['SUBMIT'] == 'auto': - print("auto",outfile) - pass - else: - sys.exit(' skipping the submission.' ) - -# Serialize back to JSON -jw(jsn_ordered, outfile) - -# normal entries if not specified. - -addfile(outfile) -if not commit_override_author(data['acronym'],'Institutions'): - commit(f'New entry {data["acronym"]} to the Institutions LD file') - - - - - - - - - - - - - - - -# def search_ror(query): - -# import requests,json -# import urllib.parse - -# # Strip out strange characters and insert in the desired format -# format_name = lambda n : urllib.parse.quote(n) -# # Make the API call -# url = 'https://api.ror.org/organizations?affiliation=%{}s' - -# response = requests.get(url.format(query)) - -# # Check if the request was successful -# if response.status_code == 200: -# data = response.json() -# if data.get('items'): -# org = data['items'][0].get('organization') -# return data['items'][0]['score'],org['id'].split('/')[-1], org['name'] -# else: return None,None,None -# else: -# print(f"Error: {response.status_code} - {response.text}") -# return None,None,None - - - -# data = parsed['institutions'] -# data['institutions'] = parsed['institutions']['cmip6_acronyms'] \ No newline at end of file diff --git a/.github/libs/shell/reset_versions.sh b/.github/libs/shell/reset_versions.sh deleted file mode 100644 index c49c8658c..000000000 --- a/.github/libs/shell/reset_versions.sh +++ /dev/null @@ -1,21 +0,0 @@ -json_files=$(grep -rl '"version_metadata"' *.json) - -# Iterate over each JSON file and update the 'checksum' element -for json_file in $json_files; do - jq '.version_metadata.checksum = "reset"' "$json_file" > "$json_file.tmp" && mv "$json_file.tmp" "$json_file" -done - - -json_files=$(grep -rl '"version_metadata"' Auxillary*/*.json) - -# Iterate over each JSON file and update the 'checksum' element -for json_file in $json_files; do - jq '.version_metadata.checksum = "reset"' "$json_file" > "$json_file.tmp" && mv "$json_file.tmp" "$json_file" -done - -git add -A -git commit -m 'reset_checksum' -git push - - -# make sure your commit message is 'reset_checksum' \ No newline at end of file diff --git a/.github/libs/version.py b/.github/libs/version.py deleted file mode 100644 index 001d77dc6..000000000 --- a/.github/libs/version.py +++ /dev/null @@ -1,210 +0,0 @@ -import glob -import os -import sys -import re -import json -from collections import OrderedDict -from urllib.request import Request, urlopen -from checksum_tools import validate_checksum, calculate_checksum -from datetime import datetime -import argparse - -########################################## -# load the maintainer file -########################################## - -# maintainers = json.load(open('.github/maintainer_institutes.json', 'r')) -########################################## -# get repo information -########################################## -tag = os.popen("git describe --tags --abbrev=0").read().strip() -# release_date = subprocess.check_output(["git", "log", "-1", "--format=%aI", tag]).strip().decode("utf-8") - - 
-########################################## -# Get the Tag information from the CVs -########################################## -def get_latest_tag_info(repo_owner, repo_name, github_token=None): - tags_url = f"https://api.github.com/repos/{repo_owner}/{repo_name}/tags" - headers = {"Authorization": f"Bearer {github_token}"} if github_token else {} - - try: - # Get the list of tags - request = Request(tags_url, headers=headers) - with urlopen(request) as response: - tags_data = json.loads(response.read()) - - if tags_data: - # Select the latest tag - latest_tag = tags_data[0] - tag_name = latest_tag['name'] - commit_sha = latest_tag['commit']['sha'] - - return {"tag_name": tag_name, "commit_sha": commit_sha} - - except Exception as e: - print(f"Error: {e}") - - return None - - -def process_files(files,token = None, branch=None,force=False): - CVs = get_latest_tag_info('WCRP-CMIP', 'CMIP6Plus_CVs', token) - - for f in files: - print(f) - contents = json.load(open(f, 'r')) - - if 'version_metadata' not in contents: - contents['version_metadata'] = dict(checksum='', commit='') - - - # this now has a different setup due to CVs. - # if validate_checksum(contents): - # print(f,'checksum the same') - # continue - - - skip = '' - # commit_info = os.popen(f'git log -n 1 -- {f} ').read() - full = os.popen(f'git log -- {f} ').read() - - - previous_commit = '' - commit_info = False - - - - commit_blocks = re.split(r'\n(?=commit\s)', full) - for c in commit_blocks: - print(c) - if 'reset-checksum' in c: - continue - if 'Automated Versioning Update' in c: - continue - if skip not in c: - if not commit_info: - commit_info = c - elif commit_info and not previous_commit: - previous_commit = re.search(r"commit (\S+)", c) - break - - if 'commit_info' not in locals(): - print(f) - print(commit_blocks) - print('no suitable commit found') - sys.exit('no suitable commit found') - - - ########################################## - # extract commit info - ########################################## - - commit_dict = {} - - # Extract information using regular expressions - commit_match = re.search(r"commit (\S+)", commit_info) - author_match = re.search(r"Author: (.+)", commit_info) - date_match = re.search(r"Date: (.+)", commit_info) - commit_message_match = re.search(r" (.+)", commit_info) - - if commit_match: - commit_dict["commit_sha"] = commit_match.group(1) - - if author_match: - author_info = author_match.group(1).split(" <") - commit_dict["author_name"] = author_info[0] - # try: - # commit_dict["author_institute"] = maintainers[author_info[0]]['institute'] - # commit_dict["author_name"] = maintainers[author_info[0]]['published_name'] - # except: - commit_dict["author_name"] = author_match.group(1) - - # print( f'Please add \n\t "{author_info[0]}": \n\t\t','{"institute": "", "published_name": "Name you wish to use"}') - # this was a keyerror - - commit_dict["author_email"] = author_info[1][:-1] - - if date_match: - commit_dict["commit_date"] = date_match.group(1) - - if commit_message_match: - commit_dict["commit_message"] = commit_message_match.group(1) - - - - ########################################## - # create a new version metadata - ########################################## - - short = f.replace('.json','') - - try: - old_checksum = contents['version_metadata']['checksum'] - except: - old_checksum = '' - - template = OrderedDict({ - "version_tag": tag, - "checksum": 'checksum', - f"{short}_modified": commit_dict.get('commit_date','new file').lstrip(), - f"{short}_note": 
commit_dict.get('commit_message','no previous commit'),
-            "commit": commit_dict.get('commit_sha', 'none'),
-            "previous_commit": "",
-            "author": commit_dict.get('author_name', 'CMIP-IPO'),
-            # "institution_id": commit_dict.get('author_institute', 'CMIP-IPO'),
-            "CV_collection_version": CVs['tag_name'],
-            "specs_doc": "v6.5.0"
-        })
-
-
-        # instituion to be sourced from the MIPCVs site
-
-        contents = OrderedDict(contents)
-        del contents['version_metadata']
-        contents['version_metadata'] = template
-
-        contents = calculate_checksum(contents,update=True)
-
-        # print('writing', f)
-
-        if old_checksum != contents['version_metadata']['checksum'] or force:
-
-            with open(f, 'w') as write:
-                write.write(json.dumps(contents, indent=4))
-
-            import pprint
-            pprint.pprint(contents['version_metadata'])
-
-            ##########################################
-            # keep the individualized commit messages
-            ##########################################
-
-            print(author_match.group(1),f)
-            print(commit_dict['commit_message'])
-
-            os.popen(f"git add {f}").read()
-
-            os.popen(f'git commit --author="{author_match.group(1)}" -m "{commit_dict["commit_message"]}"').read()
-
-            os.popen('git push').read()
-
-
-
-if __name__ == "__main__":
-    files = glob.glob('*.json')
-    files.extend(glob.glob('Auxillary_files/*.json'))
-
-    parser = argparse.ArgumentParser(description="Retrieve details for the latest tag of a GitHub repository.")
-    parser.add_argument("-t", "--token", help="gh token")
-    parser.add_argument("-b","--branch" ,help="branch name")
-    parser.add_argument("-n","--newrelease" ,help="tag name")
-
-    args = parser.parse_args()
-
-    if args.branch == 'main':
-        if args.newrelease:
-            process_files(files, token=args.token, branch = args.branch, force = True, )
-        else:
-            process_files(files, token=args.token, branch = args.branch)
-
diff --git a/.github/workflows/dispatch_all.yml b/.github/workflows/legacy/dispatch_all.yml
similarity index 100%
rename from .github/workflows/dispatch_all.yml
rename to .github/workflows/legacy/dispatch_all.yml
diff --git a/.github/workflows/untitled folder/dispatch_consortium.yml b/.github/workflows/legacy/dispatch_consortium.yml
similarity index 100%
rename from .github/workflows/untitled folder/dispatch_consortium.yml
rename to .github/workflows/legacy/dispatch_consortium.yml
diff --git a/.github/workflows/untitled folder/dispatch_model_component.yml b/.github/workflows/legacy/dispatch_model_component.yml
similarity index 100%
rename from .github/workflows/untitled folder/dispatch_model_component.yml
rename to .github/workflows/legacy/dispatch_model_component.yml
diff --git a/.github/workflows/legacy/libs.zip b/.github/workflows/legacy/libs.zip
new file mode 100644
index 0000000000000000000000000000000000000000..24316973a5552dc5597fa750a8d4c188c9c88597
GIT binary patch
literal 18177
[base85-encoded binary data omitted]

literal 0
HcmV?d00001

diff --git a/.github/workflows/version_update.yml b/.github/workflows/legacy/version_update.yml
similarity index 100%
rename from .github/workflows/version_update.yml
rename to .github/workflows/legacy/version_update.yml
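The `version.py` script retired above relied on the `calculate_checksum` and `_checksum` helpers from the deleted `checksum_tools.py`: a file is hashed with its own `version_metadata` block removed, so stamping new metadata does not invalidate the checksum of the payload. A minimal sketch of that scheme, with an illustrative document; the hashing logic mirrors the removed `_checksum`:

```python
import hashlib
import json
from copy import deepcopy

def _checksum(obj):
    # Serialise with sorted keys so the hash is stable across key order.
    obj_str = json.dumps(obj, sort_keys=True)
    return 'md5: {}'.format(hashlib.md5(obj_str.encode('utf8')).hexdigest())

# Illustrative CV-style document carrying its own version block.
document = {
    "institution": {"Acronym": "CMIP-IPO"},
    "version_metadata": {"checksum": "", "commit": ""},
}

# Hash everything except version_metadata, then store the result inside it.
payload = deepcopy(document)
del payload["version_metadata"]
document["version_metadata"]["checksum"] = _checksum(payload)

print(document["version_metadata"]["checksum"])
```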
diff --git a/.github/workflows/new_issue_submission.yml b/.github/workflows/new_issue_submission.yml
new file mode 100644
index 000000000..c376953b5
--- /dev/null
+++ b/.github/workflows/new_issue_submission.yml
@@ -0,0 +1,56 @@
+name: Process a new issue
+
+on:
+  issues:
+    types: [edited, labeled]
+    # opened
+
+jobs:
+
+  process_template:
+    runs-on: ubuntu-latest
+    permissions:
+      actions: write
+      checks: write
+      contents: write
+      deployments: write
+      id-token: write
+      issues: write
+      # discussions: write
+      packages: write
+      pages: write
+      pull-requests: write
+      repository-projects: write
+      # security-events: write
+      statuses: write
+
+
+
+
+    steps:
+
+      - name: Get a list of directories with updated files
+        id: install-cmipld
+        uses: WCRP-CMIP/CMIP-LD/actions/cmipld@main
+
+
+      - name: Checkout repository
+        uses: actions/checkout@v4
+
+
+      - name: Run Python script
+        id: run_python
+        env:
+          ISSUE_TITLE: ${{ github.event.issue.title }}
+          ISSUE_BODY: |
+            ${{ github.event.issue.body }}
+          # pipe should preserve newline properties for multiline values
+          ISSUE_SUBMITTER: ${{ github.event.issue.user.login }}
+          ISSUE_NUMBER: ${{ github.event.issue.number }}
+          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+
+        run: |
+          new_element
+
+
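The new workflow hands the issue fields to a `new_element` entry point provided by the installed cmipld action; its internals are not part of this patch. A hypothetical sketch of how such an entry point could consume those environment variables, modelled on the `add/Institution.py` script removed above; the parsed dict and event name are illustrative:

```python
import json
import os

# Environment variable names come from the workflow's env block above.
issue_number = os.environ.get('ISSUE_NUMBER')
issue_body = os.environ.get('ISSUE_BODY')
issue_submitter = os.environ.get('ISSUE_SUBMITTER')

# parse_md (see the deleted action_functions.py) would turn the configfile
# fence in issue_body into a nested dict such as:
parsed = {'institution': {'acronym': 'CMIP-IPO', 'ror': '000fg4e24', 'action': 'new'}}

# The legacy scripts then built a client payload keyed on the acronym.
payload = {
    "event_type": "institution",
    "client_payload": {
        "name": parsed['institution']['acronym'],  # used to name the pull request
        "issue": issue_number,
        "author": issue_submitter,
        "data": json.dumps(parsed['institution']),
    },
}

print(json.dumps(payload, indent=2))
```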
diff --git a/.github/workflows/new_x_from_issue.yml b/.github/workflows/new_x_from_issue.yml
deleted file mode 100644
index 89f8dd4e9..000000000
--- a/.github/workflows/new_x_from_issue.yml
+++ /dev/null
@@ -1,116 +0,0 @@
-name: New Issue Processing
-
-on:
-  issues:
-    types: [edited, labeled]
-    # opened
-
-jobs:
-
-  process_template:
-    runs-on: ubuntu-latest
-    permissions:
-      actions: write
-      checks: write
-      contents: write
-      deployments: write
-      id-token: write
-      issues: write
-      # discussions: write
-      packages: write
-      pages: write
-      pull-requests: write
-      repository-projects: write
-      # security-events: write
-      statuses: write
-
-    steps:
-      - name: Checkout repository
-        uses: actions/checkout@v3
-
-
-
-
-      - name: Set up Python
-        uses: actions/setup-python@v4
-        with:
-          python-version: '3.12' # Specify the Python version you need
-
-      - name: Cache pip packages
-        uses: actions/cache@v3
-        with:
-          path: ~/.cache/pip
-          key: ${{ runner.os }}-pip-${{ hashFiles('**/requirements.txt') }}-${{ hashFiles('.github/workflows/ci.yml') }}
-          restore-keys: |
-            ${{ runner.os }}-pip-
-
-      - name: Install dependencies
-        run: |
-          python -m pip install --upgrade pip
-          # pip install -r requirements.txt
-
-      - name: Install and cache GitHub repo
-        run: |
-          # Install the GitHub repo
-          pip install git+https://github.com/WCRP-CMIP/CMIP-LD.git
-
-          # Save the installed packages for caching
-          pip freeze > installed_packages.txt
-
-      - name: Cache GitHub repo
-        uses: actions/cache@v3
-        with:
-          path: ~/.cache/pip
-          key: ${{ runner.os }}-pip-github-repo-${{ hashFiles('installed_packages.txt') }}
-          restore-keys: |
-            ${{ runner.os }}-pip-github-repo-
-
-
-
-
-      - name: Read issue details
-        id: read_issue
-        run: |
-          repo_owner=${{ github.event.repository.owner.login }}
-          repo_name=${{ github.event.repository.name }}
-          repo_url="https://github.com/${repo_owner}/${repo_name}"
-
-          echo "::set-output name=title::${{ github.event.issue.title }}"
-          echo "::set-output name=submitter::${{ github.event.issue.user.login }}"
-          echo "::set-output name=url::$repo_url"
-
-      - name: Determine script to run
-        id: determine_script
-        env:
-          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        run: |
-          # title=$(echo "${{ steps.read_issue.outputs.title }}" | tr -d '[:punct:]' | tr '[:upper:]' '[:lower:]' | tr -d ' ')
-          # # this flattens the title and removed spaces, punctuation and case
-          # if [[ "$title" == *"addconsortium"* ]]; then
-
-          labels=$(gh issue view ${{ github.event.issue.number }} --json labels --jq '.labels[].name')
-          # {{github.event.issue.labels}}
-
-          for label in $labels; do
-            echo "$label"
-            if [[ "$label" == *"add"* ]]; then
-              echo "::set-output name=script::add_new"
-              break
-
-            fi
-          done
-
-
-      - name: Run Python script
-        id: run_python
-        env:
-          ISSUE_TITLE: ${{ steps.read_issue.outputs.title }}
-          ISSUE_BODY: |
-            ${{ github.event.issue.body }}
-          # pipe should preseve newline properties for multilines
-          ISSUE_SUBMITTER: ${{ steps.read_issue.outputs.submitter }}
-          ISSUE_NUMBER: ${{ github.event.issue.number }}
-          REPO: ${{ steps.read_issue.outputs.url }}
-          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        run: |
-          script="${{ steps.determine_script.outputs.script }}"
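For reference, the payload assembled by these legacy scripts was forwarded with the `dispatch` helper deleted from `action_functions.py`, which POSTs a `repository_dispatch` event to the GitHub API. A sketch following the same pattern; the token and repository URL are placeholders, and a real run would take them from the workflow environment:

```python
import json
from urllib import request

def dispatch(token, repo_api_url, payload):
    # POST a repository_dispatch event, as the removed dispatch() helper did.
    headers = {
        "Accept": "application/vnd.github.everest-preview+json",
        "Authorization": f"token {token}",
        "Content-Type": "application/json",
    }
    req = request.Request(
        f"{repo_api_url}/dispatches",
        data=json.dumps(payload).encode("utf-8"),
        headers=headers,
        method="POST",
    )
    with request.urlopen(req) as response:
        # GitHub answers 204 No Content when the event is accepted.
        return response.getcode() == 204

# Illustrative payload; invoking dispatch() requires a real token and repo.
payload = {
    "event_type": "Institution",
    "client_payload": {"name": "CMIP-IPO", "issue": "1", "author": "example-user"},
}
print(json.dumps(payload, indent=2))
# dispatch("<GH_TOKEN>", "https://api.github.com/repos/WCRP-CMIP/<repo>", payload)
```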