diff --git a/README.md b/README.md
index aaea292..634f686 100644
--- a/README.md
+++ b/README.md
@@ -7,16 +7,18 @@ the Evolve App Server and upload studies.
 
 ```python
 from geojson import FeatureCollection
-from zepben.eas import EasClient, Study, Result, Section, GeoJsonOverlay
+from zepben.eas import EasClient, Study, Result, Section, GeoJsonOverlay, TokenAuth
 
 eas_client = EasClient(
-    host="",
-    port=1234,
-    access_token="",
-    client_id="",
-    username="",
-    password="",
-    client_secret=""
+    TokenAuth(
+        host="",
+        port=1234,
+        client_id="",
+        username="",
+        password="",
+        # Use exactly one combination: client_id + username + password (as here),
+        # client_id + client_secret (M2M), access_token alone, or a token_fetcher.
+    )
 )
 
 eas_client.upload_study(
@@ -66,17 +68,19 @@ To use the asyncio API use `async_upload_study` like so:
 
 ```python
 from aiohttp import ClientSession
 from geojson import FeatureCollection
-from zepben.eas import EasClient, Study, Result, Section, GeoJsonOverlay
+from zepben.eas import EasClient, Study, Result, Section, GeoJsonOverlay, TokenAuth
 
 
 async def upload():
     eas_client = EasClient(
-        host="",
-        port=1234,
-        access_token="",
-        client_id="",
-        username="",
-        password="",
-        client_secret="",
+        TokenAuth(
+            host="",
+            port=1234,
+            client_id="",
+            username="",
+            password="",
+            # Use exactly one combination: client_id + username + password (as here),
+            # client_id + client_secret (M2M), access_token alone, or a token_fetcher.
+        ),
         session=ClientSession(...)
     )
diff --git a/changelog.md b/changelog.md
index cd43eb4..2194bde 100644
--- a/changelog.md
+++ b/changelog.md
@@ -77,6 +77,8 @@ and are now `before_cut_off_profile` and `after_cut_off_profile` respectively.
 ### Breaking Changes
 * Renamed the parameter `calibration_id` to `calibration_name` for the following methods `get_transformer_tap_settings` and `async_get_transformer_tap_settings`. This better reflects that this parameter is the user supplied calibration name rather than EAS's internal calibration run ID.
+* `EasClient` now takes an auth object (a `BaseAuthMethod`, typically a `TokenAuth`) via `auth=` instead of individual
+  credential arguments. This allows the accepted argument combinations to be documented more cleanly.
 
 ### New Features
 * Added optional fields to `ModelConfig` to control network simplification: `simplify_network`, `collapse_negligible_impedances`, and `combine_common_impedances`.
@@ -90,7 +92,9 @@ and are now `before_cut_off_profile` and `after_cut_off_profile` respectively.
 * Added optional field `inverterControlConfig` to `ModelConfig`. This `PVVoltVARVoltWattConfig` allows the configuration of advanced inverter control profiles.
 
 ### Enhancements
-* None.
+* Internal: query bodies are now mostly self-generating via the `to_json` and `build_gql_query_object_model` methods.
+* All request handling logic has been refactored into a single method.
+* Added a `catch_warnings` wrapper function to handle standard warning catching.
 
 ### Fixes
 * `TimePeriod` no longer truncates the `start_time` and `end_time` to midnight(`00:00:00`). `TimePeriod` will now preserve arbitrary start and end times to minute precision.
diff --git a/setup.py b/setup.py
index 352f6fc..223f5ab 100644
--- a/setup.py
+++ b/setup.py
@@ -1,4 +1,4 @@
-# Copyright 2020 Zeppelin Bend Pty Ltd
+# Copyright 2025 Zeppelin Bend Pty Ltd
 #
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
diff --git a/src/zepben/eas/__init__.py b/src/zepben/eas/__init__.py
index b701412..20d79cf 100644
--- a/src/zepben/eas/__init__.py
+++ b/src/zepben/eas/__init__.py
@@ -6,5 +6,6 @@
 #
 
 from zepben.eas.client.eas_client import EasClient
+from zepben.eas.client.auth_method import *
 from zepben.eas.client.study import *
 from zepben.eas.client.work_package import *
diff --git a/src/zepben/eas/client/auth_method.py b/src/zepben/eas/client/auth_method.py
new file mode 100644
index 0000000..e32dcf1
--- /dev/null
+++ b/src/zepben/eas/client/auth_method.py
@@ -0,0 +1,193 @@
+# Copyright 2025 Zeppelin Bend Pty Ltd
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+__all__ = ['BaseAuthMethod', 'TokenAuth']
+
+
+from hashlib import sha256
+from typing import Optional, overload
+
+from zepben.auth import ZepbenTokenFetcher, create_token_fetcher, AuthMethod, create_token_fetcher_managed_identity
+
+
+class BaseAuthMethod:
+    def __init__(self, host, port, protocol='https', verify_certificate=True):
+        """
+        :param host: The domain of the Evolve App Server, e.g. "evolve.local"
+        :param port: The port on which to make requests to the Evolve App Server, e.g. 7624
+        :param protocol: The protocol of the Evolve App Server. Should be either "http" or "https". Must be "https" if
+                         auth is configured. (Defaults to "https")
+        :param verify_certificate: Set this to "False" to disable certificate verification. This will also apply to the
+                                   auth provider if auth is initialised via client id + username + password or
+                                   client_id + client_secret. (Defaults to True)
+        """
+        self._host = host
+        self._port = port
+        self.protocol = protocol
+        self.verify_certificate = verify_certificate
+
+    @property
+    def base_url_args(self) -> dict:
+        return dict(host=self._host, port=self._port, protocol=self.protocol)
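+
+    # Illustrative (sketch): these args mirror exactly what construct_url() takes, e.g.
+    # construct_url(**auth.base_url_args, path="/api/graphql"). The `_do_post_request`
+    # helper that presumably consumes this is not shown in this diff.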
+
+
+class TokenAuth(BaseAuthMethod):
+    """
+    Token auth method for the Evolve App Server Python client when connecting to HTTPS servers.
+
+    Token authentication may be configured in one of three ways:
+    - Providing an access token via the access_token parameter
+    - Specifying the client ID of the Auth0 application via the client_id parameter, plus one of the following:
+        - A username and password pair via the username and password parameters (account authentication)
+        - The client secret via the client_secret parameter (M2M authentication)
+        If this method is used, the auth configuration will be fetched from the Evolve App Server at the path
+        "/api/config/auth".
+    - Specifying a ZepbenTokenFetcher directly via the token_fetcher parameter
+
+    .. code-block:: python
+
+        TokenAuth(host, port, access_token='...')
+        TokenAuth(host, port, token_fetcher=ZepbenTokenFetcher(...))
+        TokenAuth(host, port, client_id='...', username='...', password='...')
+        TokenAuth(host, port, client_id='...', client_secret='...')
+
+    """
+
+    @overload
+    def __init__(self, host, port, protocol='https', verify_certificate=True, *, access_token: str):
+        """
+        :param access_token: The access token used for authentication, generated by Evolve App Server.
+        """
+        ...
+
+    @overload
+    def __init__(self, host, port, protocol='https', verify_certificate=True, *, token_fetcher: ZepbenTokenFetcher):
+        """
+        :param token_fetcher: A ZepbenTokenFetcher used to fetch auth tokens for access to the Evolve App Server.
+        """
+        ...
+
+    @overload
+    def __init__(self, host, port, protocol='https', verify_certificate=True, *, client_id: str, username: str, password: str, client_secret: Optional[str] = None):
+        """
+        :param client_id: The Auth0 client ID used to specify to the auth server which application to request a token for.
+        :param username: The username used for account authentication.
+        :param password: The password used for account authentication.
+        :param client_secret: The Auth0 client secret used for M2M authentication. (Optional)
+        """
+        ...
+
+    @overload
+    def __init__(self, host, port, protocol='https', verify_certificate=True, *, client_id: str, client_secret: str):
+        """
+        :param client_id: The Auth0 client ID used to specify to the auth server which application to request a token for.
+        :param client_secret: The Auth0 client secret used for M2M authentication.
+        """
+        ...
+
+    def __init__(self, host, port, protocol='https', verify_certificate=True, **kwargs):
+        if protocol != 'https':  # TODO: this check only exists to satisfy an existing test; since we could force https here, consider dropping the protocol parameter.
+            raise ValueError(
+                "Incompatible arguments passed to connect to secured Evolve App Server. "
+                "Authentication tokens must be sent via https. "
+                "To resolve this issue, exclude the \"protocol\" argument when initialising TokenAuth.")
+
+        super().__init__(host, port, protocol, verify_certificate)
+        self._token_fetcher = None
+        self._access_token = None
+        self._configure(kwargs)
+
+    @property
+    def token(self) -> str:
+        if self._access_token:
+            return f"Bearer {self._access_token}"
+        elif self._token_fetcher:
+            return self._token_fetcher.fetch_token()
+        raise AttributeError("Neither access_token nor token_fetcher is configured")
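+
+    # Illustrative: TokenAuth('evolve.local', 7624, access_token='abc').token evaluates to
+    # 'Bearer abc', while the token_fetcher/client_id paths instead return whatever
+    # ZepbenTokenFetcher.fetch_token() yields.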
+
+    def _configure(self, kwargs: dict):
+        """
+        Validates that the kwargs that end up being passed to the non-overloaded `__init__` method are of a valid
+        combination.
+        """
+        match sorted(kwargs.keys()):  # sorted, so matching is independent of keyword argument order
+            case ['access_token']:
+                self._access_token = kwargs['access_token']
+            case ['token_fetcher']:
+                self._token_fetcher = kwargs['token_fetcher']
+            case ['client_id', 'client_secret', 'password', 'username']:
+                self._configure_client_id(**kwargs)
+            case ['client_id', 'password', 'username']:
+                self._configure_client_id(**kwargs)
+            case ['client_id', 'client_secret']:
+                self._configure_client_id(**kwargs)
+            case _:
+                raise ValueError(
+                    "Incompatible arguments passed to connect to secured Evolve App Server. "
+                    "Provide exactly one of: access_token; token_fetcher; client_id + username + password "
+                    "(+ optional client_secret); or client_id + client_secret.")
+
+    def _configure_client_id(
+            self, client_id: Optional[str] = None,
+            username: Optional[str] = None,
+            password: Optional[str] = None,
+            client_secret: Optional[str] = None
+    ):
+        self._token_fetcher = create_token_fetcher(
+            conf_address=f"{self.protocol}://{self._host}:{self._port}/api/config/auth",
+            verify_conf=self.verify_certificate,
+        )
+        if self._token_fetcher:
+            scope = (
+                'trusted' if self._token_fetcher.auth_method is AuthMethod.SELF else 'offline_access openid profile email0'
+            )
+
+            self._token_fetcher.token_request_data.update({
+                'client_id': client_id,
+                'scope': scope
+            })
+            self._token_fetcher.refresh_request_data.update({
+                "grant_type": "refresh_token",
+                'client_id': client_id,
+                'scope': scope
+            })
+            if username and password:
+                self._token_fetcher.token_request_data.update({
+                    'grant_type': 'password',
+                    'username': username,
+                    'password':
+                        sha256(password.encode('utf-8')).hexdigest()
+                        if self._token_fetcher.auth_method is AuthMethod.SELF
+                        else password
+                })
+            if client_secret:
+                self._token_fetcher.token_request_data.update({'client_secret': client_secret})
+
+        elif client_secret:
+            self._token_fetcher.token_request_data.update({
+                'grant_type': 'client_credentials',
+                'client_secret': client_secret
+            })
+        else:
+            # Attempt azure managed identity (what a hack)
+            url = "http://169.254.169.254/metadata/identity/oauth2/token?api-version=2018-02-01"
+            self._token_fetcher = create_token_fetcher_managed_identity(
+                identity_url=f"{url}&resource={client_id}",
+                verify_auth=self.verify_certificate
+            )
diff --git a/src/zepben/eas/client/decorators.py b/src/zepben/eas/client/decorators.py
new file mode 100644
index 0000000..7ee0ee7
--- /dev/null
+++ b/src/zepben/eas/client/decorators.py
@@ -0,0 +1,19 @@
+# Copyright 2025 Zeppelin Bend Pty Ltd
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+__all__ = ['catch_warnings']
+
+import functools
+import warnings
+from typing import Callable
+
+
+def catch_warnings(func: Callable) -> Callable:
+    """Run ``func`` inside a ``warnings.catch_warnings()`` context, so any warning filters it installs are restored on return."""
+    @functools.wraps(func)
+    def wrapper(*args, **kwargs):
+        with warnings.catch_warnings():
+            return func(*args, **kwargs)
+    return wrapper
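+
+
+# Illustrative usage (sketch, mirroring the pattern the old EasClient request methods
+# used; `_quiet_request` is a hypothetical method name):
+#
+#     @catch_warnings
+#     def _quiet_request(self):
+#         if not self._verify_certificate:
+#             warnings.filterwarnings("ignore", category=InsecureRequestWarning)
+#         ...  # filters installed above are discarded when _quiet_request returns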
diff --git a/src/zepben/eas/client/eas_client.py b/src/zepben/eas/client/eas_client.py
index c2502d1..7d8eff3 100644
--- a/src/zepben/eas/client/eas_client.py
+++ b/src/zepben/eas/client/eas_client.py
@@ -1,32 +1,32 @@
-# Copyright 2020 Zeppelin Bend Pty Ltd
+# Copyright 2025 Zeppelin Bend Pty Ltd
 #
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+__all__ = ["EasClient"]
+
 import ssl
 import warnings
 from asyncio import get_event_loop
 from dataclasses import asdict
 from datetime import datetime
-from hashlib import sha256
 from http import HTTPStatus
 from json import dumps
-from typing import Optional, List
+from typing import Optional, List, Any
 
 import aiohttp
 from aiohttp import ClientSession
 from urllib3.exceptions import InsecureRequestWarning
 
-from zepben.auth import AuthMethod, ZepbenTokenFetcher, create_token_fetcher, create_token_fetcher_managed_identity
+from zepben.eas.client.auth_method import BaseAuthMethod, TokenAuth
+from zepben.eas.client.decorators import catch_warnings
 from zepben.eas.client.feeder_load_analysis_input import FeederLoadAnalysisInput
-from zepben.eas.client.ingestor import IngestorConfigInput, IngestorRunsFilterInput, IngestorRunsSortCriteriaInput
+from zepben.eas.client.ingestor import IngestorConfigInput, IngestorRunsFilterInput, IngestorRunsSortCriteriaInput, IngestorRun
 from zepben.eas.client.opendss import OpenDssConfig, GetOpenDssModelsFilterInput, GetOpenDssModelsSortCriteriaInput
 from zepben.eas.client.study import Study
 from zepben.eas.client.util import construct_url
-from zepben.eas.client.work_package import WorkPackageConfig, FixedTime, TimePeriod, ForecastConfig, FeederConfigs, \
-    GeneratorConfig, ModelConfig
-
-__all__ = ["EasClient"]
+from zepben.eas.client.work_package import WorkPackageConfig, GeneratorConfig, ModelConfig, WorkPackageProgress
 
 
 class EasClient:
@@ -35,479 +35,122 @@ class EasClient:
     """
 
     def __init__(
-            self,
-            host: str,
-            port: int,
-            protocol: str = "https",
-            client_id: Optional[str] = None,
-            username: Optional[str] = None,
-            password: Optional[str] = None,
-            access_token: Optional[str] = None,
-            client_secret: Optional[str] = None,
-            token_fetcher: Optional[ZepbenTokenFetcher] = None,
-            verify_certificate: bool = True,
-            ca_filename: Optional[str] = None,
-            session: ClientSession = None,
-            json_serialiser=None
+            self,
+            auth: Optional[BaseAuthMethod] = None,
+            ca_filename: Optional[str] = None,
+            session: ClientSession = None,
+            json_serialiser=None
     ):
         """
         Construct a client for the Evolve App Server. If the server is HTTPS, authentication may be configured.
 
-        Authentication may be configured in one of three ways:
-        - Providing an access token via the access_token parameter
-        - Specifying the client ID of the Auth0 application via the client_id parameter, plus one of the following:
-            - A username and password pair via the username and password parameters (account authentication)
-            - The client secret via the client_secret parameter (M2M authentication)
-            If this method is used, the auth configuration will be fetched from the Evolve App Server at the path
-            "/api/config/auth".
-        - Specifying a ZepbenTokenFetcher directly via the token_fetcher parameter
 
-        Address parameters:
-        :param host: The domain of the Evolve App Server, e.g. "evolve.local"
-        :param port: The port on which to make requests to the Evolve App Server, e.g. 7624
-        :param protocol: The protocol of the Evolve App Server. Should be either "http" or "https". Must be "https" if
-                         auth is configured. (Defaults to "https")
-
-        Authentication parameters:
-        :param client_id: The Auth0 client ID used to specify to the auth server which application to request a token
-                          for. (Optional)
-        :param username: The username used for account authentication. (Optional)
-        :param password: The password used for account authentication. (Optional)
-        :param access_token: The access token used for authentication, generated by Evolve App Server. (Optional)
-        :param client_secret: The Auth0 client secret used for M2M authentication. (Optional)
-        :param token_fetcher: A ZepbenTokenFetcher used to fetch auth tokens for access to the Evolve App Server.
-                              (Optional)
+
+        Authentication parameters:
+        :param auth: Auth method to use for the connection, e.g. a `TokenAuth` instance. Omit for unsecured
+                     servers. (Optional)
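+
+                     e.g. (illustrative) ``EasClient(TokenAuth(host="evolve.local", port=7624, access_token="..."))``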
 
         HTTP/HTTPS parameters:
-        :param verify_certificate: Set this to "False" to disable certificate verification. This will also apply to the
-                                   auth provider if auth is initialised via client id + username + password or
-                                   client_id + client_secret. (Defaults to True)
         :param ca_filename: Path to CA file to use for verification. (Optional)
         :param session: aiohttp ClientSession to use, if not provided a new session will be created for you. You should
                         typically only use one aiohttp session per application.
         :param json_serialiser: JSON serialiser to use for requests e.g. ujson.dumps. (Defaults to json.dumps)
         """
-        self._protocol = protocol
-        self._host = host
-        self._port = port
-        self._verify_certificate = verify_certificate
-        self._ca_filename = ca_filename
-        self._access_token = access_token
-        if protocol != "https" and (token_fetcher or client_id or access_token):
-            raise ValueError(
-                "Incompatible arguments passed to connect to secured Evolve App Server. "
-                "Authentication tokens must be sent via https. "
-                "To resolve this issue, exclude the \"protocol\" argument when initialising the EasClient.")
-
-        if access_token and (client_id or client_secret or username or password or token_fetcher):
-            raise ValueError(
-                "Incompatible arguments passed to connect to secured Evolve App Server. "
-                "You cannot provide multiple types of authentication. "
-                "When using an access_token, do not provide client_id, client_secret, username, password, or token_fetcher."
-            )
-
-        if token_fetcher and (client_id or client_secret or username or password or access_token):
-            raise ValueError(
-                "Incompatible arguments passed to connect to secured Evolve App Server. "
-                "You cannot provide both a token_fetcher and credentials, "
-                "please provide either client_id, client_id + client_secret, username + password, access_token or token_fetcher."
-            )
-
-        if client_id:
-            self._token_fetcher = create_token_fetcher(
-                conf_address=f"{self._protocol}://{self._host}:{self._port}/api/config/auth",
-                verify_conf=self._verify_certificate,
-            )
-            if self._token_fetcher:
-                self._token_fetcher.token_request_data.update({
-                    'client_id': client_id,
-                    'scope':
-                        'trusted' if self._token_fetcher.auth_method is AuthMethod.SELF
-                        else 'offline_access openid profile email0'
-                })
-                self._token_fetcher.refresh_request_data.update({
-                    "grant_type": "refresh_token",
-                    'client_id': client_id,
-                    'scope':
-                        'trusted' if self._token_fetcher.auth_method is AuthMethod.SELF
-                        else 'offline_access openid profile email0'
-                })
-                if username and password:
-                    self._token_fetcher.token_request_data.update({
-                        'grant_type': 'password',
-                        'username': username,
-                        'password':
-                            sha256(password.encode('utf-8')).hexdigest()
-                            if self._token_fetcher.auth_method is AuthMethod.SELF
-                            else password
-                    })
-                if client_secret:
-                    self._token_fetcher.token_request_data.update({'client_secret': client_secret})
-            elif client_secret:
-                self._token_fetcher.token_request_data.update({
-                    'grant_type': 'client_credentials',
-                    'client_secret': client_secret
-                })
-            else:
-                # Attempt azure managed identity (what a hack)
-                url = "http://169.254.169.254/metadata/identity/oauth2/token?api-version=2018-02-01"
-                self._token_fetcher = create_token_fetcher_managed_identity(
-                    identity_url=f"{url}&resource={client_id}",
-                    verify_auth=self._verify_certificate
-                )
-        elif token_fetcher:
-            self._token_fetcher = token_fetcher
-        else:
-            self._token_fetcher = None
+        self._ca_filename: Optional[str] = ca_filename
+        self._verify_certificate: bool = auth.verify_certificate if auth else True
+        self._auth: Optional[BaseAuthMethod] = auth
 
         if session is None:
             conn = aiohttp.TCPConnector(limit=200, limit_per_host=0)
             timeout = aiohttp.ClientTimeout(total=60)
-            self.session = aiohttp.ClientSession(json_serialize=json_serialiser or dumps, connector=conn,
-                                                 timeout=timeout)
+            self.session: ClientSession = aiohttp.ClientSession(
+                json_serialize=json_serialiser or dumps,
+                connector=conn,
+                timeout=timeout
+            )
         else:
             self.session = session
 
     def close(self):
         return get_event_loop().run_until_complete(self.aclose())
 
+    def __del__(self):
+        try:
+            self.close()
+        except Exception:
+            pass  # best effort only; the event loop may already be closed at interpreter shutdown
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        self.close()
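+
+    # Illustrative: with the context-manager support above, callers can write
+    #     with EasClient(TokenAuth(host="evolve.local", port=7624, access_token="...")) as client:
+    #         client.upload_study(study)
+    # and the underlying aiohttp session is closed automatically on exit.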
 
     async def aclose(self):
         await self.session.close()
 
     def _get_request_headers(self, content_type: str = "application/json") -> dict:
         headers = {"content-type": content_type}
-        if self._access_token:
-            headers["authorization"] = f"Bearer {self._access_token}"
-        elif self._token_fetcher:
-            headers["authorization"] = self._token_fetcher.fetch_token()
+        if isinstance(self._auth, TokenAuth):
+            if token := self._auth.token:
+                headers["authorization"] = token
         return headers
 
-    def generator_config_to_json(self, generator_config: Optional[GeneratorConfig]) -> Optional[dict]:
-        return generator_config and {
-            "model": generator_config.model and {
-                "vmPu": generator_config.model.vm_pu,
-                "loadVMinPu": generator_config.model.load_vmin_pu,
-                "loadVMaxPu": generator_config.model.load_vmax_pu,
-                "genVMinPu": generator_config.model.gen_vmin_pu,
-                "genVMaxPu": generator_config.model.gen_vmax_pu,
-                "loadModel": generator_config.model.load_model,
-                "collapseSWER": generator_config.model.collapse_swer,
-                "calibration": generator_config.model.calibration,
-                "pFactorBaseExports": generator_config.model.p_factor_base_exports,
-                "pFactorForecastPv": generator_config.model.p_factor_forecast_pv,
-                "pFactorBaseImports": 
generator_config.model.p_factor_base_imports, - "fixSinglePhaseLoads": generator_config.model.fix_single_phase_loads, - "maxSinglePhaseLoad": generator_config.model.max_single_phase_load, - "fixOverloadingConsumers": generator_config.model.fix_overloading_consumers, - "maxLoadTxRatio": generator_config.model.max_load_tx_ratio, - "maxGenTxRatio": generator_config.model.max_gen_tx_ratio, - "fixUndersizedServiceLines": generator_config.model.fix_undersized_service_lines, - "maxLoadServiceLineRatio": generator_config.model.max_load_service_line_ratio, - "maxLoadLvLineRatio": generator_config.model.max_load_lv_line_ratio, - "simplifyNetwork": generator_config.model.simplify_network, - "collapseLvNetworks": generator_config.model.collapse_lv_networks, - "collapseNegligibleImpedances": generator_config.model.collapse_negligible_impedances, - "combineCommonImpedances": generator_config.model.combine_common_impedances, - "feederScenarioAllocationStrategy": generator_config.model.feeder_scenario_allocation_strategy and generator_config.model.feeder_scenario_allocation_strategy.name, - "closedLoopVRegEnabled": generator_config.model.closed_loop_v_reg_enabled, - "closedLoopVRegReplaceAll": generator_config.model.closed_loop_v_reg_replace_all, - "closedLoopVRegSetPoint": generator_config.model.closed_loop_v_reg_set_point, - "closedLoopVBand": generator_config.model.closed_loop_v_band, - "closedLoopTimeDelay": generator_config.model.closed_loop_time_delay, - "closedLoopVLimit": generator_config.model.closed_loop_v_limit, - "defaultTapChangerTimeDelay": generator_config.model.default_tap_changer_time_delay, - "defaultTapChangerSetPointPu": generator_config.model.default_tap_changer_set_point_pu, - "defaultTapChangerBand": generator_config.model.default_tap_changer_band, - "splitPhaseDefaultLoadLossPercentage": generator_config.model.split_phase_default_load_loss_percentage, - "splitPhaseLVKV": generator_config.model.split_phase_lv_kv, - "swerVoltageToLineVoltage": generator_config.model.swer_voltage_to_line_voltage, - "loadPlacement": generator_config.model.load_placement and generator_config.model.load_placement.name, - "loadIntervalLengthHours": generator_config.model.load_interval_length_hours, - "meterPlacementConfig": generator_config.model.meter_placement_config and { - "feederHead": generator_config.model.meter_placement_config.feeder_head, - "distTransformers": generator_config.model.meter_placement_config.dist_transformers, - "switchMeterPlacementConfigs": generator_config.model.meter_placement_config.switch_meter_placement_configs and [ - { - "meterSwitchClass": spc.meter_switch_class and spc.meter_switch_class.name, - "namePattern": spc.name_pattern - } for spc in - generator_config.model.meter_placement_config.switch_meter_placement_configs - ], - "energyConsumerMeterGroup": generator_config.model.meter_placement_config.energy_consumer_meter_group - }, - "seed": generator_config.model.seed, - "defaultLoadWatts": generator_config.model.default_load_watts, - "defaultGenWatts": generator_config.model.default_gen_watts, - "defaultLoadVar": generator_config.model.default_load_var, - "defaultGenVar": generator_config.model.default_gen_var, - "transformerTapSettings": generator_config.model.transformer_tap_settings, - "ctPrimScalingFactor": generator_config.model.ct_prim_scaling_factor, - "useSpanLevelThreshold": generator_config.model.use_span_level_threshold, - "ratingThreshold": generator_config.model.rating_threshold, - "simplifyPLSIThreshold": generator_config.model.simplify_plsi_threshold, - 
"emergAmpScaling": generator_config.model.emerg_amp_scaling, - "inverterControlConfig": generator_config.model.inverter_control_config and { - "cutOffDate": generator_config.model.inverter_control_config.cut_off_date and generator_config.model.inverter_control_config.cut_off_date.isoformat(), - "beforeCutOffProfile": generator_config.model.inverter_control_config.before_cut_off_profile, - "afterCutOffProfile": generator_config.model.inverter_control_config.after_cut_off_profile - } - }, - "solve": generator_config.solve and { - "normVMinPu": generator_config.solve.norm_vmin_pu, - "normVMaxPu": generator_config.solve.norm_vmax_pu, - "emergVMinPu": generator_config.solve.emerg_vmin_pu, - "emergVMaxPu": generator_config.solve.emerg_vmax_pu, - "baseFrequency": generator_config.solve.base_frequency, - "voltageBases": generator_config.solve.voltage_bases, - "maxIter": generator_config.solve.max_iter, - "maxControlIter": generator_config.solve.max_control_iter, - "mode": generator_config.solve.mode and generator_config.solve.mode.name, - "stepSizeMinutes": generator_config.solve.step_size_minutes - }, - "rawResults": generator_config.raw_results and { - "energyMeterVoltagesRaw": generator_config.raw_results.energy_meter_voltages_raw, - "energyMetersRaw": generator_config.raw_results.energy_meters_raw, - "resultsPerMeter": generator_config.raw_results.results_per_meter, - "overloadsRaw": generator_config.raw_results.overloads_raw, - "voltageExceptionsRaw": generator_config.raw_results.voltage_exceptions_raw - }, - "nodeLevelResults": generator_config.node_level_results and { - "collectVoltage": generator_config.node_level_results.collect_voltage, - "collectCurrent": generator_config.node_level_results.collect_current, - "collectPower": generator_config.node_level_results.collect_power, - "mridsToCollect": generator_config.node_level_results.mrids_to_collect, - "collectAllSwitches": generator_config.node_level_results.collect_all_switches, - "collectAllTransformers": generator_config.node_level_results.collect_all_transformers, - "collectAllConductors": generator_config.node_level_results.collect_all_conductors, - "collectAllEnergyConsumers": generator_config.node_level_results.collect_all_energy_consumers, - } - } + @staticmethod + def build_request(query: str, variables: Optional[Any] = None) -> dict: + _json = {"query": query} + if variables is not None: + _json.update({"variables": variables}) + return _json - def work_package_config_to_json(self, work_package: Optional[WorkPackageConfig]) -> Optional[dict]: - return { - "feederConfigs": { - "configs": [ - { - "feeder": config.feeder, - "years": config.years, - "scenarios": config.scenarios, - "timePeriod": { - "startTime": config.load_time.start_time.isoformat(), - "endTime": config.load_time.end_time.isoformat(), - "overrides": config.load_time.load_overrides and [ - { - "loadId": key, - "loadWattsOverride": value.load_watts, - "genWattsOverride": value.gen_watts, - "loadVarOverride": value.load_var, - "genVarOverride": value.gen_var, - } for key, value in config.load_time.load_overrides.items() - ] - } if isinstance(config.load_time, TimePeriod) else None, - "fixedTime": config.load_time and { - "loadTime": config.load_time.load_time.isoformat(), - "overrides": config.load_time.load_overrides and [ - { - "loadId": key, - "loadWattsOverride": value.load_watts, - "genWattsOverride": value.gen_watts, - "loadVarOverride": value.load_var, - "genVarOverride": value.gen_var, - } for key, value in config.load_time.load_overrides.items() - ] - } if 
isinstance(config.load_time, FixedTime) else None, - } for config in work_package.syf_config.configs - ] - } if isinstance(work_package.syf_config, FeederConfigs) else None, - "forecastConfig": { - "feeders": work_package.syf_config.feeders, - "years": work_package.syf_config.years, - "scenarios": work_package.syf_config.scenarios, - "timePeriod": { - "startTime": work_package.syf_config.load_time.start_time.isoformat(), - "endTime": work_package.syf_config.load_time.end_time.isoformat(), - "overrides": work_package.syf_config.load_time.load_overrides and [ - { - "loadId": key, - "loadWattsOverride": value.load_watts, - "genWattsOverride": value.gen_watts, - "loadVarOverride": value.load_var, - "genVarOverride": value.gen_var, - } for key, value in work_package.syf_config.load_time.load_overrides.items() - ] - } if isinstance(work_package.syf_config.load_time, TimePeriod) else None, - "fixedTime": work_package.syf_config.load_time and { - "loadTime": work_package.syf_config.load_time.load_time.isoformat(), - "overrides": work_package.syf_config.load_time.load_overrides and [ - { - "loadId": key, - "loadWattsOverride": value.load_watts, - "genWattsOverride": value.gen_watts, - "loadVarOverride": value.load_var, - "genVarOverride": value.gen_var, - } for key, value in work_package.syf_config.load_time.load_overrides.items() - ] - } if isinstance(work_package.syf_config.load_time, FixedTime) else None - } if isinstance(work_package.syf_config, ForecastConfig) else None, - "qualityAssuranceProcessing": work_package.quality_assurance_processing, - "generatorConfig": self.generator_config_to_json(work_package.generator_config), - "executorConfig": {}, - "resultProcessorConfig": work_package.result_processor_config and { - "storedResults": work_package.result_processor_config.stored_results and { - "energyMeterVoltagesRaw": work_package.result_processor_config.stored_results.energy_meter_voltages_raw, - "energyMetersRaw": work_package.result_processor_config.stored_results.energy_meters_raw, - "overloadsRaw": work_package.result_processor_config.stored_results.overloads_raw, - "voltageExceptionsRaw": work_package.result_processor_config.stored_results.voltage_exceptions_raw, - }, - "metrics": work_package.result_processor_config.metrics and { - "calculatePerformanceMetrics": work_package.result_processor_config.metrics.calculate_performance_metrics - }, - "writerConfig": work_package.result_processor_config.writer_config and { - "writerType": work_package.result_processor_config.writer_config.writer_type and work_package.result_processor_config.writer_config.writer_type.name, - "outputWriterConfig": work_package.result_processor_config.writer_config.output_writer_config and { - "enhancedMetricsConfig": work_package.result_processor_config.writer_config.output_writer_config.enhanced_metrics_config and { - "populateEnhancedMetrics": work_package.result_processor_config.writer_config.output_writer_config.enhanced_metrics_config.populate_enhanced_metrics, - "populateEnhancedMetricsProfile": work_package.result_processor_config.writer_config.output_writer_config.enhanced_metrics_config.populate_enhanced_metrics_profile, - "populateDurationCurves": work_package.result_processor_config.writer_config.output_writer_config.enhanced_metrics_config.populate_duration_curves, - "populateConstraints": work_package.result_processor_config.writer_config.output_writer_config.enhanced_metrics_config.populate_constraints, - "populateWeeklyReports": 
work_package.result_processor_config.writer_config.output_writer_config.enhanced_metrics_config.populate_weekly_reports, - "calculateNormalForLoadThermal": work_package.result_processor_config.writer_config.output_writer_config.enhanced_metrics_config.calculate_normal_for_load_thermal, - "calculateEmergForLoadThermal": work_package.result_processor_config.writer_config.output_writer_config.enhanced_metrics_config.calculate_emerg_for_load_thermal, - "calculateNormalForGenThermal": work_package.result_processor_config.writer_config.output_writer_config.enhanced_metrics_config.calculate_normal_for_gen_thermal, - "calculateEmergForGenThermal": work_package.result_processor_config.writer_config.output_writer_config.enhanced_metrics_config.calculate_emerg_for_gen_thermal, - "calculateCO2": work_package.result_processor_config.writer_config.output_writer_config.enhanced_metrics_config.calculate_co2 - } - } - } - }, - "intervention": work_package.intervention and { - "baseWorkPackageId": work_package.intervention.base_work_package_id, - "yearRange": { - "maxYear": work_package.intervention.year_range.max_year, - "minYear": work_package.intervention.year_range.min_year - }, - "allocationLimitPerYear": work_package.intervention.allocation_limit_per_year, - "interventionType": work_package.intervention.intervention_type.name, - "candidateGeneration": work_package.intervention.candidate_generation and { - "type": work_package.intervention.candidate_generation.type.name, - "interventionCriteriaName": work_package.intervention.candidate_generation.intervention_criteria_name, - "voltageDeltaAvgThreshold": work_package.intervention.candidate_generation.voltage_delta_avg_threshold, - "voltageUnderLimitHoursThreshold": work_package.intervention.candidate_generation.voltage_under_limit_hours_threshold, - "voltageOverLimitHoursThreshold": work_package.intervention.candidate_generation.voltage_over_limit_hours_threshold, - "tapWeightingFactorLowerThreshold": work_package.intervention.candidate_generation.tap_weighting_factor_lower_threshold, - "tapWeightingFactorUpperThreshold": work_package.intervention.candidate_generation.tap_weighting_factor_upper_threshold, - }, - "allocationCriteria": work_package.intervention.allocation_criteria, - "specificAllocationInstance": work_package.intervention.specific_allocation_instance, - "phaseRebalanceProportions": work_package.intervention.phase_rebalance_proportions and { - "a": work_package.intervention.phase_rebalance_proportions.a, - "b": work_package.intervention.phase_rebalance_proportions.b, - "c": work_package.intervention.phase_rebalance_proportions.c - }, - "dvms": work_package.intervention.dvms and { - "lowerLimit": work_package.intervention.dvms.lower_limit, - "upperLimit": work_package.intervention.dvms.upper_limit, - "lowerPercentile": work_package.intervention.dvms.lower_percentile, - "upperPercentile": work_package.intervention.dvms.upper_percentile, - "maxIterations": work_package.intervention.dvms.max_iterations, - "regulatorConfig": { - "puTarget": work_package.intervention.dvms.regulator_config.pu_target, - "puDeadbandPercent": work_package.intervention.dvms.regulator_config.pu_deadband_percent, - "maxTapChangePerStep": work_package.intervention.dvms.regulator_config.max_tap_change_per_step, - "allowPushToLimit": work_package.intervention.dvms.regulator_config.allow_push_to_limit - } - } - } - } - - def run_hosting_capacity_work_package(self, work_package: WorkPackageConfig): + def run_hosting_capacity_work_package(self, work_package: 
WorkPackageConfig) -> dict: """ Send request to hosting capacity service to run work package - :param work_package: An instance of the `WorkPackageConfig` data class representing the work package configuration for the run + :param work_package: An instance of the `WorkPackageConfig` data class representing the work package + configuration for the run :return: The HTTP response received from the Evolve App Server after attempting to run work package """ return get_event_loop().run_until_complete(self.async_run_hosting_capacity_work_package(work_package)) - def get_work_package_cost_estimation(self, work_package: WorkPackageConfig): + def get_work_package_cost_estimation(self, work_package: WorkPackageConfig) -> dict: """ Send request to hosting capacity service to get an estimate cost of supplied work package - :param work_package: An instance of the `WorkPackageConfig` data class representing the work package configuration for the run + :param work_package: An instance of the `WorkPackageConfig` data class representing the work package + configuration for the run :return: The HTTP response received from the Evolve App Server after attempting to run work package """ return get_event_loop().run_until_complete(self.async_get_work_package_cost_estimation(work_package)) - async def async_get_work_package_cost_estimation(self, work_package: WorkPackageConfig): + async def async_get_work_package_cost_estimation(self, work_package: WorkPackageConfig) -> dict: """ Send asynchronous request to hosting capacity service to get an estimate cost of supplied work package - :param work_package: An instance of the `WorkPackageConfig` data class representing the work package configuration for the run + :param work_package: An instance of the `WorkPackageConfig` data class representing the work package + configuration for the run :return: The HTTP response received from the Evolve App Server after attempting to run work package """ - with warnings.catch_warnings(): - if not self._verify_certificate: - warnings.filterwarnings("ignore", category=InsecureRequestWarning) - json = { - "query": """ - query getWorkPackageCostEstimation($input: WorkPackageInput!) { - getWorkPackageCostEstimation(input: $input) - } - """, - "variables": { - "workPackageName": work_package.name, - "input": self.work_package_config_to_json(work_package) - } - } - if self._verify_certificate: - sslcontext = ssl.create_default_context(cafile=self._ca_filename) - - async with self.session.post( - construct_url(protocol=self._protocol, host=self._host, port=self._port, path="/api/graphql"), - headers=self._get_request_headers(), - json=json, - ssl=sslcontext if self._verify_certificate else False - ) as response: - if response.ok: - return await response.json() - else: - response.raise_for_status() + return await self._do_post_request( + self.build_request(""" + query getWorkPackageCostEstimation($input: WorkPackageInput!) 
{ + getWorkPackageCostEstimation(input: $input) + }""", work_package.to_json() + ) + ) - async def async_run_hosting_capacity_work_package(self, work_package: WorkPackageConfig): + async def async_run_hosting_capacity_work_package(self, work_package: WorkPackageConfig) -> dict: """ Send asynchronous request to hosting capacity service to run work package - :param work_package: An instance of the `WorkPackageConfig` data class representing the work package configuration for the run + :param work_package: An instance of the `WorkPackageConfig` data class representing the work package + configuration for the run :return: The HTTP response received from the Evolve App Server after attempting to run work package """ - with warnings.catch_warnings(): - if not self._verify_certificate: - warnings.filterwarnings("ignore", category=InsecureRequestWarning) - json = { - "query": """ - mutation runWorkPackage($input: WorkPackageInput!, $workPackageName: String!) { - runWorkPackage(input: $input, workPackageName: $workPackageName) - } - """, - "variables": { - "workPackageName": work_package.name, - "input": self.work_package_config_to_json(work_package) - } - } - if self._verify_certificate: - sslcontext = ssl.create_default_context(cafile=self._ca_filename) - - async with self.session.post( - construct_url(protocol=self._protocol, host=self._host, port=self._port, path="/api/graphql"), - headers=self._get_request_headers(), - json=json, - ssl=sslcontext if self._verify_certificate else False - ) as response: - if response.ok: - return await response.json() - else: - response.raise_for_status() + return await self._do_post_request( + self.build_request(""" + mutation runWorkPackage($input: WorkPackageInput!, $workPackageName: String!) { + runWorkPackage(input: $input, workPackageName: $workPackageName) + }""", work_package.to_json() + ) + ) - def cancel_hosting_capacity_work_package(self, work_package_id: str): + def cancel_hosting_capacity_work_package(self, work_package_id: str) -> dict: """ Send request to hosting capacity service to cancel a running work package @@ -516,39 +159,24 @@ def cancel_hosting_capacity_work_package(self, work_package_id: str): """ return get_event_loop().run_until_complete(self.async_cancel_hosting_capacity_work_package(work_package_id)) - async def async_cancel_hosting_capacity_work_package(self, work_package_id: str): + async def async_cancel_hosting_capacity_work_package(self, work_package_id: str) -> dict: """ Send asynchronous request to hosting capacity service to cancel a running work package :param work_package_id: The id of the running work package to cancel :return: The HTTP response received from the Evolve App Server after attempting to cancel work package """ - with warnings.catch_warnings(): - if not self._verify_certificate: - warnings.filterwarnings("ignore", category=InsecureRequestWarning) - json = { - "query": """ - mutation cancelWorkPackage($workPackageId: ID!) 
{
-                        cancelWorkPackage(workPackageId: $workPackageId)
-                    }
-                """,
-                "variables": {"workPackageId": work_package_id}
-            }
-            if self._verify_certificate:
-                sslcontext = ssl.create_default_context(cafile=self._ca_filename)
-
-            async with self.session.post(
-                    construct_url(protocol=self._protocol, host=self._host, port=self._port, path="/api/graphql"),
-                    headers=self._get_request_headers(),
-                    json=json,
-                    ssl=sslcontext if self._verify_certificate else False
-            ) as response:
-                if response.ok:
-                    return await response.json()
-                else:
-                    response.raise_for_status()
+        return await self._do_post_request(
+            self.build_request("""
+                mutation cancelWorkPackage($workPackageId: ID!) {
+                    cancelWorkPackage(workPackageId: $workPackageId)
+                }""", {
+                    "workPackageId": work_package_id
+                }
+            )
+        )
 
-    def get_hosting_capacity_work_packages_progress(self):
+    def get_hosting_capacity_work_packages_progress(self) -> dict:
         """
         Retrieve running work packages progress information from hosting capacity service
 
@@ -556,277 +184,138 @@
         :return: The HTTP response received from the Evolve App Server after requesting work packages progress info
         """
         return get_event_loop().run_until_complete(self.async_get_hosting_capacity_work_packages_progress())
 
-    async def async_get_hosting_capacity_work_packages_progress(self):
+    async def async_get_hosting_capacity_work_packages_progress(self) -> dict:
         """
         Asynchronously retrieve running work packages progress information from hosting capacity service
 
         :return: The HTTP response received from the Evolve App Server after requesting work packages progress info
         """
-        with warnings.catch_warnings():
-            if not self._verify_certificate:
-                warnings.filterwarnings("ignore", category=InsecureRequestWarning)
-            json = {
-                "query": """
-                    query getWorkPackageProgress {
-                        getWorkPackageProgress {
-                            pending
-                            inProgress {
-                                id
-                                progressPercent
-                                pending
-                                generation
-                                execution
-                                resultProcessing
-                                failureProcessing
-                                complete
-                            }
-                        }
-                    }
-                """,
-                "variables": {}
-            }
-            if self._verify_certificate:
-                sslcontext = ssl.create_default_context(cafile=self._ca_filename)
-
-            async with self.session.post(
-                    construct_url(protocol=self._protocol, host=self._host, port=self._port, path="/api/graphql"),
-                    headers=self._get_request_headers(),
-                    json=json,
-                    ssl=sslcontext if self._verify_certificate else False
-            ) as response:
-                if response.ok:
-                    return await response.json()
-                else:
-                    response.raise_for_status()
+        return await self._do_post_request(
+            self.build_request("""
+                query getWorkPackageProgress {
+                    %s
+                }""" % WorkPackageProgress.build_gql_query_object_model()
+            )
+        )
 
-    def run_feeder_load_analysis_report(self, feeder_load_analysis_input: FeederLoadAnalysisInput):
+    def run_feeder_load_analysis_report(self, feeder_load_analysis_input: FeederLoadAnalysisInput) -> dict:
         """
         Send request to evolve app server to run a feeder load analysis study
 
-        :param feeder_load_analysis_input:: An instance of the `FeederLoadAnalysisConfig` data class representing the configuration for the run
+        :param feeder_load_analysis_input: An instance of the `FeederLoadAnalysisInput` data class representing the
+            configuration for the run
         :return: The HTTP response received from the Evolve App Server after attempting to run work package
         """
         return get_event_loop().run_until_complete(
            self.async_run_feeder_load_analysis_report(feeder_load_analysis_input))
 
-    async def async_run_feeder_load_analysis_report(self, feeder_load_analysis_input: FeederLoadAnalysisInput):
+    async def async_run_feeder_load_analysis_report(self, feeder_load_analysis_input: FeederLoadAnalysisInput) -> dict:
         """
Asynchronously send request to evolve app server to run a feeder load analysis study :return: The HTTP response received from the Evolve App Server after requesting a feeder load analysis report """ - with warnings.catch_warnings(): - if not self._verify_certificate: - warnings.filterwarnings("ignore", category=InsecureRequestWarning) - json = { - "query": - """ - mutation runFeederLoadAnalysis($input: FeederLoadAnalysisInput!) { - runFeederLoadAnalysis(input: $input) - } - """, - "variables": { - "input": { - "feeders": feeder_load_analysis_input.feeders, - "substations": feeder_load_analysis_input.substations, - "subGeographicalRegions": feeder_load_analysis_input.sub_geographical_regions, - "geographicalRegions": feeder_load_analysis_input.feeders, - "startDate": feeder_load_analysis_input.start_date, - "endDate": feeder_load_analysis_input.end_date, - "fetchLvNetwork": feeder_load_analysis_input.fetch_lv_network, - "processFeederLoads": feeder_load_analysis_input.process_feeder_loads, - "processCoincidentLoads": feeder_load_analysis_input.process_coincident_loads, - "produceConductorReport": True, # We currently only support conductor report - "aggregateAtFeederLevel": feeder_load_analysis_input.aggregate_at_feeder_level, - "output": feeder_load_analysis_input.output, - "flaForecastConfig": - ({ - "scenarioID": feeder_load_analysis_input.fla_forecast_config.scenario_id, - "year": feeder_load_analysis_input.fla_forecast_config.year, - "pvUpgradeThreshold": feeder_load_analysis_input.fla_forecast_config.pv_upgrade_threshold, - "bessUpgradeThreshold": feeder_load_analysis_input.fla_forecast_config.bess_upgrade_threshold, - "seed": feeder_load_analysis_input.fla_forecast_config.seed - } if feeder_load_analysis_input.fla_forecast_config else None) - } + _json = feeder_load_analysis_input.to_json() + _json['geographicalRegions'] = _json.get('feeders', None) + return await self._do_post_request( + self.build_request(""" + mutation runFeederLoadAnalysis($input: FeederLoadAnalysisInput!) { + runFeederLoadAnalysis(input: $input) + }""",{ + "input": _json } - } - if self._verify_certificate: - sslcontext = ssl.create_default_context(cafile=self._ca_filename) - - async with self.session.post( - construct_url(protocol=self._protocol, host=self._host, port=self._port, path="/api/graphql"), - headers=self._get_request_headers(), - json=json, - ssl=sslcontext if self._verify_certificate else False - ) as response: - if response.ok: - return await response.json() - else: - response.raise_for_status() + ) + ) - def upload_study(self, study: Study): + def upload_study(self, study: Study) -> dict: """ Uploads a new study to the Evolve App Server + :param study: An instance of a data class representing a new study """ return get_event_loop().run_until_complete(self.async_upload_study(study)) - async def async_upload_study(self, study: Study): + async def async_upload_study(self, study: Study) -> dict: """ Uploads a new study to the Evolve App Server + :param study: An instance of a data class representing a new study :return: The HTTP response received from the Evolve App Server after attempting to upload the study """ - with warnings.catch_warnings(): - if not self._verify_certificate: - warnings.filterwarnings("ignore", category=InsecureRequestWarning) - json = { - "query": """ - mutation uploadStudy($study: StudyInput!) 
{ - addStudies(studies: [$study]) - } - """, - "variables": { - "study": { - "name": study.name, - "description": study.description, - "tags": study.tags, - "styles": study.styles, - "results": [{ - "name": result.name, - "geoJsonOverlay": result.geo_json_overlay and { - "data": result.geo_json_overlay.data, - "sourceProperties": result.geo_json_overlay.source_properties, - "styles": result.geo_json_overlay.styles - }, - "stateOverlay": result.state_overlay and { - "data": result.state_overlay.data, - "styles": result.state_overlay.styles - }, - "sections": [{ - "type": section.type, - "name": section.name, - "description": section.description, - "columns": section.columns, - "data": section.data - } for section in result.sections] - } for result in study.results] - } + return await self._do_post_request( + self.build_request(""" + mutation uploadStudy($study: StudyInput!) { + addStudies(studies: [$study]) + }""", { + "study": study.to_json() } - } - if self._verify_certificate: - sslcontext = ssl.create_default_context(cafile=self._ca_filename) - - async with self.session.post( - construct_url(protocol=self._protocol, host=self._host, port=self._port, path="/api/graphql"), - headers=self._get_request_headers(), - json=json, - ssl=sslcontext if self._verify_certificate else False - ) as response: - if response.ok: - return await response.json() - else: - response.raise_for_status() + ) + ) - def run_ingestor(self, run_config: List[IngestorConfigInput]): + def run_ingestor(self, run_config: List[IngestorConfigInput]) -> dict: """ Send request to perform an ingestor run + :param run_config: A list of IngestorConfigInput :return: The HTTP response received from the Evolve App Server after attempting to run the ingestor """ return get_event_loop().run_until_complete( self.async_run_ingestor(run_config)) - async def async_run_ingestor(self, run_config: List[IngestorConfigInput]): + async def async_run_ingestor(self, run_config: List[IngestorConfigInput]) -> dict: """ Send asynchronous request to perform an ingestor run + :param run_config: A list of IngestorConfigInput :return: The HTTP response received from the Evolve App Server after attempting to run the ingestor """ - with warnings.catch_warnings(): - if not self._verify_certificate: - warnings.filterwarnings("ignore", category=InsecureRequestWarning) - json = { - "query": """ - mutation executeIngestor($runConfig: [IngestorConfigInput!]) { - executeIngestor(runConfig: $runConfig) - } - """, - "variables": { + return await self._do_post_request( + self.build_request(""" + mutation executeIngestor($runConfig: [IngestorConfigInput!]) { + executeIngestor(runConfig: $runConfig) + }""", { "runConfig": [asdict(x) for x in run_config], } - } - - if self._verify_certificate: - sslcontext = ssl.create_default_context(cafile=self._ca_filename) - - async with self.session.post( - construct_url(protocol=self._protocol, host=self._host, port=self._port, path="/api/graphql"), - headers=self._get_request_headers(), - json=json, - ssl=sslcontext if self._verify_certificate else False - ) as response: - if response.ok: - return await response.json() - else: - response.raise_for_status() + ) + ) - def get_ingestor_run(self, ingestor_run_id: int): + def get_ingestor_run(self, ingestor_run_id: int) -> dict: """ Send request to retrieve the record of a particular ingestor run. + :param ingestor_run_id: The ID of the ingestor run to retrieve execution information about. 
:return: The HTTP response received from the Evolve App Server including the ingestor run information (if found). """ return get_event_loop().run_until_complete( self.async_get_ingestor_run(ingestor_run_id)) - async def async_get_ingestor_run(self, ingestor_run_id: int): + async def async_get_ingestor_run(self, ingestor_run_id: int) -> dict: """ Send asynchronous request to retrieve the record of a particular ingestor run. + :param ingestor_run_id: The ID of the ingestor run to retrieve execution information about. :return: The HTTP response received from the Evolve App Server including the ingestor run information (if found). """ - with warnings.catch_warnings(): - if not self._verify_certificate: - warnings.filterwarnings("ignore", category=InsecureRequestWarning) - json = { - "query": """ - query getIngestorRun($id: Int!) { - getIngestorRun(id: $id) { - id - containerRuntimeType, - payload, - token, - status, - startedAt, - statusLastUpdatedAt, - completedAt - } + return await self._do_post_request( + self.build_request(""" + query getIngestorRun($id: Int!) { + getIngestorRun(id: $id) { + %s } - """, - "variables": { + }""" % IngestorRun.build_gql_query_object_model(), { "id": ingestor_run_id, } - } - - if self._verify_certificate: - sslcontext = ssl.create_default_context(cafile=self._ca_filename) - - async with self.session.post( - construct_url(protocol=self._protocol, host=self._host, port=self._port, path="/api/graphql"), - headers=self._get_request_headers(), - json=json, - ssl=sslcontext if self._verify_certificate else False - ) as response: - if response.ok: - return await response.json() - else: - raise response.raise_for_status() + ) + ) - def get_ingestor_run_list(self, query_filter: Optional[IngestorRunsFilterInput] = None, - query_sort: Optional[IngestorRunsSortCriteriaInput] = None): + def get_ingestor_run_list( + self, + query_filter: Optional[IngestorRunsFilterInput] = None, + query_sort: Optional[IngestorRunsSortCriteriaInput] = None + ) -> dict: """ Send request to retrieve a list of ingestor run records matching the provided filter parameters. + :param query_filter: An `IngestorRunsFilterInput` object. Only records matching the provided values will be returned. If not supplied all records will be returned. (Optional) :param query_sort: An `IngestorRunsSortCriteriaInput` that can control the order of the returned record based on a number of fields. (Optional) @@ -835,72 +324,47 @@ def get_ingestor_run_list(self, query_filter: Optional[IngestorRunsFilterInput] return get_event_loop().run_until_complete( self.async_get_ingestor_run_list(query_filter, query_sort)) - async def async_get_ingestor_run_list(self, query_filter: Optional[IngestorRunsFilterInput] = None, - query_sort: Optional[IngestorRunsSortCriteriaInput] = None): + async def async_get_ingestor_run_list( + self, + query_filter: Optional[IngestorRunsFilterInput] = None, + query_sort: Optional[IngestorRunsSortCriteriaInput] = None + ) -> dict: """ Send asynchronous request to retrieve a list of ingestor run records matching the provided filter parameters. + :param query_filter: An `IngestorRunsFilterInput` object. Only records matching the provided values will be returned. If not supplied all records will be returned. (Optional) :param query_sort: An `IngestorRunsSortCriteriaInput` that can control the order of the returned record based on a number of fields. (Optional) :return: The HTTP response received from the Evolve App Server including all matching ingestor records found. 
""" - - with warnings.catch_warnings(): - if not self._verify_certificate: - warnings.filterwarnings("ignore", category=InsecureRequestWarning) - json = { - "query": """ - query listIngestorRuns($filter: IngestorRunsFilterInput, $sort: IngestorRunsSortCriteriaInput) { - listIngestorRuns(filter: $filter, sort: $sort) { - id - containerRuntimeType, - payload, - token, - status, - startedAt, - statusLastUpdatedAt, - completedAt - } + _json = {} + if query_filter is not None: + _json['filter'] = query_filter.to_json() + if query_sort is not None: + _json['sort'] = query_sort.to_json() + + return await self._do_post_request( + self.build_request(""" + query listIngestorRuns($filter: IngestorRunsFilterInput, $sort: IngestorRunsSortCriteriaInput) { + listIngestorRuns(filter: $filter, sort: $sort) { + %s } - """, - "variables": { - **({"filter": { - "id": query_filter.id, - "status": query_filter.status and [state.name for state in query_filter.status], - "completed": query_filter.completed, - "containerRuntimeType": query_filter.container_runtime_type and [runtime.name for runtime in - query_filter.container_runtime_type] - }} if query_filter else {}), - **({"sort": { - "status": query_sort.status and query_sort.status.name, - "startedAt": query_sort.started_at and query_sort.started_at.name, - "statusLastUpdatedAt": query_sort.status_last_updated_at and query_sort.status_last_updated_at.name, - "completedAt": query_sort.completed_at and query_sort.completed_at.name, - "containerRuntimeType": query_sort.container_runtime_type and query_sort.container_runtime_type.name, - }} if query_sort else {}) - } - } - - if self._verify_certificate: - sslcontext = ssl.create_default_context(cafile=self._ca_filename) - - async with self.session.post( - construct_url(protocol=self._protocol, host=self._host, port=self._port, path="/api/graphql"), - headers=self._get_request_headers(), - json=json, - ssl=sslcontext if self._verify_certificate else False - ) as response: - if response.ok: - return await response.json() - else: - raise response.raise_for_status() + }""" % IngestorRun.build_gql_query_object_model(), _json + ) + ) - def run_hosting_capacity_calibration(self, calibration_name: str, local_calibration_time: datetime, - feeders: Optional[List[str]] = None, - transformer_tap_settings: Optional[str] = None, - generator_config: Optional[GeneratorConfig] = None): + def run_hosting_capacity_calibration( + self, + calibration_name: str, + local_calibration_time: datetime, + feeders: Optional[List[str]] = None, + transformer_tap_settings: Optional[str] = None, + generator_config: Optional[GeneratorConfig] = None + ) -> dict: """ - Send request to run hosting capacity calibrationbration name + Send request to run hosting capacity calibration + + :param calibration_name: A string representation of the calibration name :param local_calibration_time: A datetime representation of the calibration time, in the timezone of your pqv data ("model time"). :param feeders: A list of feeder ID's to run the calibration over. If not supplied then the calibration is run over all feeders in the network. :param transformer_tap_settings: A set of transformer tap settings to apply before running the calibration work package. @@ -908,7 +372,6 @@ def run_hosting_capacity_calibration(self, calibration_name: str, local_calibrat :param generator_config: A `GeneratorConfig` object that overrides the default values in the `WorkPackageConfig` used by calibration. 
Note: The following fields cannot be overridden during calibration: generator_config.model.calibration, generator_config.model.meter_placement_config, generator_config.solve.step_size_minutes, and generator_config.raw_results. - :return: The HTTP response received from the Evolve App Server after attempting to run the calibration """ return get_event_loop().run_until_complete( @@ -916,13 +379,17 @@ def run_hosting_capacity_calibration(self, calibration_name: str, local_calibrat transformer_tap_settings, generator_config)) - async def async_run_hosting_capacity_calibration(self, calibration_name: str, - calibration_time_local: datetime, - feeders: Optional[List[str]] = None, - transformer_tap_settings: Optional[str] = None, - generator_config: Optional[GeneratorConfig] = None): + async def async_run_hosting_capacity_calibration( + self, + calibration_name: str, + calibration_time_local: datetime, + feeders: Optional[List[str]] = None, + transformer_tap_settings: Optional[str] = None, + generator_config: Optional[GeneratorConfig] = None + ) -> dict: """ Send asynchronous request to run hosting capacity calibration + :param calibration_name: A string representation of the calibration name :param calibration_time_local: A datetime representation of the calibration time, in the timezone of your pqv data ("model time"). :param feeders: A list of feeder ID's to run the calibration over. If not supplied then the calibration is run over all feeders in the network. @@ -933,11 +400,7 @@ async def async_run_hosting_capacity_calibration(self, calibration_name: str, :return: The HTTP response received from the Evolve App Server after attempting to run the calibration """ - - # Only replace microsecond, as in database we only have down to second precision. - # tzinfo will be whatever the user passed through, which should be the timezone of their load data. 
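+        # Assumed intent: microseconds are dropped to match the database's second-level
+        # precision, and tzinfo is cleared so the value is treated as model-local time.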
parsed_time = calibration_time_local.replace(microsecond=0, tzinfo=None) - if transformer_tap_settings: if generator_config: if generator_config.model: @@ -947,130 +410,86 @@ async def async_run_hosting_capacity_calibration(self, calibration_name: str, else: generator_config = GeneratorConfig(model=ModelConfig(transformer_tap_settings=transformer_tap_settings)) - with warnings.catch_warnings(): - if not self._verify_certificate: - warnings.filterwarnings("ignore", category=InsecureRequestWarning) - json = { - "query": """ - mutation runCalibration($calibrationName: String!, $calibrationTimeLocal: LocalDateTime, $feeders: [String!], $generatorConfig: HcGeneratorConfigInput) { - runCalibration(calibrationName: $calibrationName, calibrationTimeLocal: $calibrationTimeLocal, feeders: $feeders, generatorConfig: $generatorConfig) - } - """, - "variables": { + return await self._do_post_request( + self.build_request(""" + mutation runCalibration($calibrationName: String!, $calibrationTimeLocal: LocalDateTime, $feeders: [String!], $generatorConfig: HcGeneratorConfigInput) { + runCalibration(calibrationName: $calibrationName, calibrationTimeLocal: $calibrationTimeLocal, feeders: $feeders, generatorConfig: $generatorConfig) + }""", { "calibrationName": calibration_name, "calibrationTimeLocal": parsed_time.isoformat(), "feeders": feeders, - "generatorConfig": self.generator_config_to_json(generator_config) + "generatorConfig": generator_config.to_json() if generator_config is not None else None, } - } - - if self._verify_certificate: - sslcontext = ssl.create_default_context(cafile=self._ca_filename) - - async with self.session.post( - construct_url(protocol=self._protocol, host=self._host, port=self._port, path="/api/graphql"), - headers=self._get_request_headers(), - json=json, - ssl=sslcontext if self._verify_certificate else False - ) as response: - if response.ok: - return await response.json() - else: - raise response.raise_for_status() + ) + ) - def get_hosting_capacity_calibration_run(self, id: str): + def get_hosting_capacity_calibration_run(self, id_: str) -> dict: """ Retrieve information of a hosting capacity calibration run - :param id: The calibration run ID + + :param id_: The calibration run ID :return: The HTTP response received from the Evolve App Server after requesting calibration run info """ - return get_event_loop().run_until_complete(self.async_get_hosting_capacity_calibration_run(id)) + return get_event_loop().run_until_complete(self.async_get_hosting_capacity_calibration_run(id_)) - async def async_get_hosting_capacity_calibration_run(self, id: str): + async def async_get_hosting_capacity_calibration_run(self, id_: str) -> dict: """ Retrieve information of a hosting capacity calibration run - :param id: The calibration run ID + + :param id_: The calibration run ID :return: The HTTP response received from the Evolve App Server after requesting calibration run info """ - with warnings.catch_warnings(): - if not self._verify_certificate: - warnings.filterwarnings("ignore", category=InsecureRequestWarning) - json = { - "query": """ - query getCalibrationRun($id: ID!) { - getCalibrationRun(id: $id) { - id - name - workflowId - runId - calibrationTimeLocal - startAt - completedAt - status - feeders - calibrationWorkPackageConfig - } + return await self._do_post_request( + self.build_request(""" + query getCalibrationRun($id: ID!) 
{ + getCalibrationRun(id: $id) { + id + name + workflowId + runId + calibrationTimeLocal + startAt + completedAt + status + feeders + calibrationWorkPackageConfig } - """, - "variables": { - "id": id - } - } - if self._verify_certificate: - sslcontext = ssl.create_default_context(cafile=self._ca_filename) - - async with self.session.post( - construct_url(protocol=self._protocol, host=self._host, port=self._port, path="/api/graphql"), - headers=self._get_request_headers(), - json=json, - ssl=sslcontext if self._verify_certificate else False - ) as response: - if response.ok: - return await response.json() - else: - response.raise_for_status() + }""", {"id": id_} + ) + ) - def get_hosting_capacity_calibration_sets(self): + def get_hosting_capacity_calibration_sets(self) -> dict: """ Retrieve a list of all completed calibration runs initiated through Evolve App Server + :return: The HTTP response received from the Evolve App Server after requesting completed calibration runs """ return get_event_loop().run_until_complete(self.async_get_hosting_capacity_calibration_sets()) - async def async_get_hosting_capacity_calibration_sets(self): + async def async_get_hosting_capacity_calibration_sets(self) -> dict: """ Retrieve a list of all completed calibration runs initiated through Evolve App Server + :return: The HTTP response received from the Evolve App Server after requesting completed calibration runs """ - with warnings.catch_warnings(): - if not self._verify_certificate: - warnings.filterwarnings("ignore", category=InsecureRequestWarning) - json = { - "query": """ - query { + return await self._do_post_request( + self.build_request(""" + query { getCalibrationSets - } - """ - } - if self._verify_certificate: - sslcontext = ssl.create_default_context(cafile=self._ca_filename) - - async with self.session.post( - construct_url(protocol=self._protocol, host=self._host, port=self._port, path="/api/graphql"), - headers=self._get_request_headers(), - json=json, - ssl=sslcontext if self._verify_certificate else False - ) as response: - if response.ok: - return await response.json() - else: - response.raise_for_status() + }""" + ) + ) - def get_transformer_tap_settings(self, calibration_name: str, feeder: Optional[str] = None, - transformer_mrid: Optional[str] = None): + def get_transformer_tap_settings( + self, + calibration_name: str, + feeder: Optional[str] = None, + transformer_mrid: Optional[str] = None + ) -> dict: """ - Retrieve distribution transformer tap settings from a calibration set in the hosting capacity input database - :param calibration_name: The (user supplied)name of the calibration run to retrieve transformer tap settings from + Retrieve distribution transformer tap settings from a calibration set in the hosting capacity input database. 
+
+        :param calibration_name: The (user supplied) name of the calibration run to retrieve transformer tap settings from
        :param feeder: An optional filter to apply to the returned list of transformer tap settings
        :param transformer_mrid: An optional filter to return only the transformer tap settings for a particular transformer mrid
        :return: The HTTP response received from the Evolve App Server after requesting transformer tap settings for the calibration id
@@ -1078,140 +497,76 @@ def get_transformer_tap_settings(self, calibration_name: str, feeder: Optional[s
        return get_event_loop().run_until_complete(
            self.async_get_transformer_tap_settings(calibration_name, feeder, transformer_mrid))

-    async def async_get_transformer_tap_settings(self, calibration_name: str, feeder: Optional[str] = None,
-                                                 transformer_mrid: Optional[str] = None):
+    async def async_get_transformer_tap_settings(
+            self,
+            calibration_name: str,
+            feeder: Optional[str] = None,
+            transformer_mrid: Optional[str] = None
+    ) -> dict:
        """
-        Retrieve distribution transformer tap settings from a calibration set in the hosting capacity input database
-        :param calibration_name: The (user supplied)name of the calibration run to retrieve transformer tap settings from
+        Retrieve distribution transformer tap settings from a calibration set in the hosting capacity input database.
+
+        :param calibration_name: The (user supplied) name of the calibration run to retrieve transformer tap settings from
        :param feeder: An optional filter to apply to the returned list of transformer tap settings
        :param transformer_mrid: An optional filter to return only the transformer tap settings for a particular transformer mrid
        :return: The HTTP response received from the Evolve App Server after requesting transformer tap settings for the calibration id
        """
-        with warnings.catch_warnings():
-            if not self._verify_certificate:
-                warnings.filterwarnings("ignore", category=InsecureRequestWarning)
-            json = {
-                "query": """
-                    query getTransformerTapSettings($calibrationName: String!, $feeder: String, $transformerMrid: String) {
-                        getTransformerTapSettings(calibrationName: $calibrationName, feeder: $feeder, transformerMrid: $transformerMrid) {
-                            id
-                            highStep
-                            lowStep
-                            nominalTapNum
-                            tapPosition
-                            controlEnabled
-                            stepVoltageIncrement
-                        }
-                    }
-                """,
-                "variables": {
+        return await self._do_post_request(
+            self.build_request("""
+                query getTransformerTapSettings($calibrationName: String!, $feeder: String, $transformerMrid: String) {
+                    getTransformerTapSettings(calibrationName: $calibrationName, feeder: $feeder, transformerMrid: $transformerMrid) {
+                        id
+                        highStep
+                        lowStep
+                        nominalTapNum
+                        tapPosition
+                        controlEnabled
+                        stepVoltageIncrement
+                    }
+                }""", {
                    "calibrationName": calibration_name,
                    "feeder": feeder,
-                    "transformerMrid": transformer_mrid
+                    "transformerMrid": transformer_mrid
                }
-            }
-            if self._verify_certificate:
-                sslcontext = ssl.create_default_context(cafile=self._ca_filename)
-
-            async with self.session.post(
-                    construct_url(protocol=self._protocol, host=self._host, port=self._port, path="/api/graphql"),
-                    headers=self._get_request_headers(),
-                    json=json,
-                    ssl=sslcontext if self._verify_certificate else False
-            ) as response:
-                if response.ok:
-                    return await response.json()
-                else:
-                    response.raise_for_status()
+            )
+        )

-    def run_opendss_export(self, config: OpenDssConfig):
+    def run_opendss_export(self, config: OpenDssConfig) -> dict:
        """
        Send request to run an opendss export
+
        :param config: The OpenDssConfig for running the export
        :return: The HTTP
response received from the Evolve App Server after attempting to run the opendss export """ return get_event_loop().run_until_complete(self.async_run_opendss_export(config)) - async def async_run_opendss_export(self, config: OpenDssConfig): + async def async_run_opendss_export(self, config: OpenDssConfig) -> dict: """ Send asynchronous request to run an opendss export + :param config: The OpenDssConfig for running the export :return: The HTTP response received from the Evolve App Server after attempting to run the opendss export """ - with warnings.catch_warnings(): - if not self._verify_certificate: - warnings.filterwarnings("ignore", category=InsecureRequestWarning) - json = { - "query": """ - mutation createOpenDssModel($input: OpenDssModelInput!) { - createOpenDssModel(input: $input) - } - """, - "variables": { - "input": { - "modelName": config.model_name, - "isPublic": config.is_public, - "generationSpec": { - "modelOptions": { - "feeder": config.feeder, - "scenario": config.scenario, - "year": config.year - }, - "modulesConfiguration": { - "common": { - **({"fixedTime": {"loadTime": config.load_time.load_time.isoformat(), - "overrides": config.load_time.load_overrides and [ - { - "loadId": key, - "loadWattsOverride": value.load_watts, - "genWattsOverride": value.gen_watts, - "loadVarOverride": value.load_var, - "genVarOverride": value.gen_var, - } for key, value in config.load_time.load_overrides.items() - ] - }} if isinstance(config.load_time, FixedTime) else {}), - **({"timePeriod": { - "startTime": config.load_time.start_time.isoformat(), - "endTime": config.load_time.end_time.isoformat(), - "overrides": config.load_time.load_overrides and [ - { - "loadId": key, - "loadWattsOverride": value.load_watts, - "genWattsOverride": value.gen_watts, - "loadVarOverride": value.load_var, - "genVarOverride": value.gen_var, - } for key, value in config.load_time.load_overrides.items() - ] - }} if isinstance(config.load_time, TimePeriod) else {}) - }, - "generator": self.generator_config_to_json(config.generator_config), - } - } - } + return await self._do_post_request( + self.build_request(""" + mutation createOpenDssModel($input: OpenDssModelInput!) 
{ + createOpenDssModel(input: $input) + }""", { + "input": config.to_json() } - } - if self._verify_certificate: - sslcontext = ssl.create_default_context(cafile=self._ca_filename) - - async with self.session.post( - construct_url(protocol=self._protocol, host=self._host, port=self._port, path="/api/graphql"), - headers=self._get_request_headers(), - json=json, - ssl=sslcontext if self._verify_certificate else False - ) as response: - if response.ok: - return await response.json() - else: - response.raise_for_status() + ) + ) def get_paged_opendss_models( - self, - limit: Optional[int] = None, - offset: Optional[int] = None, - query_filter: Optional[GetOpenDssModelsFilterInput] = None, - query_sort: Optional[GetOpenDssModelsSortCriteriaInput] = None): + self, + limit: Optional[int] = None, + offset: Optional[int] = None, + query_filter: Optional[GetOpenDssModelsFilterInput] = None, + query_sort: Optional[GetOpenDssModelsSortCriteriaInput] = None + ) -> dict: """ Retrieve a paginated opendss export run information + :param limit: The number of opendss export runs to retrieve :param offset: The number of opendss export runs to skip :param query_filter: The filter to apply to the query @@ -1222,116 +577,122 @@ def get_paged_opendss_models( self.async_get_paged_opendss_models(limit, offset, query_filter, query_sort)) async def async_get_paged_opendss_models( - self, - limit: Optional[int] = None, - offset: Optional[int] = None, - query_filter: Optional[GetOpenDssModelsFilterInput] = None, - query_sort: Optional[GetOpenDssModelsSortCriteriaInput] = None): + self, + limit: Optional[int] = None, + offset: Optional[int] = None, + query_filter: Optional[GetOpenDssModelsFilterInput] = None, + query_sort: Optional[GetOpenDssModelsSortCriteriaInput] = None + ) -> dict: """ Retrieve a paginated opendss export run information + :param limit: The number of opendss export runs to retrieve :param offset: The number of opendss export runs to skip :param query_filter: The filter to apply to the query :param query_sort: The sorting to apply to the query :return: The HTTP response received from the Evolve App Server after requesting opendss export run information """ - with warnings.catch_warnings(): - if not self._verify_certificate: - warnings.filterwarnings("ignore", category=InsecureRequestWarning) - json = { - "query": """ - query pagedOpenDssModels($limit: Int, $offset: Long, $filter: GetOpenDssModelsFilterInput, $sort: GetOpenDssModelsSortCriteriaInput) { - pagedOpenDssModels(limit: $limit, offset: $offset, filter: $filter,sort: $sort) { - totalCount - offset, - models { - id - name - createdAt - createdBy - state - downloadUrl - isPublic - errors - generationSpec - } + _json = { + "query": """ + query pagedOpenDssModels($limit: Int, $offset: Long, $filter: GetOpenDssModelsFilterInput, $sort: GetOpenDssModelsSortCriteriaInput) { + pagedOpenDssModels(limit: $limit, offset: $offset, filter: $filter,sort: $sort) { + totalCount + offset, + models { + id + name + createdAt + createdBy + state + downloadUrl + isPublic + errors + generationSpec } } - """, - "variables": { - **({"limit": limit} if limit is not None else {}), - **({"offset": offset} if offset is not None else {}), - **({"filter": { - "name": query_filter.name, - "isPublic": query_filter.is_public, - "state": query_filter.state and [state.name for state in query_filter.state] - }} if query_filter else {}), - **({"sort": { - "name": query_sort.name and query_sort.name.name, - "createdAt": query_sort.created_at and query_sort.created_at.name, - 
"state": query_sort.state and query_sort.state.name, - "isPublic": query_sort.is_public and query_sort.is_public.name - }} if query_sort else {}) - } } - if self._verify_certificate: - sslcontext = ssl.create_default_context(cafile=self._ca_filename) - - async with self.session.post( - construct_url(protocol=self._protocol, host=self._host, port=self._port, path="/api/graphql"), - headers=self._get_request_headers(), - json=json, - ssl=sslcontext if self._verify_certificate else False - ) as response: - if response.ok: - return await response.json() - else: - response.raise_for_status() + """, + "variables": {} + } + + if limit is not None: + _json['variables']['limit'] = limit + if offset is not None: + _json['variables']['offset'] = offset + if query_filter is not None: + _json['variables']['filter'] = query_filter.to_json() + if query_sort is not None: + _json['variables']['sort'] = query_sort.to_json() - def get_opendss_model_download_url(self, run_id: int): + return await self._do_post_request(_json) + + def get_opendss_model_download_url(self, run_id: int) -> dict: """ Retrieve a download url for the specified opendss export run id + :param run_id: The opendss export run ID :return: The HTTP response received from the Evolve App Server after requesting opendss export model download url """ return get_event_loop().run_until_complete(self.async_get_opendss_model_download_url(run_id)) - async def async_get_opendss_model_download_url(self, run_id: int): + async def async_get_opendss_model_download_url(self, run_id: int) -> dict: """ Retrieve a download url for the specified opendss export run id + :param run_id: The opendss export run ID :return: The HTTP response received from the Evolve App Server after requesting opendss export model download url """ - with warnings.catch_warnings(): - if not self._verify_certificate: - warnings.filterwarnings("ignore", category=InsecureRequestWarning) - - if self._verify_certificate: - sslcontext = ssl.create_default_context(cafile=self._ca_filename) - - async with self.session.get( - construct_url(protocol=self._protocol, host=self._host, port=self._port, - path=f"/api/opendss-model/{run_id}"), - headers=self._get_request_headers(), - ssl=sslcontext if self._verify_certificate else False, - allow_redirects=False + return await self._do_get_request(run_id=run_id) + + @catch_warnings + async def _do_post_request(self, _json): + self._do_request() + async with self.session.post( + construct_url(**self._auth.base_url_args, path="/api/graphql"), + headers=self._get_request_headers(), + json=_json, + ssl=self._get_ssl() + ) as response: + if response.ok: + return await response.json() + else: + response.raise_for_status() + + @catch_warnings + async def _do_get_request(self, *, run_id: int): # TODO: Terrible name probably + self._do_request() + async with self.session.get( + construct_url(**self._auth.base_url_args, + path=f"/api/opendss-model/{run_id}"), + headers=self._get_request_headers(), + ssl=self._get_ssl(), + allow_redirects=False ) as response: if response.status == HTTPStatus.FOUND: return response.headers["Location"] elif not response.ok: response.raise_for_status() + def _do_request(self): # TODO: can this just be called once per run? 
+        if not self._verify_certificate:
+            warnings.filterwarnings("ignore", category=InsecureRequestWarning)
+
+    def _get_ssl(self):
+        return ssl.create_default_context(cafile=self._ca_filename) if self._verify_certificate else False
+
    def get_opendss_model(self, model_id: int):
        """
        Retrieve information of an OpenDss model export
+
        :param model_id: The OpenDss model export ID
        :return: The HTTP response received from the Evolve App Server after requesting the openDss model info
        """
        return get_event_loop().run_until_complete(self.async_get_opendss_model(model_id))

-    async def async_get_opendss_model(self, model_id: int):
+    async def async_get_opendss_model(self, model_id: int) -> dict:  # TODO: this logic should be hidden behind a generator
        """
-        Retrieve information of a OpenDss model export
+        Retrieve information of an OpenDss model export
+
        :param model_id: The OpenDss model export ID
        :return: The HTTP response received from the Evolve App Server after requesting the openDss model info
        """
diff --git a/src/zepben/eas/client/feeder_load_analysis_input.py b/src/zepben/eas/client/feeder_load_analysis_input.py
index efd3411..269ec2c 100644
--- a/src/zepben/eas/client/feeder_load_analysis_input.py
+++ b/src/zepben/eas/client/feeder_load_analysis_input.py
@@ -1,20 +1,21 @@
-# Copyright 2020 Zeppelin Bend Pty Ltd
+# Copyright 2025 Zeppelin Bend Pty Ltd
 #
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+__all__ = ["FeederLoadAnalysisInput"]
+
 from dataclasses import dataclass
 from typing import List, Optional

-__all__ = [
-    "FeederLoadAnalysisInput"
-]
+from zepben.eas.client.util import HostingCapacityDataclass

 from zepben.eas.client.fla_forecast_config import FlaForecastConfig


 @dataclass
-class FeederLoadAnalysisInput:
+class FeederLoadAnalysisInput(HostingCapacityDataclass):
    """ A data class representing the configuration for a feeder load analysis study """
    start_date: str
diff --git a/src/zepben/eas/client/hc_commons.py b/src/zepben/eas/client/hc_commons.py
index a94a7f7..ba3866b 100644
--- a/src/zepben/eas/client/hc_commons.py
+++ b/src/zepben/eas/client/hc_commons.py
@@ -1,4 +1,4 @@
-# Copyright 2020 Zeppelin Bend Pty Ltd
+# Copyright 2025 Zeppelin Bend Pty Ltd
 #
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0.
If a copy of the MPL was not distributed with this @@ -17,7 +17,6 @@ "BASIC_RESULTS_CONFIG", ] -from build.lib.zepben.eas.client.work_package import ResultsConfig from zepben.eas import StoredResultsConfig, RawResultsConfig, MetricsResultsConfig STORED_RESULTS_CONFIG_STORE_NONE = StoredResultsConfig( @@ -61,19 +60,19 @@ calculate_performance_metrics=True ) -RESULTS_CONFIG_DEFAULT_RESULTS_CONFIG = ResultsConfig( +RESULTS_CONFIG_DEFAULT_RESULTS_CONFIG = ResultsConfig( # FIXME: unused, but also undefined raw_config=RawResultsConfig(energy_meters_raw=True, energy_meter_voltages_raw=True), metrics_config=METRICS_RESULTS_CONFIG_CALCULATE_PERFORMANCE_METRICS, stored_results_config=STORED_RESULTS_CONFIG_STORE_NONE ) -STANDARD_RESULTS_CONFIG = ResultsConfig( +STANDARD_RESULTS_CONFIG = ResultsConfig( # FIXME: unused, but also undefined raw_config=RAW_RESULTS_CONFIG_STANDARD, metrics_config=METRICS_RESULTS_CONFIG_CALCULATE_PERFORMANCE_METRICS, stored_results_config=StoredResultsConfig(voltage_exceptions_raw=True, overloads_raw=True) ) -BASIC_RESULTS_CONFIG = ResultsConfig( +BASIC_RESULTS_CONFIG = ResultsConfig( # FIXME: unused, but also undefined raw_config=RAW_RESULTS_CONFIG_BASIC, metrics_config=METRICS_RESULTS_CONFIG_CALCULATE_PERFORMANCE_METRICS, stored_results_config=STORED_RESULTS_CONFIG_STORE_NONE diff --git a/src/zepben/eas/client/ingestor.py b/src/zepben/eas/client/ingestor.py index d384696..f0c0966 100644 --- a/src/zepben/eas/client/ingestor.py +++ b/src/zepben/eas/client/ingestor.py @@ -3,19 +3,27 @@ # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at https://mozilla.org/MPL/2.0/. + +__all__ = [ + "IngestorConfigInput", + "IngestorRuntimeKind", + "IngestorRunState", + "IngestorRun", + "IngestorRunsFilterInput", + "Order", + "IngestorRunsSortCriteriaInput", +] + from dataclasses import dataclass from enum import Enum from typing import Optional, List from datetime import datetime -__all__ = [ - "IngestorConfigInput", "IngestorRuntimeKind", "IngestorRunState", "IngestorRun", "IngestorRunsFilterInput", "Order", - "IngestorRunsSortCriteriaInput" -] +from zepben.eas.client.util import HostingCapacityDataclass @dataclass -class IngestorConfigInput: +class IngestorConfigInput(HostingCapacityDataclass): key: str value: str @@ -39,7 +47,7 @@ class IngestorRunState(Enum): @dataclass -class IngestorRun: +class IngestorRun(HostingCapacityDataclass): id: int container_runtime_type: Optional[IngestorRuntimeKind] payload: str @@ -51,7 +59,7 @@ class IngestorRun: @dataclass -class IngestorRunsFilterInput: +class IngestorRunsFilterInput(HostingCapacityDataclass): id: Optional[int] = None status: Optional[List[IngestorRunState]] = None completed: Optional[bool] = None @@ -64,7 +72,7 @@ class Order(Enum): @dataclass -class IngestorRunsSortCriteriaInput: +class IngestorRunsSortCriteriaInput(HostingCapacityDataclass): status: Optional[Order] = None started_at: Optional[Order] = None status_last_updated_at: Optional[Order] = None diff --git a/src/zepben/eas/client/opendss.py b/src/zepben/eas/client/opendss.py index 91838f7..fff3f1c 100644 --- a/src/zepben/eas/client/opendss.py +++ b/src/zepben/eas/client/opendss.py @@ -3,12 +3,6 @@ # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at https://mozilla.org/MPL/2.0/. 
-from dataclasses import dataclass -from datetime import tzinfo -from enum import Enum -from typing import Union, Optional, List - -from zepben.eas.client.work_package import GeneratorConfig, TimePeriod, FixedTime __all__ = [ "OpenDssConfig", @@ -18,9 +12,16 @@ "GetOpenDssModelsSortCriteriaInput" ] +from dataclasses import dataclass +from enum import Enum +from typing import Union, Optional, List + +from zepben.eas.client.work_package import GeneratorConfig, TimePeriod, FixedTime +from zepben.eas.client.util import HostingCapacityDataclass + @dataclass -class OpenDssConfig: +class OpenDssConfig(HostingCapacityDataclass): """ A data class representing the configuration for an opendss export """ scenario: str year: int @@ -30,6 +31,25 @@ class OpenDssConfig: model_name: Optional[str] = None is_public: Optional[bool] = None + def to_json(self) -> dict: + _json = super().to_json() + _json["generationSpec"] = { + "modelOptions": { + "feeder": _json.pop('feeder'), + "scenario": _json.pop('scenario'), + "year": _json.pop('year'), + }, + "modulesConfiguration": { + "common": {}, + "generator": _json.pop('generatorConfig'), + } + } + if isinstance(self.load_time, TimePeriod): + _json["generationSpec"]["modulesConfiguration"]["common"]["timePeriod"] = _json.pop('loadTime') + elif isinstance(self.load_time, FixedTime): + _json["generationSpec"]["modulesConfiguration"]["common"]["fixedTime"] = _json.pop('loadTime') + return _json + class OpenDssModelState(Enum): COULD_NOT_START = "COULD_NOT_START" @@ -39,7 +59,7 @@ class OpenDssModelState(Enum): @dataclass -class GetOpenDssModelsFilterInput: +class GetOpenDssModelsFilterInput(HostingCapacityDataclass): """ A data class representing the filter to apply to the opendss export run paginated query """ name: Optional[str] = None is_public: Optional[int] = None @@ -52,7 +72,7 @@ class Order(Enum): @dataclass -class GetOpenDssModelsSortCriteriaInput: +class GetOpenDssModelsSortCriteriaInput(HostingCapacityDataclass): """ A data class representing the sort criteria to apply to the opendss export run paginated query """ name: Optional[Order] = None created_at: Optional[Order] = None diff --git a/src/zepben/eas/client/study.py b/src/zepben/eas/client/study.py index 9a9e05a..ec2facc 100644 --- a/src/zepben/eas/client/study.py +++ b/src/zepben/eas/client/study.py @@ -1,19 +1,21 @@ -# Copyright 2020 Zeppelin Bend Pty Ltd +# Copyright 2025 Zeppelin Bend Pty Ltd # # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at https://mozilla.org/MPL/2.0/. 
+__all__ = ["GeoJsonOverlay", "StateOverlay", "Section", "Result", "Study"] + from dataclasses import dataclass, field from typing import List, Any from geojson import GeoJSON -__all__ = ["GeoJsonOverlay", "StateOverlay", "Section", "Result", "Study"] +from zepben.eas.client.util import HostingCapacityDataclass @dataclass -class GeoJsonOverlay: +class GeoJsonOverlay(HostingCapacityDataclass): """ A data class representing an Evolve App Server study result GeoJSON overlay """ data: GeoJSON styles: List[str] @@ -21,14 +23,14 @@ class GeoJsonOverlay: @dataclass -class StateOverlay: +class StateOverlay(HostingCapacityDataclass): """ A data class representing an Evolve App Server study result state overlay """ data: None styles: List[str] @dataclass -class Section: +class Section(HostingCapacityDataclass): """ A data class representing an Evolve App Server study result data section """ type: str name: str @@ -38,7 +40,7 @@ class Section: @dataclass -class Result: +class Result(HostingCapacityDataclass): """ A data class representing an Evolve App Server study result """ name: str geo_json_overlay: GeoJsonOverlay = None @@ -47,7 +49,7 @@ class Result: @dataclass -class Study: +class Study(HostingCapacityDataclass): """ A data class representing an Evolve App Server study """ name: str description: str diff --git a/src/zepben/eas/client/util.py b/src/zepben/eas/client/util.py index e3c52b8..3a60041 100644 --- a/src/zepben/eas/client/util.py +++ b/src/zepben/eas/client/util.py @@ -1,13 +1,104 @@ -# Copyright 2020 Zeppelin Bend Pty Ltd +# Copyright 2025 Zeppelin Bend Pty Ltd # # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at https://mozilla.org/MPL/2.0/. -__all__ = ["construct_url"] +__all__ = ["construct_url", "snake_to_camel", "HostingCapacityDataclass"] -from typing import Union +import inspect +from abc import ABC +from dataclasses import dataclass +from datetime import datetime +from enum import Enum +from typing import Union, Any, Generator, Tuple def construct_url(protocol, host, path, port: Union[str, int] = None) -> str: return f"{protocol}://{host}{f':{port}' if port else ''}{path}" + +def snake_to_camel(snake_str): + """ + Converts a snake_case string to camelCase. + + :param snake_str: The string in snake_case format (e.g., "my_variable_name"). + :returns: The string in camelCase format (e.g., "myVariableName"). 
+ """ + components = snake_str.split('_') + return components[0] + ''.join(x.title() for x in components[1:]) + +@dataclass +class HostingCapacityDataclass(ABC): # TODO: Another terrible name + _snake_to_camel_overrides = dict( + load_vmin_pu = 'loadVMinPu', + load_vmax_pu = 'loadVMaxPu', + gen_vmin_pu = 'genVMinPu', + gen_vmax_pu = 'genVMaxPu', + collapse_swer = 'collapseSWER', + split_phase_lv_kv = 'splitPhaseLVKV', + norm_vmin_pu = 'normVMinPu', + norm_vmax_pu = 'normVMaxPu', + emerg_vmin_pu = 'emergVMinPu', + emerg_vmax_pu = 'emergVMaxPu', + calculate_co2 = 'calculateCO2', + simplify_plsi_threshold = "simplifyPLSIThreshold", + ) + + def to_json(self) -> Any: + def _process_value(_value): + if isinstance(_value, HostingCapacityDataclass): + return _value.to_json() + elif isinstance(_value, Enum): + return _value.value + elif isinstance(_value, datetime): + return _value.isoformat() + elif isinstance(_value, list): + return [_process_value(i) for i in _value] + elif isinstance(_value, dict): + return {k: _process_value(v) for k, v in _value.items()} + elif isinstance(_value, (str, int, float)): + return _value + elif _value is None: + return None + else: + raise TypeError(f"Unsupported value type: {_value}") + return {self._snake_to_camel_overrides.get(k, snake_to_camel(k)): _process_value(v) for k, v in self._public_attrs()} + + @classmethod + def build_gql_query_object_model(cls): + def _process_value(_value): + """ + This is required for nested types, eg: Optional[MyClass] + """ + try: + try: + clazz = _value.__args__[0] + except AttributeError: + return None + if issubclass(clazz, HostingCapacityDataclass): + return clazz.build_gql_query_object_model() + except TypeError: + # This handles recursive nested types: Optional[List[MyClass]] + try: + if clazz._name == 'List': + return _process_value(clazz) + except AttributeError: + pass + return None + + def _get_str(k, v): + _rv = cls._snake_to_camel_overrides.get(k, snake_to_camel(k)) + _pv = _process_value(v) + if _pv is None: + return _rv + return _rv + " { " + _pv + " }" + + # Iterate over all object attrs, convert them into camel case space delimited strings, if the hinted type of the + # attr is a subclass of HostingCapacityDataclass, then we want to process it and return the desired string. + return ' '.join(_get_str(k, v) for k, v in inspect.get_annotations(cls).items() if not k.startswith("_")) + + def _public_attrs(self) -> Generator[Tuple[str, Any], None, None]: + for k, v in self.__dict__.items(): + if not k.startswith("_"): + yield k, v + diff --git a/src/zepben/eas/client/work_package.py b/src/zepben/eas/client/work_package.py index 7311326..3e429fa 100644 --- a/src/zepben/eas/client/work_package.py +++ b/src/zepben/eas/client/work_package.py @@ -1,12 +1,8 @@ -# Copyright 2020 Zeppelin Bend Pty Ltd +# Copyright 2025 Zeppelin Bend Pty Ltd # # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at https://mozilla.org/MPL/2.0/. 
-from dataclasses import dataclass -from datetime import datetime -from enum import Enum -from typing import List, Optional, Union, Dict __all__ = [ "SwitchClass", @@ -47,18 +43,25 @@ "NodeLevelResultsConfig" ] +from dataclasses import dataclass +from datetime import datetime +from enum import Enum +from typing import List, Optional, Union, Dict, Any + +from zepben.eas.client.util import HostingCapacityDataclass + class SwitchClass(Enum): - BREAKER = "BREAKER", - DISCONNECTOR = "DISCONNECTOR", - FUSE = "FUSE", - JUMPER = "JUMPER", - LOAD_BREAK_SWITCH = "LOAD_BREAK_SWITCH", + BREAKER = "BREAKER" + DISCONNECTOR = "DISCONNECTOR" + FUSE = "FUSE" + JUMPER = "JUMPER" + LOAD_BREAK_SWITCH = "LOAD_BREAK_SWITCH" RECLOSER = "RECLOSER" @dataclass -class SwitchMeterPlacementConfig: +class SwitchMeterPlacementConfig(HostingCapacityDataclass): meter_switch_class: Optional[SwitchClass] = None """The CIM class of Switch to create meters at""" @@ -70,7 +73,7 @@ class SwitchMeterPlacementConfig: @dataclass -class FixedTimeLoadOverride: +class FixedTimeLoadOverride(HostingCapacityDataclass): load_watts: Optional[List[float]] """ The readings to be used to override load watts @@ -91,11 +94,13 @@ class FixedTimeLoadOverride: The readings to be used to override gen var """ - # def __str__(self): + def to_json(self) -> dict: + _json = super().to_json() + return {f'{k}Override': v for k, v in _json.items()} @dataclass -class TimePeriodLoadOverride: +class TimePeriodLoadOverride(HostingCapacityDataclass): load_watts: Optional[List[float]] """ A list of readings to be used to override load watts. @@ -140,30 +145,51 @@ class TimePeriodLoadOverride: 1.0: 24 entries for daily and 8760 for yearly """ + def to_json(self) -> dict: + _json = super().to_json() + return {f'{k}Override': v for k, v in _json.items()} + +@dataclass +class OverrideModel(HostingCapacityDataclass): + def to_json(self): + _json = super().to_json() + try: + _json['overrides'] = [{'loadId': k, **v} for k, v in _json.pop('loadOverrides').items()] + except AttributeError: + _json['overrides'] = None + return _json -class FixedTime: + +@dataclass +class FixedTime(OverrideModel): """ A single point in time to model. Should be precise to the minute, and load data must be present for the provided time in the load database for accurate results. """ + load_time: datetime + load_overrides: Optional[Dict[str, FixedTimeLoadOverride]] def __init__(self, load_time: datetime, load_overrides: Optional[Dict[str, FixedTimeLoadOverride]] = None): self.load_time = load_time.replace(second=0, microsecond=0, tzinfo=None) self.load_overrides = load_overrides -class TimePeriod: +@dataclass +class TimePeriod(OverrideModel): """ A time period to model, from a start time to an end time. Maximum of 1 year. Load data must be available in the load database between the provided start and end time for accurate results. 
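
    For example, one calendar week (illustrative values):

        TimePeriod(start_time=datetime(2022, 1, 1), end_time=datetime(2022, 1, 8))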
""" + start_time: datetime + end_time: datetime + load_overrides: Optional[Dict[str, TimePeriodLoadOverride]] = None def __init__( - self, - start_time: datetime, - end_time: datetime, - load_overrides: Optional[Dict[str, TimePeriodLoadOverride]] = None + self, + start_time: datetime, + end_time: datetime, + load_overrides: Optional[Dict[str, TimePeriodLoadOverride]] = None ): self._validate(start_time, end_time) self.start_time = start_time.replace(second=0, microsecond=0, tzinfo=None) @@ -195,7 +221,7 @@ class FeederScenarioAllocationStrategy(Enum): @dataclass -class MeterPlacementConfig: +class MeterPlacementConfig(HostingCapacityDataclass): feeder_head: Optional[bool] = None """Whether to place a meter at the voltage source at the feeder head.""" @@ -210,7 +236,7 @@ class MeterPlacementConfig: @dataclass -class PVVoltVARVoltWattConfig: +class PVVoltVARVoltWattConfig(HostingCapacityDataclass): cut_off_date: Optional[datetime] = None """Optional cut-off date to determine which profile to apply to equipment during translation to the OpenDss model. If supplied, the "commissionedDate" of the equipment is compared against this date, equipment that do not have a @@ -226,7 +252,7 @@ class PVVoltVARVoltWattConfig: @dataclass -class ModelConfig: +class ModelConfig(HostingCapacityDataclass): vm_pu: Optional[float] = None """Voltage per-unit of voltage source.""" @@ -521,7 +547,7 @@ class SolveMode(Enum): @dataclass -class SolveConfig: +class SolveConfig(HostingCapacityDataclass): norm_vmin_pu: Optional[float] = None norm_vmax_pu: Optional[float] = None emerg_vmin_pu: Optional[float] = None @@ -543,7 +569,7 @@ class SolveConfig: @dataclass -class RawResultsConfig: +class RawResultsConfig(HostingCapacityDataclass): """ Whether to produce raw results generated from OpenDSS. You will likely always want defaults for this, as setting any of these to False will limit @@ -577,7 +603,7 @@ class RawResultsConfig: @dataclass -class MetricsResultsConfig: +class MetricsResultsConfig(HostingCapacityDataclass): """ Calculated metrics based off the raw results """ @@ -587,7 +613,7 @@ class MetricsResultsConfig: @dataclass -class StoredResultsConfig: +class StoredResultsConfig(HostingCapacityDataclass): """ The raw results that will be stored. Note storing raw results will utilise a lot of storage space and should be avoided for @@ -620,7 +646,7 @@ class StoredResultsConfig: @dataclass -class NodeLevelResultsConfig: +class NodeLevelResultsConfig(HostingCapacityDataclass): """ Configuration settings for node level results. """ @@ -668,12 +694,11 @@ class NodeLevelResultsConfig: @dataclass -class GeneratorConfig: +class GeneratorConfig(HostingCapacityDataclass): """ Configuration settings for the OpenDSS model. These settings make changes to the network and specific OpenDSS settings prior to model execution. 
""" - model: Optional[ModelConfig] = None solve: Optional[SolveConfig] = None raw_results: Optional[RawResultsConfig] = None @@ -681,7 +706,7 @@ class GeneratorConfig: @dataclass -class EnhancedMetricsConfig: +class EnhancedMetricsConfig(HostingCapacityDataclass): populate_enhanced_metrics: Optional[bool] = None populate_enhanced_metrics_profile: Optional[bool] = None populate_duration_curves: Optional[bool] = None @@ -695,17 +720,17 @@ class EnhancedMetricsConfig: class WriterType(Enum): - POSTGRES = "POSTGRES", + POSTGRES = "POSTGRES" PARQUET = "PARQUET" @dataclass -class WriterOutputConfig: +class WriterOutputConfig(HostingCapacityDataclass): enhanced_metrics_config: Optional[EnhancedMetricsConfig] = None @dataclass -class WriterConfig: +class WriterConfig(HostingCapacityDataclass): writer_type: Optional[WriterType] = None """ Whether to write output to Parquet files or a Postgres database. @@ -717,7 +742,7 @@ class WriterConfig: @dataclass -class ResultProcessorConfig: +class ResultProcessorConfig(HostingCapacityDataclass): """ Configuration specific to processing of results. """ @@ -733,31 +758,30 @@ class ResultProcessorConfig: @dataclass -class YearRange: +class YearRange(HostingCapacityDataclass): min_year: int max_year: int -@dataclass class InterventionClass(Enum): - TARIFF_REFORM = "TARIFF_REFORM", - CONTROLLED_LOAD_HOT_WATER = "CONTROLLED_LOAD_HOT_WATER", - COMMUNITY_BESS = "COMMUNITY_BESS", - DISTRIBUTION_TX_OLTC = "DISTRIBUTION_TX_OLTC", - LV_STATCOMS = "LV_STATCOMS", - DVMS = "DVMS", - PHASE_REBALANCING = "PHASE_REBALANCING", - DISTRIBUTION_TAP_OPTIMIZATION = "DISTRIBUTION_TAP_OPTIMIZATION", + TARIFF_REFORM = "TARIFF_REFORM" + CONTROLLED_LOAD_HOT_WATER = "CONTROLLED_LOAD_HOT_WATER" + COMMUNITY_BESS = "COMMUNITY_BESS" + DISTRIBUTION_TX_OLTC = "DISTRIBUTION_TX_OLTC" + LV_STATCOMS = "LV_STATCOMS" + DVMS = "DVMS" + PHASE_REBALANCING = "PHASE_REBALANCING" + DISTRIBUTION_TAP_OPTIMIZATION = "DISTRIBUTION_TAP_OPTIMIZATION" UNKNOWN = "UNKNOWN" class CandidateGenerationType(Enum): - CRITERIA = "CRITERIA", + CRITERIA = "CRITERIA" TAP_OPTIMIZATION = "TAP_OPTIMIZATION" @dataclass -class CandidateGenerationConfig: +class CandidateGenerationConfig(HostingCapacityDataclass): type: CandidateGenerationType """The type of method for generating the intervention candidates.""" @@ -802,14 +826,14 @@ class CandidateGenerationConfig: @dataclass -class PhaseRebalanceProportions: +class PhaseRebalanceProportions(HostingCapacityDataclass): a: float b: float c: float @dataclass -class RegulatorConfig: +class RegulatorConfig(HostingCapacityDataclass): pu_target: float """Voltage p.u. to move the average customer voltage towards.""" @@ -827,7 +851,7 @@ class RegulatorConfig: @dataclass -class DvmsConfig: +class DvmsConfig(HostingCapacityDataclass): lower_limit: float """The lower limit of voltage (p.u.) considered acceptable for the purposes of DVMS.""" @@ -848,7 +872,7 @@ class DvmsConfig: @dataclass -class InterventionConfig: +class InterventionConfig(HostingCapacityDataclass): base_work_package_id: str """ ID of the work package that this intervention is based on. @@ -893,7 +917,7 @@ class InterventionConfig: @dataclass -class ForecastConfig(object): +class ForecastConfig(HostingCapacityDataclass): feeders: List[str] """The feeders to process in this work package""" @@ -914,10 +938,19 @@ class ForecastConfig(object): load database for accurate results. Specifying an invalid time (i.e one with no load data) will result in inaccurate results. 
""" + def to_json(self) -> Any: + _json = super().to_json() + if isinstance(self.load_time, TimePeriod): + _json['timePeriod'] = _json.pop('loadTime') + _json['fixedTime'] = None + elif isinstance(self.load_time, FixedTime): + _json['fixedTime'] = _json.pop('loadTime') + _json['timePeriod'] = None + return _json @dataclass -class FeederConfig(object): +class FeederConfig(HostingCapacityDataclass): feeder: str """The feeder to process in this work package""" @@ -938,16 +971,24 @@ class FeederConfig(object): load database for accurate results. Specifying an invalid time (i.e one with no load data) will result in inaccurate results. """ + def to_json(self) -> Any: + _json = super().to_json() + if isinstance(self.load_time, TimePeriod): + _json['timePeriod'] = _json.pop('loadTime') + elif isinstance(self.load_time, FixedTime): + _json['fixedTime'] = _json.pop('loadTime') + return _json + @dataclass -class FeederConfigs(object): +class FeederConfigs(HostingCapacityDataclass): configs: list[FeederConfig] """The feeder to process in this work package""" @dataclass -class WorkPackageConfig: +class WorkPackageConfig(HostingCapacityDataclass): """ A data class representing the configuration for a hosting capacity work package """ name: str syf_config: Union[ForecastConfig, FeederConfigs] @@ -972,9 +1013,24 @@ class WorkPackageConfig: intervention: Optional[InterventionConfig] = None """Configuration for applying an intervention""" + def to_json(self) -> dict: + _json = super().to_json() + if _json.get('syfConfig'): + if isinstance(self.syf_config, ForecastConfig): + _json['forecastConfig'] = _json.pop('syfConfig') + _json['feederConfigs'] = None + elif isinstance(self.syf_config, FeederConfigs): + _json['feederConfigs'] = _json.pop('syfConfig') + _json['forecastConfig'] = None + + if _json.get('executorConfig') is None: + _json['executorConfig'] = {} + + return {"input": _json, 'workPackageName': _json.pop('name')} + @dataclass -class WorkPackageProgress: +class WorkPackageProgress(HostingCapacityDataclass): id: str progress_percent: int pending: List[str] @@ -986,6 +1042,6 @@ class WorkPackageProgress: @dataclass -class WorkPackagesProgress: +class WorkPackagesProgress(HostingCapacityDataclass): pending: List[str] in_progress: List[WorkPackageProgress] diff --git a/test/test_eas_client.py b/test/test_eas_client.py index 4d1e365..4609dd3 100644 --- a/test/test_eas_client.py +++ b/test/test_eas_client.py @@ -19,14 +19,17 @@ from zepben.eas import EasClient, Study, SolveConfig from zepben.eas import FeederConfig, ForecastConfig, FixedTimeLoadOverride -from zepben.eas.client.ingestor import IngestorConfigInput, IngestorRunsSortCriteriaInput, IngestorRunsFilterInput, \ - IngestorRunState, IngestorRuntimeKind +from zepben.eas.client.auth_method import BaseAuthMethod, TokenAuth +from zepben.eas.client.ingestor import IngestorConfigInput, IngestorRunsFilterInput, IngestorRunState, \ + IngestorRuntimeKind, IngestorRunsSortCriteriaInput from zepben.eas.client.opendss import OpenDssConfig, GetOpenDssModelsFilterInput, OpenDssModelState, \ GetOpenDssModelsSortCriteriaInput, \ Order from zepben.eas.client.study import Result from zepben.eas.client.work_package import FeederConfigs, TimePeriodLoadOverride, \ - FixedTime, NodeLevelResultsConfig, PVVoltVARVoltWattConfig + FixedTime, NodeLevelResultsConfig, ResultProcessorConfig, WriterConfig, WriterOutputConfig, EnhancedMetricsConfig, \ + StoredResultsConfig, \ + MetricsResultsConfig, PVVoltVARVoltWattConfig from zepben.eas.client.work_package import 
WorkPackageConfig, TimePeriod, GeneratorConfig, ModelConfig, \ FeederScenarioAllocationStrategy, LoadPlacement, MeterPlacementConfig, SwitchMeterPlacementConfig, SwitchClass, \ SolveMode, RawResultsConfig @@ -62,37 +65,43 @@ def json(self): def test_create_eas_client_success(): eas_client = EasClient( - mock_host, - mock_port, - protocol=mock_protocol, - verify_certificate=mock_verify_certificate + BaseAuthMethod( + mock_host, + mock_port, + protocol=mock_protocol, + verify_certificate=mock_verify_certificate + ) ) assert eas_client is not None - assert eas_client._host == mock_host - assert eas_client._port == mock_port - assert eas_client._protocol == mock_protocol + assert eas_client._auth._host == mock_host + assert eas_client._auth._port == mock_port + assert eas_client._auth.protocol == mock_protocol assert eas_client._verify_certificate == mock_verify_certificate def test_create_eas_client_with_access_token_success(): eas_client = EasClient( - mock_host, - mock_port, - access_token=mock_access_token, + TokenAuth( + mock_host, + mock_port, + access_token=mock_access_token, + ) ) assert eas_client is not None - assert eas_client._host == mock_host - assert eas_client._port == mock_port - assert eas_client._access_token == mock_access_token + assert eas_client._auth._host == mock_host + assert eas_client._auth._port == mock_port + assert eas_client._auth.token.split()[1] == mock_access_token def test_get_request_headers_adds_access_token_in_auth_header(): eas_client = EasClient( - mock_host, - mock_port, - access_token=mock_access_token, + TokenAuth( + mock_host, + mock_port, + access_token=mock_access_token, + ) ) headers = eas_client._get_request_headers() @@ -102,13 +111,15 @@ def test_get_request_headers_adds_access_token_in_auth_header(): @mock.patch("zepben.auth.client.zepben_token_fetcher.ZepbenTokenFetcher.fetch_token", return_value="test_token3") def test_get_request_headers_adds_token_from_token_fetcher_in_auth_header(_): eas_client = EasClient( - mock_host, - mock_port, - token_fetcher=ZepbenTokenFetcher(audience="fake", token_endpoint="unused") + TokenAuth( + mock_host, + mock_port, + token_fetcher=ZepbenTokenFetcher(audience="fake", token_endpoint="unused") + ) ) assert eas_client is not None - assert eas_client._token_fetcher is not None + assert eas_client._auth._token_fetcher is not None headers = eas_client._get_request_headers() assert headers["authorization"] == "test_token3" @@ -117,22 +128,24 @@ def test_get_request_headers_adds_token_from_token_fetcher_in_auth_header(_): {"authType": "AUTH0", "audience": mock_audience, "issuer": "test_issuer"}, 200)) def test_create_eas_client_with_password_success(_): eas_client = EasClient( - mock_host, - mock_port, - client_id=mock_client_id, - username=mock_username, - password=mock_password, - verify_certificate=mock_verify_certificate + TokenAuth( + mock_host, + mock_port, + client_id=mock_client_id, + username=mock_username, + password=mock_password, + verify_certificate=mock_verify_certificate + ) ) assert eas_client is not None - assert eas_client._token_fetcher is not None - assert eas_client._token_fetcher.token_request_data["grant_type"] == "password" - assert eas_client._token_fetcher.token_request_data["client_id"] == mock_client_id - assert eas_client._token_fetcher.token_request_data["username"] == mock_username - assert eas_client._token_fetcher.token_request_data["password"] == mock_password - assert eas_client._host == mock_host - assert eas_client._port == mock_port + assert eas_client._auth._token_fetcher is 
not None + assert eas_client._auth._token_fetcher.token_request_data["grant_type"] == "password" + assert eas_client._auth._token_fetcher.token_request_data["client_id"] == mock_client_id + assert eas_client._auth._token_fetcher.token_request_data["username"] == mock_username + assert eas_client._auth._token_fetcher.token_request_data["password"] == mock_password + assert eas_client._auth._host == mock_host + assert eas_client._auth._port == mock_port assert eas_client._verify_certificate == mock_verify_certificate @@ -140,19 +153,21 @@ def test_create_eas_client_with_password_success(_): {"authType": "AUTH0", "audience": mock_audience, "issuer": "test_issuer"}, 200)) def test_create_eas_client_with_client_secret_success(_): eas_client = EasClient( - mock_host, - mock_port, - client_id=mock_client_id, - client_secret=mock_client_secret, - verify_certificate=mock_verify_certificate + TokenAuth( + mock_host, + mock_port, + client_id=mock_client_id, + client_secret=mock_client_secret, + verify_certificate=mock_verify_certificate + ) ) assert eas_client is not None - assert eas_client._token_fetcher is not None - assert eas_client._token_fetcher.token_request_data["grant_type"] == "client_credentials" - assert eas_client._token_fetcher.token_request_data["client_secret"] == mock_client_secret - assert eas_client._host == mock_host - assert eas_client._port == mock_port + assert eas_client._auth._token_fetcher is not None + assert eas_client._auth._token_fetcher.token_request_data["grant_type"] == "client_credentials" + assert eas_client._auth._token_fetcher.token_request_data["client_secret"] == mock_client_secret + assert eas_client._auth._host == mock_host + assert eas_client._auth._port == mock_port assert eas_client._verify_certificate == mock_verify_certificate @@ -180,9 +195,11 @@ def httpserver_ssl_context(localhost_cert): def test_get_work_package_cost_estimation_no_verify_success(httpserver: HTTPServer): eas_client = EasClient( - LOCALHOST, - httpserver.port, - verify_certificate=False + BaseAuthMethod( + LOCALHOST, + httpserver.port, + verify_certificate=False + ) ) httpserver.expect_oneshot_request("/api/graphql").respond_with_json( @@ -209,9 +226,11 @@ def test_get_work_package_cost_estimation_no_verify_success(httpserver: HTTPServ def test_get_work_package_cost_estimation_invalid_certificate_failure(ca: trustme.CA, httpserver: HTTPServer): with trustme.Blob(b"invalid ca").tempfile() as ca_filename: eas_client = EasClient( - LOCALHOST, - httpserver.port, - verify_certificate=True, + BaseAuthMethod( + LOCALHOST, + httpserver.port, + verify_certificate=True, + ), ca_filename=ca_filename ) @@ -238,9 +257,11 @@ def test_get_work_package_cost_estimation_invalid_certificate_failure(ca: trustm def test_get_work_package_cost_estimation_valid_certificate_success(ca: trustme.CA, httpserver: HTTPServer): with ca.cert_pem.tempfile() as ca_filename: eas_client = EasClient( - LOCALHOST, - httpserver.port, - verify_certificate=True, + BaseAuthMethod( + LOCALHOST, + httpserver.port, + verify_certificate=True, + ), ca_filename=ca_filename ) @@ -256,7 +277,7 @@ def test_get_work_package_cost_estimation_valid_certificate_success(ca: trustme. ["scenario"], FixedTime( datetime(2022, 1, 1), - {"meter": FixedTimeLoadOverride(1, 2, 3, 4)} + {"meter": FixedTimeLoadOverride(1.1, 2.1, 3.1, 4.1)} # FIXME: is this test data wrong, or is it the type hinting? ) )] ) @@ -268,9 +289,11 @@ def test_get_work_package_cost_estimation_valid_certificate_success(ca: trustme. 
def test_run_hosting_capacity_work_package_no_verify_success(httpserver: HTTPServer): eas_client = EasClient( - LOCALHOST, - httpserver.port, - verify_certificate=False + BaseAuthMethod( + LOCALHOST, + httpserver.port, + verify_certificate=False + ) ) httpserver.expect_oneshot_request("/api/graphql").respond_with_json({"data": {"runWorkPackage": "workPackageId"}}) @@ -296,9 +319,11 @@ def test_run_hosting_capacity_work_package_no_verify_success(httpserver: HTTPSer def test_run_hosting_capacity_work_package_invalid_certificate_failure(ca: trustme.CA, httpserver: HTTPServer): with trustme.Blob(b"invalid ca").tempfile() as ca_filename: eas_client = EasClient( - LOCALHOST, - httpserver.port, - verify_certificate=True, + BaseAuthMethod( + LOCALHOST, + httpserver.port, + verify_certificate=True, + ), ca_filename=ca_filename ) @@ -325,9 +350,11 @@ def test_run_hosting_capacity_work_package_invalid_certificate_failure(ca: trust def test_run_hosting_capacity_work_package_valid_certificate_success(ca: trustme.CA, httpserver: HTTPServer): with ca.cert_pem.tempfile() as ca_filename: eas_client = EasClient( - LOCALHOST, - httpserver.port, - verify_certificate=True, + BaseAuthMethod( + LOCALHOST, + httpserver.port, + verify_certificate=True, + ), ca_filename=ca_filename ) @@ -354,9 +381,11 @@ def test_run_hosting_capacity_work_package_valid_certificate_success(ca: trustme def test_cancel_hosting_capacity_work_package_no_verify_success(httpserver: HTTPServer): eas_client = EasClient( - LOCALHOST, - httpserver.port, - verify_certificate=False + BaseAuthMethod( + LOCALHOST, + httpserver.port, + verify_certificate=False + ) ) httpserver.expect_oneshot_request("/api/graphql").respond_with_json( @@ -370,9 +399,11 @@ def test_cancel_hosting_capacity_work_package_no_verify_success(httpserver: HTTP def test_cancel_hosting_capacity_work_package_invalid_certificate_failure(ca: trustme.CA, httpserver: HTTPServer): with trustme.Blob(b"invalid ca").tempfile() as ca_filename: eas_client = EasClient( - LOCALHOST, - httpserver.port, - verify_certificate=True, + BaseAuthMethod( + LOCALHOST, + httpserver.port, + verify_certificate=True, + ), ca_filename=ca_filename ) @@ -385,9 +416,11 @@ def test_cancel_hosting_capacity_work_package_invalid_certificate_failure(ca: tr def test_cancel_hosting_capacity_work_package_valid_certificate_success(ca: trustme.CA, httpserver: HTTPServer): with ca.cert_pem.tempfile() as ca_filename: eas_client = EasClient( - LOCALHOST, - httpserver.port, - verify_certificate=True, + BaseAuthMethod( + LOCALHOST, + httpserver.port, + verify_certificate=True, + ), ca_filename=ca_filename ) @@ -400,9 +433,11 @@ def test_cancel_hosting_capacity_work_package_valid_certificate_success(ca: trus def test_get_hosting_capacity_work_package_progress_no_verify_success(httpserver: HTTPServer): eas_client = EasClient( - LOCALHOST, - httpserver.port, - verify_certificate=False + BaseAuthMethod( + LOCALHOST, + httpserver.port, + verify_certificate=False + ) ) httpserver.expect_oneshot_request("/api/graphql").respond_with_json( @@ -416,9 +451,11 @@ def test_get_hosting_capacity_work_package_progress_no_verify_success(httpserver def test_get_hosting_capacity_work_package_progress_invalid_certificate_failure(ca: trustme.CA, httpserver: HTTPServer): with trustme.Blob(b"invalid ca").tempfile() as ca_filename: eas_client = EasClient( - LOCALHOST, - httpserver.port, - verify_certificate=True, + BaseAuthMethod( + LOCALHOST, + httpserver.port, + verify_certificate=True, + ), ca_filename=ca_filename ) @@ -431,9 +468,11 @@ def 
test_get_hosting_capacity_work_package_progress_invalid_certificate_failure( def test_get_hosting_capacity_work_package_progress_valid_certificate_success(ca: trustme.CA, httpserver: HTTPServer): with ca.cert_pem.tempfile() as ca_filename: eas_client = EasClient( - LOCALHOST, - httpserver.port, - verify_certificate=True, + BaseAuthMethod( + LOCALHOST, + httpserver.port, + verify_certificate=True, + ), ca_filename=ca_filename ) @@ -446,9 +485,11 @@ def test_get_hosting_capacity_work_package_progress_valid_certificate_success(ca def test_upload_study_no_verify_success(httpserver: HTTPServer): eas_client = EasClient( - LOCALHOST, - httpserver.port, - verify_certificate=False + BaseAuthMethod( + LOCALHOST, + httpserver.port, + verify_certificate=False + ) ) httpserver.expect_oneshot_request("/api/graphql").respond_with_json({"result": "success"}) @@ -460,9 +501,11 @@ def test_upload_study_no_verify_success(httpserver: HTTPServer): def test_upload_study_invalid_certificate_failure(ca: trustme.CA, httpserver: HTTPServer): with trustme.Blob(b"invalid ca").tempfile() as ca_filename: eas_client = EasClient( - LOCALHOST, - httpserver.port, - verify_certificate=True, + BaseAuthMethod( + LOCALHOST, + httpserver.port, + verify_certificate=True, + ), ca_filename=ca_filename ) @@ -474,9 +517,11 @@ def test_upload_study_invalid_certificate_failure(ca: trustme.CA, httpserver: HT def test_upload_study_valid_certificate_success(ca: trustme.CA, httpserver: HTTPServer): with ca.cert_pem.tempfile() as ca_filename: eas_client = EasClient( - LOCALHOST, - httpserver.port, - verify_certificate=True, + BaseAuthMethod( + LOCALHOST, + httpserver.port, + verify_certificate=True, + ), ca_filename=ca_filename ) @@ -489,35 +534,43 @@ def test_upload_study_valid_certificate_success(ca: trustme.CA, httpserver: HTTP def test_raises_error_if_auth_configured_with_http_server(httpserver: HTTPServer): with pytest.raises(ValueError): EasClient( - LOCALHOST, - httpserver.port, - protocol="http", - client_id=mock_client_id, - username=mock_username, - password=mock_password + TokenAuth( + LOCALHOST, + httpserver.port, + protocol="http", + client_id=mock_client_id, + username=mock_username, + password=mock_password + ) ) def test_raises_error_if_token_fetcher_and_creds_configured(httpserver: HTTPServer): - with pytest.raises(ValueError, match="You cannot provide both a token_fetcher and credentials"): + with pytest.raises(ValueError, match="Incompatible arguments passed to connect to secured Evolve App Server."): + # noinspection PyArgumentList EasClient( - LOCALHOST, - httpserver.port, - protocol="https", - client_id=mock_client_id, - username=mock_username, - password=mock_password, - token_fetcher=ZepbenTokenFetcher(audience="test", auth_method="test", token_endpoint="some-endpoint") + TokenAuth( + LOCALHOST, + httpserver.port, + protocol="https", + client_id=mock_client_id, + username=mock_username, + password=mock_password, + token_fetcher=ZepbenTokenFetcher(audience="test", auth_method="test", token_endpoint="some-endpoint") + ) ) - with pytest.raises(ValueError, match="You cannot provide both a token_fetcher and credentials"): + with pytest.raises(ValueError, match="Incompatible arguments passed to connect to secured Evolve App Server."): + # noinspection PyArgumentList EasClient( - LOCALHOST, - httpserver.port, - protocol="https", - client_id=mock_client_id, - client_secret=mock_client_secret, - token_fetcher=ZepbenTokenFetcher(audience="test", auth_method="test", token_endpoint="test") + TokenAuth( + LOCALHOST, + 
httpserver.port, + protocol="https", + client_id=mock_client_id, + client_secret=mock_client_secret, + token_fetcher=ZepbenTokenFetcher(audience="test", auth_method="test", token_endpoint="test") + ) ) @@ -525,85 +578,102 @@ def test_raises_error_if_token_fetcher_and_creds_configured(httpserver: HTTPServ {"authType": "AUTH0", "audience": mock_audience, "issuer": "test_issuer"}, 200)) def test_allows_secret_and_creds_configured(httpserver: HTTPServer): eas_client = EasClient( - mock_host, - mock_port, - protocol="https", - client_id=mock_client_id, - client_secret=mock_client_secret, - username=mock_username, - password=mock_password + TokenAuth( + mock_host, + mock_port, + protocol="https", + client_id=mock_client_id, + client_secret=mock_client_secret, + username=mock_username, + password=mock_password + ) ) assert eas_client is not None - assert eas_client._token_fetcher is not None - assert eas_client._token_fetcher.token_request_data["grant_type"] == "password" - assert eas_client._token_fetcher.token_request_data["client_id"] == mock_client_id - assert eas_client._token_fetcher.token_request_data["username"] == mock_username - assert eas_client._token_fetcher.token_request_data["password"] == mock_password - assert eas_client._token_fetcher.token_request_data["client_secret"] == mock_client_secret - assert eas_client._host == mock_host - assert eas_client._port == mock_port + assert eas_client._auth._token_fetcher is not None + assert eas_client._auth._token_fetcher.token_request_data["grant_type"] == "password" + assert eas_client._auth._token_fetcher.token_request_data["client_id"] == mock_client_id + assert eas_client._auth._token_fetcher.token_request_data["username"] == mock_username + assert eas_client._auth._token_fetcher.token_request_data["password"] == mock_password + assert eas_client._auth._token_fetcher.token_request_data["client_secret"] == mock_client_secret + assert eas_client._auth._host == mock_host + assert eas_client._auth._port == mock_port def test_raises_error_if_access_token_and_creds_configured(httpserver: HTTPServer): with pytest.raises(ValueError) as error_message_for_username: + # noinspection PyArgumentList EasClient( - LOCALHOST, - httpserver.port, - protocol="https", - access_token=mock_access_token, - username=mock_username, + TokenAuth( + LOCALHOST, + httpserver.port, + protocol="https", + access_token=mock_access_token, + username=mock_username, + ) ) - assert "Incompatible arguments passed to connect to secured Evolve App Server. You cannot provide multiple types of authentication. When using an access_token, do not provide client_id, client_secret, username, password, or token_fetcher." in str( + assert "Incompatible arguments passed to connect to secured Evolve App Server." in str( error_message_for_username.value) with pytest.raises(ValueError) as error_message_for_password: + # noinspection PyArgumentList EasClient( - LOCALHOST, - httpserver.port, - protocol="https", - access_token=mock_access_token, - password=mock_password, + TokenAuth( + LOCALHOST, + httpserver.port, + protocol="https", + access_token=mock_access_token, + password=mock_password, + ) ) - assert "Incompatible arguments passed to connect to secured Evolve App Server. You cannot provide multiple types of authentication. When using an access_token, do not provide client_id, client_secret, username, password, or token_fetcher." in str( + assert "Incompatible arguments passed to connect to secured Evolve App Server." 
in str( error_message_for_password.value) def test_raises_error_if_access_token_and_token_fetcher_configured(httpserver: HTTPServer): with pytest.raises(ValueError) as error_message_for_username: + # noinspection PyArgumentList EasClient( - LOCALHOST, - httpserver.port, - protocol="https", - access_token=mock_access_token, - token_fetcher=ZepbenTokenFetcher(audience="test", auth_method="test", token_endpoint="test") + TokenAuth( + LOCALHOST, + httpserver.port, + protocol="https", + access_token=mock_access_token, + token_fetcher=ZepbenTokenFetcher(audience="test", auth_method="test", token_endpoint="test") + ) ) - assert "Incompatible arguments passed to connect to secured Evolve App Server. You cannot provide multiple types of authentication. When using an access_token, do not provide client_id, client_secret, username, password, or token_fetcher." in str( + assert "Incompatible arguments passed to connect to secured Evolve App Server." in str( error_message_for_username.value) def test_raises_error_if_access_token_and_client_id_configured(httpserver: HTTPServer): with pytest.raises(ValueError) as error_message_for_username: + # noinspection PyArgumentList EasClient( - LOCALHOST, - httpserver.port, - protocol="https", - access_token=mock_access_token, - client_id=mock_client_id + TokenAuth( + LOCALHOST, + httpserver.port, + protocol="https", + access_token=mock_access_token, + client_id=mock_client_id + ) ) - assert "Incompatible arguments passed to connect to secured Evolve App Server. You cannot provide multiple types of authentication. When using an access_token, do not provide client_id, client_secret, username, password, or token_fetcher." in str( + assert "Incompatible arguments passed to connect to secured Evolve App Server." in str( error_message_for_username.value) def test_raises_error_if_access_token_and_client_secret_configured(httpserver: HTTPServer): with pytest.raises(ValueError) as error_message_for_username: + # noinspection PyArgumentList EasClient( - LOCALHOST, - httpserver.port, - protocol="https", - access_token=mock_access_token, - client_secret=mock_client_secret + TokenAuth( + LOCALHOST, + httpserver.port, + protocol="https", + access_token=mock_access_token, + client_secret=mock_client_secret + ) ) - assert "Incompatible arguments passed to connect to secured Evolve App Server. You cannot provide multiple types of authentication. When using an access_token, do not provide client_id, client_secret, username, password, or token_fetcher." in str( + assert "Incompatible arguments passed to connect to secured Evolve App Server." 
in str( error_message_for_username.value) @@ -622,9 +692,11 @@ def hosting_capacity_run_calibration_request_handler(request): def test_run_hosting_capacity_calibration_no_verify_success(httpserver: HTTPServer): eas_client = EasClient( - LOCALHOST, - httpserver.port, - verify_certificate=False + BaseAuthMethod( + LOCALHOST, + httpserver.port, + verify_certificate=False + ) ) httpserver.expect_oneshot_request("/api/graphql").respond_with_handler( @@ -637,9 +709,11 @@ def test_run_hosting_capacity_calibration_no_verify_success(httpserver: HTTPServ def test_run_hosting_capacity_calibration_invalid_certificate_failure(ca: trustme.CA, httpserver: HTTPServer): with trustme.Blob(b"invalid ca").tempfile() as ca_filename: eas_client = EasClient( - LOCALHOST, - httpserver.port, - verify_certificate=True, + BaseAuthMethod( + LOCALHOST, + httpserver.port, + verify_certificate=True, + ), ca_filename=ca_filename ) @@ -651,9 +725,11 @@ def test_run_hosting_capacity_calibration_invalid_certificate_failure(ca: trustm def test_run_hosting_capacity_calibration_valid_certificate_success(ca: trustme.CA, httpserver: HTTPServer): with ca.cert_pem.tempfile() as ca_filename: eas_client = EasClient( - LOCALHOST, - httpserver.port, - verify_certificate=True, + BaseAuthMethod( + LOCALHOST, + httpserver.port, + verify_certificate=True, + ), ca_filename=ca_filename ) @@ -676,9 +752,11 @@ def get_hosting_capacity_run_calibration_request_handler(request): def test_get_hosting_capacity_calibration_run_no_verify_success(httpserver: HTTPServer): eas_client = EasClient( - LOCALHOST, - httpserver.port, - verify_certificate=False + BaseAuthMethod( + LOCALHOST, + httpserver.port, + verify_certificate=False + ) ) httpserver.expect_oneshot_request("/api/graphql").respond_with_handler( @@ -691,9 +769,11 @@ def test_get_hosting_capacity_calibration_run_no_verify_success(httpserver: HTTP def test_get_hosting_capacity_calibration_run_invalid_certificate_failure(ca: trustme.CA, httpserver: HTTPServer): with trustme.Blob(b"invalid ca").tempfile() as ca_filename: eas_client = EasClient( - LOCALHOST, - httpserver.port, - verify_certificate=True, + BaseAuthMethod( + LOCALHOST, + httpserver.port, + verify_certificate=True, + ), ca_filename=ca_filename ) @@ -705,9 +785,11 @@ def test_get_hosting_capacity_calibration_run_invalid_certificate_failure(ca: tr def test_get_hosting_capacity_calibration_run_valid_certificate_success(ca: trustme.CA, httpserver: HTTPServer): with ca.cert_pem.tempfile() as ca_filename: eas_client = EasClient( - LOCALHOST, - httpserver.port, - verify_certificate=True, + BaseAuthMethod( + LOCALHOST, + httpserver.port, + verify_certificate=True, + ), ca_filename=ca_filename ) @@ -790,9 +872,11 @@ def hosting_capacity_run_calibration_with_calibration_time_request_handler(reque def test_run_hosting_capacity_calibration_with_calibration_time_no_verify_success(httpserver: HTTPServer): eas_client = EasClient( - LOCALHOST, - httpserver.port, - verify_certificate=False + BaseAuthMethod( + LOCALHOST, + httpserver.port, + verify_certificate=False + ) ) httpserver.expect_oneshot_request("/api/graphql").respond_with_handler( @@ -811,9 +895,11 @@ def test_run_hosting_capacity_calibration_with_calibration_time_no_verify_succes def test_run_hosting_capacity_calibration_with_explicit_transformer_tap_settings_no_generator_config( httpserver: HTTPServer): eas_client = EasClient( - LOCALHOST, - httpserver.port, - verify_certificate=False + BaseAuthMethod( + LOCALHOST, + httpserver.port, + verify_certificate=False + ) ) 
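+    # Connection details (host, port, TLS verification) now travel on the auth
+    # object rather than on EasClient itself: each test builds a credential-free
+    # BaseAuthMethod and passes it as the client's first argument, with TokenAuth
+    # slotting into the same position when a test exercises authenticated
+    # connections.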
httpserver.expect_oneshot_request("/api/graphql").respond_with_handler( @@ -913,9 +999,11 @@ def hosting_capacity_run_calibration_with_generator_config_request_handler(reque def test_run_hosting_capacity_calibration_with_explicit_transformer_tap_settings_partial_generator_config( httpserver: HTTPServer): eas_client = EasClient( - LOCALHOST, - httpserver.port, - verify_certificate=False + BaseAuthMethod( + LOCALHOST, + httpserver.port, + verify_certificate=False + ) ) httpserver.expect_oneshot_request("/api/graphql").respond_with_handler( @@ -1006,9 +1094,11 @@ def hosting_capacity_run_calibration_with_partial_model_config_request_handler(r def test_run_hosting_capacity_calibration_with_explicit_transformer_tap_settings_partial_model_config( httpserver: HTTPServer): eas_client = EasClient( - LOCALHOST, - httpserver.port, - verify_certificate=False + BaseAuthMethod( + LOCALHOST, + httpserver.port, + verify_certificate=False + ) ) httpserver.expect_oneshot_request("/api/graphql").respond_with_handler( @@ -1025,9 +1115,11 @@ def test_run_hosting_capacity_calibration_with_explicit_transformer_tap_settings def test_run_hosting_capacity_calibration_with_explicit_transformer_tap_settings(httpserver: HTTPServer): eas_client = EasClient( - LOCALHOST, - httpserver.port, - verify_certificate=False + BaseAuthMethod( + LOCALHOST, + httpserver.port, + verify_certificate=False + ) ) httpserver.expect_oneshot_request("/api/graphql").respond_with_handler( @@ -1057,9 +1149,11 @@ def get_hosting_capacity_calibration_sets_request_handler(request): def test_get_hosting_capacity_calibration_sets_no_verify_success(httpserver: HTTPServer): eas_client = EasClient( - LOCALHOST, - httpserver.port, - verify_certificate=False + BaseAuthMethod( + LOCALHOST, + httpserver.port, + verify_certificate=False + ) ) httpserver.expect_oneshot_request("/api/graphql").respond_with_handler( @@ -1068,6 +1162,387 @@ def test_get_hosting_capacity_calibration_sets_no_verify_success(httpserver: HTT httpserver.check_assertions() assert res == ["one", "two", "three"] +def test_work_package_config_to_json(): + config = { + "work_package_name": "test_default_load_work_package", + "feeders": ["MS1112"], + "load_time": { + "start1": "2024-06-01T00:00:00", + "end1": "2025-06-01T00:00:00", + "start2": "2023-07-22T00:00:00", + "end2": "2024-01-22T00:00:00" + }, + "forecast_years": [ + 2025, 2026 + ], + "scenarios": [ + "test_central_with_transfers_feeder_poe50" + ], + "default_load_watts": [920, 910, 820, 690, 600, 530, 490, 470, 450, 450, 470, 490, 540, 600, 620, 680, 700, 700, 710, 720, 720, 730, 730, 730, 730, 730, 710, 710, 720, 730, 740, 780, 830, 890, 930, 1000, 1020, 1020, 1000, 970, 940, 900, 860, 800, 830, 870, 900, 920], + "default_gen_watts": None, + "default_load_var": None, + "default_gen_var": None + } + + forecast_config = ForecastConfig( + feeders=config["feeders"], + years=config["forecast_years"], + scenarios=config["scenarios"], + load_time=TimePeriod( + start_time=datetime.fromisoformat(config["load_time"]["start1"]), + end_time=datetime.fromisoformat(config["load_time"]["end1"]), + ) + ) + + work_package = WorkPackageConfig( + name=config["work_package_name"], + syf_config=forecast_config, + generator_config=GeneratorConfig( + model=ModelConfig( + load_vmax_pu=1.2, + load_vmin_pu=0.8, + p_factor_base_exports=-1, + p_factor_base_imports=1, + p_factor_forecast_pv=1, + fix_single_phase_loads=False, + max_single_phase_load=15000.0, + max_load_service_line_ratio=1.0, + max_load_lv_line_ratio=2.0, + max_load_tx_ratio=2.0, + 
max_gen_tx_ratio=4.0, + fix_overloading_consumers=True, + fix_undersized_service_lines=True, + feeder_scenario_allocation_strategy=FeederScenarioAllocationStrategy.ADDITIVE, + closed_loop_v_reg_enabled=False, + closed_loop_v_reg_set_point=0.9925, + seed=123, + load_interval_length_hours=0.5, + default_load_watts=config['default_load_watts'], + default_gen_watts=config['default_gen_watts'], + default_load_var=config['default_load_var'], + default_gen_var=config['default_gen_var'], + ), + solve=SolveConfig(step_size_minutes=30.0), + node_level_results=NodeLevelResultsConfig( + collect_all_conductors=False, + collect_all_energy_consumers=True, + collect_all_switches=False, + collect_all_transformers=True, + collect_current=False, + collect_power=True, + collect_voltage=True, + mrids_to_collect=['mrid_one', 'mrid_two'], + ), + raw_results=RawResultsConfig(True, True, True, True, True) + ), + + result_processor_config=ResultProcessorConfig( + writer_config=WriterConfig( + output_writer_config=WriterOutputConfig( + enhanced_metrics_config=EnhancedMetricsConfig( + True, + False, + True, + True, + True, + True, + True, + True, + True, + True, + ))), + stored_results=StoredResultsConfig(False, False, True, False), + metrics=MetricsResultsConfig(True) + ), + quality_assurance_processing=True + ) + + expected = { + 'input': { + 'executorConfig': {}, + 'feederConfigs': None, + 'forecastConfig': { + 'feeders': ['MS1112'], + 'fixedTime': None, + 'scenarios': ['test_central_with_transfers_feeder_poe50'], + 'timePeriod': { + 'endTime': '2025-06-01T00:00:00', + 'overrides': None, + 'startTime': '2024-06-01T00:00:00' + }, + 'years': [2025, 2026], + }, + 'generatorConfig': { + 'model': { + 'calibration': None, + 'closedLoopTimeDelay': None, + 'closedLoopVBand': None, + 'closedLoopVLimit': None, + 'closedLoopVRegEnabled': False, + 'closedLoopVRegReplaceAll': None, + 'closedLoopVRegSetPoint': 0.9925, + 'collapseLvNetworks': None, + 'collapseNegligibleImpedances': None, + 'collapseSWER': None, + 'combineCommonImpedances': None, + 'ctPrimScalingFactor': None, + 'defaultGenVar': None, + 'defaultGenWatts': None, + 'defaultLoadVar': None, + 'defaultLoadWatts': [920, 910, 820, 690, 600, 530, 490, 470, 450, 450, 470, 490, 540, 600, 620, 680, 700, 700, 710, 720, 720, 730, 730, 730, 730, 730, 710, 710, 720, 730, 740, 780, 830, 890, 930, 1000, 1020, 1020, 1000, 970, 940, 900, 860, 800, 830, 870, 900, 920], + 'defaultTapChangerBand': None, + 'defaultTapChangerSetPointPu': None, + 'defaultTapChangerTimeDelay': None, + 'feederScenarioAllocationStrategy': 'ADDITIVE', + 'fixOverloadingConsumers': True, + 'fixSinglePhaseLoads': False, + 'fixUndersizedServiceLines': True, + 'genVMaxPu': None, + 'genVMinPu': None, + 'inverterControlConfig': None, + 'loadIntervalLengthHours': 0.5, + 'loadModel': None, + 'loadPlacement': None, + 'loadVMaxPu': 1.2, + 'loadVMinPu': 0.8, + 'maxGenTxRatio': 4.0, + 'maxLoadLvLineRatio': 2.0, + 'maxLoadServiceLineRatio': 1.0, + 'maxLoadTxRatio': 2.0, + 'maxSinglePhaseLoad': 15000.0, + 'meterPlacementConfig': None, + 'pFactorBaseExports': -1, + 'pFactorBaseImports': 1, + 'pFactorForecastPv': 1, + 'seed': 123, + 'simplifyNetwork': None, + 'useSpanLevelThreshold': False, + 'ratingThreshold': None, + 'simplifyPLSIThreshold': None, + 'emergAmpScaling': None, + 'splitPhaseDefaultLoadLossPercentage': None, + 'splitPhaseLVKV': None, + 'swerVoltageToLineVoltage': None, + 'transformerTapSettings': None, + 'vmPu': None, + }, + 'nodeLevelResults': { + 'collectAllConductors': False, + 'collectAllEnergyConsumers':
True, + 'collectAllSwitches': False, + 'collectAllTransformers': True, + 'collectCurrent': False, + 'collectPower': True, + 'collectVoltage': True, + 'mridsToCollect': ['mrid_one', 'mrid_two'], + }, + 'rawResults': { + 'energyMeterVoltagesRaw': True, + 'energyMetersRaw': True, + 'overloadsRaw': True, + 'resultsPerMeter': True, + 'voltageExceptionsRaw': True, + }, + 'solve': { + 'baseFrequency': None, + 'emergVMaxPu': None, + 'emergVMinPu': None, + 'maxControlIter': None, + 'maxIter': None, + 'mode': None, + 'normVMaxPu': None, + 'normVMinPu': None, + 'stepSizeMinutes': 30.0, + 'voltageBases': None, + }, + }, + 'intervention': None, + 'qualityAssuranceProcessing': True, + 'resultProcessorConfig': { + 'metrics': { + 'calculatePerformanceMetrics': True + }, + 'storedResults': { + 'energyMeterVoltagesRaw': False, + 'energyMetersRaw': False, + 'overloadsRaw': True, + 'voltageExceptionsRaw': False, + }, + 'writerConfig': { + 'outputWriterConfig': { + 'enhancedMetricsConfig': { + 'calculateCO2': True, + 'calculateEmergForGenThermal': True, + 'calculateEmergForLoadThermal': True, + 'calculateNormalForGenThermal': True, + 'calculateNormalForLoadThermal': True, + 'populateConstraints': True, + 'populateDurationCurves': True, + 'populateEnhancedMetrics': True, + 'populateEnhancedMetricsProfile': False, + 'populateWeeklyReports': True + } + }, + 'writerType': None + } + } + }, + 'workPackageName': 'test_default_load_work_package' + } + + assert work_package.to_json() == expected + + +def test_open_dss_config_to_json(): + expected = { + "modelName": "TEST OPENDSS MODEL 1", + "isPublic": True, + "generationSpec": { + "modelOptions": { + "feeder": "feeder1", + "scenario": "scenario1", + "year": 2024, + }, + "modulesConfiguration": { + "common": { + **({"fixedTime": { + "loadTime": "2022-04-01T00:00:00", + "overrides": [{ + 'loadId': 'meter1', + 'loadWattsOverride': [1.0], + 'genWattsOverride': [2.0], + 'loadVarOverride': [3.0], + 'genVarOverride': [4.0], + }] + }} if isinstance(OPENDSS_CONFIG.load_time, FixedTime) else + {"timePeriod": { + "startTime": "2022-04-01T10:13:00", + "endTime": "2023-04-01T12:14:00", + "overrides": [{ + 'loadId': 'meter1', + 'loadWattsOverride': [1.0], + 'genWattsOverride': [2.0], + 'loadVarOverride': [3.0], + 'genVarOverride': [4.0], + }], + }}), + }, + "generator": { + "model": { + "vmPu": 1.0, + "loadVMinPu": 0.80, + "loadVMaxPu": 1.15, + "genVMinPu": 0.50, + 'inverterControlConfig': {'afterCutOffProfile': 'afterProfile', + 'beforeCutOffProfile': 'beforeProfile', + 'cutOffDate': '2024-04-12T11:42:00'}, + "genVMaxPu": 2.00, + "loadModel": 1, + "calibration": False, + "pFactorBaseExports": 0.95, + "pFactorBaseImports": 0.90, + "pFactorForecastPv": 1.0, + "fixSinglePhaseLoads": True, + "maxSinglePhaseLoad": 30000.0, + "fixOverloadingConsumers": True, + "maxLoadTxRatio": 3.0, + "maxGenTxRatio": 10.0, + "fixUndersizedServiceLines": True, + "maxLoadServiceLineRatio": 1.5, + "maxLoadLvLineRatio": 2.0, + "collapseLvNetworks": False, + "collapseNegligibleImpedances": False, + "collapseSWER": False, + "combineCommonImpedances": False, + "feederScenarioAllocationStrategy": "ADDITIVE", + "closedLoopVRegEnabled": True, + "closedLoopVRegReplaceAll": True, + "closedLoopVRegSetPoint": 0.985, + "closedLoopVBand": 2.0, + "closedLoopTimeDelay": 100, + "closedLoopVLimit": 1.1, + "defaultTapChangerTimeDelay": 100, + "defaultTapChangerSetPointPu": 1.0, + "defaultTapChangerBand": 2.0, + "splitPhaseDefaultLoadLossPercentage": 0.4, + "splitPhaseLVKV": 0.25, + "swerVoltageToLineVoltage": [
+ [230, 400], + [240, 415], + [250, 433], + [6350, 11000], + [6400, 11000], + [12700, 22000], + [19100, 33000], + ], + "loadPlacement": "PER_USAGE_POINT", + "loadIntervalLengthHours": 0.5, + "meterPlacementConfig": { + "feederHead": True, + "distTransformers": True, + "switchMeterPlacementConfigs": [ + { + "meterSwitchClass": "LOAD_BREAK_SWITCH", + "namePattern": ".*", + }, + ], + "energyConsumerMeterGroup": "meter group 1", + }, + "seed": 42, + "simplifyNetwork": False, + "defaultLoadWatts": [100.0, 200.0, 300.0], + "defaultGenWatts": [50.0, 150.0, 250.0], + "defaultLoadVar": [10.0, 20.0, 30.0], + "defaultGenVar": [5.0, 15.0, 25.0], + "transformerTapSettings": "tap-3", + "ctPrimScalingFactor": 2.0, + 'useSpanLevelThreshold': True, + 'ratingThreshold': 20.0, + 'simplifyPLSIThreshold': 20.0, + 'emergAmpScaling': 1.8, + }, + 'nodeLevelResults': { + 'collectAllConductors': False, + 'collectAllEnergyConsumers': True, + 'collectAllSwitches': False, + 'collectAllTransformers': True, + 'collectCurrent': False, + 'collectPower': True, + 'collectVoltage': True, + 'mridsToCollect': [ + 'mrid_one', + 'mrid_two', + ] + }, + "solve": { + "normVMinPu": 0.9, + "normVMaxPu": 1.054, + "emergVMinPu": 0.8, + "emergVMaxPu": 1.1, + "baseFrequency": 50, + "voltageBases": [0.4, 0.433, 6.6, 11.0, 22.0, 33.0, 66.0, 132.0], + "maxIter": 25, + "maxControlIter": 20, + "mode": "YEARLY", + "stepSizeMinutes": 60, + }, + "rawResults": { + "energyMeterVoltagesRaw": True, + "energyMetersRaw": True, + "resultsPerMeter": True, + "overloadsRaw": True, + "voltageExceptionsRaw": True, + }, + } + } + } + } + assert OPENDSS_CONFIG.to_json() == expected def run_opendss_export_request_handler(request): actual_body = json.loads(request.data.decode()) @@ -1337,9 +1812,11 @@ def run_opendss_export_request_handler(request): def test_run_opendss_export_no_verify_success(httpserver: HTTPServer): eas_client = EasClient( - LOCALHOST, - httpserver.port, - verify_certificate=False + BaseAuthMethod( + LOCALHOST, + httpserver.port, + verify_certificate=False + ) ) httpserver.expect_oneshot_request("/api/graphql").respond_with_handler(run_opendss_export_request_handler) @@ -1351,9 +1828,11 @@ def test_run_opendss_export_no_verify_success(httpserver: HTTPServer): def test_run_opendss_export_invalid_certificate_failure(ca: trustme.CA, httpserver: HTTPServer): with trustme.Blob(b"invalid ca").tempfile() as ca_filename: eas_client = EasClient( - LOCALHOST, - httpserver.port, - verify_certificate=True, + BaseAuthMethod( + LOCALHOST, + httpserver.port, + verify_certificate=True, + ), ca_filename=ca_filename ) @@ -1365,9 +1844,11 @@ def test_run_opendss_export_invalid_certificate_failure(ca: trustme.CA, httpserv def test_run_opendss_export_valid_certificate_success(ca: trustme.CA, httpserver: HTTPServer): with ca.cert_pem.tempfile() as ca_filename: eas_client = EasClient( - LOCALHOST, - httpserver.port, - verify_certificate=True, + BaseAuthMethod( + LOCALHOST, + httpserver.port, + verify_certificate=True, + ), ca_filename=ca_filename ) @@ -1378,6 +1859,20 @@ def test_run_opendss_export_valid_certificate_success(ca: trustme.CA, httpserver httpserver.check_assertions() assert res == {"result": "success"} +get_ingestor_run_list_query = """ + query listIngestorRuns($filter: IngestorRunsFilterInput, $sort: IngestorRunsSortCriteriaInput) { + listIngestorRuns(filter: $filter, sort: $sort) { + id + containerRuntimeType + payload + token + status + startedAt + statusLastUpdatedAt + completedAt + } + } + """ get_paged_opendss_models_query = """ query 
pagedOpenDssModels($limit: Int, $offset: Long, $filter: GetOpenDssModelsFilterInput, $sort: GetOpenDssModelsSortCriteriaInput) { @@ -1426,9 +1921,11 @@ def get_paged_opendss_models_request_handler(request): def test_get_paged_opendss_models_no_verify_success(httpserver: HTTPServer): eas_client = EasClient( - LOCALHOST, - httpserver.port, - verify_certificate=False + BaseAuthMethod( + LOCALHOST, + httpserver.port, + verify_certificate=False + ) ) httpserver.expect_oneshot_request("/api/graphql").respond_with_handler( @@ -1443,9 +1940,11 @@ def test_get_paged_opendss_models_no_verify_success(httpserver: HTTPServer): def test_get_paged_opendss_models_invalid_certificate_failure(ca: trustme.CA, httpserver: HTTPServer): with trustme.Blob(b"invalid ca").tempfile() as ca_filename: eas_client = EasClient( - LOCALHOST, - httpserver.port, - verify_certificate=True, + BaseAuthMethod( + LOCALHOST, + httpserver.port, + verify_certificate=True, + ), ca_filename=ca_filename ) @@ -1467,9 +1966,11 @@ def get_paged_opendss_models_no_param_request_handler(request): def test_get_paged_opendss_models_valid_certificate_success(ca: trustme.CA, httpserver: HTTPServer): with ca.cert_pem.tempfile() as ca_filename: eas_client = EasClient( - LOCALHOST, - httpserver.port, - verify_certificate=True, + BaseAuthMethod( + LOCALHOST, + httpserver.port, + verify_certificate=True, + ), ca_filename=ca_filename ) @@ -1482,9 +1983,11 @@ def test_get_paged_opendss_models_valid_certificate_success(ca: trustme.CA, http def test_get_opendss_model_download_url_no_verify_success(httpserver: HTTPServer): eas_client = EasClient( - LOCALHOST, - httpserver.port, - verify_certificate=False + BaseAuthMethod( + LOCALHOST, + httpserver.port, + verify_certificate=False + ) ) httpserver.expect_oneshot_request("/api/opendss-model/1", method="GET").respond_with_response(Response( @@ -1499,9 +2002,11 @@ def test_get_opendss_model_download_url_no_verify_success(httpserver: HTTPServer def test_get_opendss_model_download_url_invalid_certificate_failure(ca: trustme.CA, httpserver: HTTPServer): with trustme.Blob(b"invalid ca").tempfile() as ca_filename: eas_client = EasClient( - LOCALHOST, - httpserver.port, - verify_certificate=True, + BaseAuthMethod( + LOCALHOST, + httpserver.port, + verify_certificate=True, + ), ca_filename=ca_filename ) @@ -1516,9 +2021,11 @@ def test_get_opendss_model_download_url_invalid_certificate_failure(ca: trustme. def test_get_opendss_model_download_url_valid_certificate_success(ca: trustme.CA, httpserver: HTTPServer): with ca.cert_pem.tempfile() as ca_filename: eas_client = EasClient( - LOCALHOST, - httpserver.port, - verify_certificate=True, + BaseAuthMethod( + LOCALHOST, + httpserver.port, + verify_certificate=True, + ), ca_filename=ca_filename ) @@ -1544,9 +2051,11 @@ def run_ingestor_request_handler(request): def test_run_ingestor_no_verify_success(httpserver: HTTPServer): eas_client = EasClient( - LOCALHOST, - httpserver.port, - verify_certificate=False + BaseAuthMethod( + LOCALHOST, + httpserver.port, + verify_certificate=False + ) ) httpserver.expect_oneshot_request("/api/graphql").respond_with_handler( @@ -1561,7 +2070,7 @@ def get_ingestor_run_request_handler(request): actual_body = json.loads(request.data.decode()) query = " ".join(actual_body['query'].split()) - assert query == "query getIngestorRun($id: Int!) { getIngestorRun(id: $id) { id containerRuntimeType, payload, token, status, startedAt, statusLastUpdatedAt, completedAt } }" + assert query == "query getIngestorRun($id: Int!) 
{ getIngestorRun(id: $id) { id containerRuntimeType payload token status startedAt statusLastUpdatedAt completedAt } }" assert actual_body['variables'] == {"id": 1} return Response(json.dumps({"result": "success"}), status=200, content_type="application/json") @@ -1569,9 +2078,11 @@ def get_ingestor_run_request_handler(request): def test_get_ingestor_run_no_verify_success(httpserver: HTTPServer): eas_client = EasClient( - LOCALHOST, - httpserver.port, - verify_certificate=False + BaseAuthMethod( + LOCALHOST, + httpserver.port, + verify_certificate=False + ) ) httpserver.expect_oneshot_request("/api/graphql").respond_with_handler(get_ingestor_run_request_handler) @@ -1584,20 +2095,6 @@ def get_ingestor_run_list_request_empty_handler(request): actual_body = json.loads(request.data.decode()) query = " ".join(actual_body['query'].split()) - get_ingestor_run_list_query = """ - query listIngestorRuns($filter: IngestorRunsFilterInput, $sort: IngestorRunsSortCriteriaInput) { - listIngestorRuns(filter: $filter, sort: $sort) { - id - containerRuntimeType, - payload, - token, - status, - startedAt, - statusLastUpdatedAt, - completedAt - } - } - """ assert query == " ".join(line.strip() for line in get_ingestor_run_list_query.strip().splitlines()) assert actual_body['variables'] == {} @@ -1606,9 +2103,11 @@ def get_ingestor_run_list_request_empty_handler(request): def test_get_ingestor_run_list_empty_filter_no_verify_success(httpserver: HTTPServer): eas_client = EasClient( - LOCALHOST, - httpserver.port, - verify_certificate=False + BaseAuthMethod( + LOCALHOST, + httpserver.port, + verify_certificate=False + ) ) httpserver.expect_oneshot_request("/api/graphql").respond_with_handler(get_ingestor_run_list_request_empty_handler) @@ -1621,20 +2120,6 @@ def get_ingestor_run_list_request_complete_handler(request): actual_body = json.loads(request.data.decode()) query = " ".join(actual_body['query'].split()) - get_ingestor_run_list_query = """ - query listIngestorRuns($filter: IngestorRunsFilterInput, $sort: IngestorRunsSortCriteriaInput) { - listIngestorRuns(filter: $filter, sort: $sort) { - id - containerRuntimeType, - payload, - token, - status, - startedAt, - statusLastUpdatedAt, - completedAt - } - } - """ assert query == " ".join(line.strip() for line in get_ingestor_run_list_query.strip().splitlines()) assert actual_body['variables'] == { "filter": { @@ -1657,9 +2142,11 @@ def get_ingestor_run_list_request_complete_handler(request): def test_get_ingestor_run_list_all_filters_no_verify_success(httpserver: HTTPServer): eas_client = EasClient( - LOCALHOST, - httpserver.port, - verify_certificate=False + BaseAuthMethod( + LOCALHOST, + httpserver.port, + verify_certificate=False + ) ) httpserver.expect_oneshot_request("/api/graphql").respond_with_handler(