diff --git a/README.md b/README.md
index 98acd4f..70daebd 100644
--- a/README.md
+++ b/README.md
@@ -277,6 +277,19 @@ daily_stats = stats.get_stats(
 )
 ```
 
+### 2. emon_api
+
+The `emon_api` module is a Python API client used to interact with an Emoncms server instance.
+
+#### Features
+
+- Data Reading: Efficiently read data from Emoncms.
+- Data Writing: Set inputs, feeds, or values.
+
+#### Usage Examples
+
+...
+
 ## Running Tests
 
 To ensure everything is functioning correctly, run the test suite:
diff --git a/emon_tools/__init__.py b/emon_tools/__init__.py
index 1ffaf41..183750d 100644
--- a/emon_tools/__init__.py
+++ b/emon_tools/__init__.py
@@ -1,2 +1,2 @@
 """emon-tools package"""
-__version__ = "0.1.1"
+__version__ = "0.1.2"
diff --git a/emon_tools/api_common.py b/emon_tools/api_common.py
new file mode 100644
index 0000000..08d6aed
--- /dev/null
+++ b/emon_tools/api_common.py
@@ -0,0 +1,117 @@
+"""
+API common utilities.
+"""
+import re
+from typing import Any
+
+HTTP_STATUS = {
+    400: "invalid request",
+    401: "unauthorized access",
+    404: "not found",
+    406: "URI not acceptable",
+}
+
+MESSAGE_KEY = "message"
+SUCCESS_KEY = "success"
+
+
+class Utils:
+    """Emoncms data helpers."""
+
+    @staticmethod
+    def is_str(text: str, not_empty=False) -> bool:
+        """
+        Test if text is a string.
+
+        :Example:
+        >>> Utils.is_str(text='hello')
+        True
+        :param text: str: Value to test.
+        :return: bool: True if value is a valid string object.
+        """
+        result = isinstance(text, str)
+        if not_empty:
+            result = result and len(text) > 0
+        return result
+
+    @staticmethod
+    def is_list(data: Any, not_empty: bool = False) -> bool:
+        """
+        Test if data is a list.
+
+        :Example:
+        >>> Utils.is_list(data=['hello'])
+        True
+        :param data: Any: Value to test.
+        :return: bool: True if value is a valid list object.
+        """
+        result = isinstance(data, list)
+        if not_empty:
+            result = result and len(data) > 0
+        return result
+
+    @staticmethod
+    def is_dict(data: Any, not_empty: bool = False) -> bool:
+        """
+        Test if data is a dict.
+
+        :Example:
+        >>> Utils.is_dict(data={'key': 'hello'})
+        True
+        :param data: Any: Value to test.
+        :return: bool: True if value is a valid dict object.
+        """
+        result = isinstance(data, dict)
+        if not_empty:
+            result = result and len(data) > 0
+        return result
+
+    @staticmethod
+    def is_valid_node(text) -> bool:
+        """
+        Test if text is a valid node or name value.
+
+        [Original regex from emoncms](https://github.com/emoncms/emoncms/blob/master/Modules/feed/feed_model.php#L99)
+
+        :Example:
+        >>> Utils.is_valid_node(text="Node1")
+        True
+
+        :param text: str: Node value.
+        :return: bool: True if text is a valid node.
+        """
+        result = False
+        if Utils.is_str(text, not_empty=True):
+            matches = re.fullmatch(r'[\w\s\-:]+', text, flags=re.UNICODE)
+            result = matches is not None
+        return result
+
+    @staticmethod
+    def is_request_success(result) -> bool:
+        """
+        Test if a request to emoncms succeeded.
+
+        :Example:
+        >>> Utils.is_request_success(result={"success": "true"})
+        True
+        :param result: dict: The request json response.
+        :return: bool: True if the request returned success.
+        """
+        return Utils.is_dict(result)\
+            and result.get('success') == "true"
+
+    @staticmethod
+    def compute_response(result: dict) -> tuple:
+        """
+        Compute the success flag and message from an Emoncms response.
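+
+        :Example:
+        >>> Utils.compute_response({"success": True, "message": "ok"})
+        (True, 'ok')
+        :return: tuple: (success, message) computed from the response.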
+ """ + if Utils.is_dict(result): + success = result.get(SUCCESS_KEY) is True + message = result.get(MESSAGE_KEY) + elif Utils.is_str(result): + success = True + message = result + else: + success = False + message = "Invalid response" + return success, message diff --git a/emon_tools/emon_api.py b/emon_tools/emon_api.py new file mode 100644 index 0000000..163260b --- /dev/null +++ b/emon_tools/emon_api.py @@ -0,0 +1,467 @@ +""" +Emoncms Client for interacting with feed, input, and user data. + +This module provides an asynchronous client +to communicate with an Emoncms server, +allowing users to retrieve feeds, inputs, +and other related data through the Emoncms API. + +Emoncms API behavior: +- For the `feed` module: + - A non-existing JSON route responds with a JSON object: + `{success: false, message: "Feed does not exist"}`. + - Examples of invalid routes: + - `/feed/aget.json?id=200` if feed 200 does not exist. + - `/feed/basket.json` if the route is invalid. + - The route `/feed/list.json` always returns an array of JSON objects, + which can be empty if no feeds exist. +- For the `user` module: + - A non-existing JSON route responds with `false`, + which is not a JSON object. + - This behavior can result in a `TypeError` + when accessing keys in the response. + +Security and validation: +- Parameters such as `url`, `path`, and query parameters are validated + and sanitized to prevent injection attacks. +- The API key is validated to ensure it is alphanumeric and secure. +""" +import asyncio +from enum import Enum +import logging +from dataclasses import dataclass, field +from typing import Any, Optional, TypeVar, List, Dict +from urllib.parse import quote, urljoin + +from aiohttp import ClientError, ClientSession +from emon_tools.api_common import Utils as Ut +from emon_tools.api_common import HTTP_STATUS +from emon_tools.api_common import MESSAGE_KEY +from emon_tools.api_common import SUCCESS_KEY + +logging.basicConfig() + +Self = TypeVar("Self", bound="EmonRequest") + + +class InputGetType(Enum): + """Remove Nan Method Enum""" + PROCESS_LIST = "process_list" + EXTENDED = "extended" + + +@dataclass +class EmonRequest: + """ + Base class for interacting with the Emoncms API. + + This class handles HTTP GET requests to the Emoncms server, + ensuring that requests are properly validated and secured. + It includes utilities for session management and common error handling. + + Attributes: + url (str): + The base URL of the Emoncms server (e.g., "http://emoncms.local"). + api_key (str): + The API key for authenticating with the Emoncms server. + request_timeout (int): + Timeout for HTTP requests in seconds (default: 20). + """ + url: str + api_key: str + request_timeout: int = 20 + _session: Optional[ClientSession] = field(default=None, init=False) + _close_session: bool = field(default=False, init=False) + logger = logging.getLogger(__name__) + + def __post_init__(self): + """Validate and sanitize initialization parameters.""" + self.url = self._sanitize_url(self.url) + self.api_key = self._validate_api_key(self.api_key) + + @staticmethod + def _sanitize_url(url: str) -> str: + """ + Ensure the URL is valid and properly formatted. + + Args: + url (str): The base URL. + + Returns: + str: Sanitized URL. + + Raises: + ValueError: If the URL is empty or improperly formatted. 
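+
+        Example:
+            >>> EmonRequest._sanitize_url("http://emoncms.local/")
+            'http://emoncms.local'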
+ """ + if not isinstance(url, str) or not url.strip(): + raise ValueError("URL must be a non-empty string.") + if not (url.startswith("http://") or url.startswith("https://")): + raise ValueError("URL must start with 'http://' or 'https://'.") + return url.rstrip("/") # Remove trailing slash for consistency. + + @staticmethod + def _validate_api_key(api_key: str) -> str: + """ + Validate the API key for proper format. + + Args: + api_key (str): The API key. + + Returns: + str: Validated API key. + + Raises: + ValueError: If the API key is not a non-empty alphanumeric string. + """ + if not isinstance(api_key, str) or not api_key.isalnum(): + raise ValueError( + "API key must be a non-empty alphanumeric string." + ) + return api_key + + @property + def session(self) -> ClientSession: + """ + Get or create an aiohttp ClientSession. + + Returns: + ClientSession: The active session for making HTTP requests. + """ + if self._session is None: + self._session = ClientSession() + self._close_session = True + return self._session + + async def async_request( + self, + path: str, + params: Optional[Dict[str, Any]] = None + ) -> Dict[str, Any]: + """ + Make a GET request to the Emoncms server. + + Args: + path (str): API endpoint path (e.g., "/feed/list.json"). + params (Optional[Dict[str, Any]]): + Query parameters to include in the request. + + Returns: + Dict[str, Any]: A dictionary containing the response data. + + Raises: + ValueError: If the path is invalid or empty. + """ + if not path or not isinstance(path, str): + raise ValueError("Path must be a non-empty string.") + + # Encode unsafe characters in the path. + path = quote(path.lstrip('/'), safe="/") + # Safely join the base URL and path. + full_url = urljoin(self.url, path) + + if params is None: + params = {} + # Ensure the API key is always included. + params["apikey"] = self.api_key + + # Validate and encode all parameters + encoded_params = { + key: quote(str(value), safe="") + for key, value in params.items() + } + + data = {SUCCESS_KEY: False, MESSAGE_KEY: None} + self.logger.debug( + "Requesting URL: %s with params: %s", + full_url, + encoded_params) + + try: + async with self.session.get( + full_url, timeout=self.request_timeout, params=encoded_params + ) as response: + if response.status == 200: + success, message = Ut.compute_response( + await response.json() + ) + data[SUCCESS_KEY] = success + data[MESSAGE_KEY] = message + else: + error_msg = ( + f"HTTP {response.status}: " + f"{HTTP_STATUS.get(response.status, 'Unknown error')}") + data[MESSAGE_KEY] = error_msg + self.logger.error(error_msg) + except ClientError as err: + error_msg = f"Client error: {err}" + data[MESSAGE_KEY] = error_msg + self.logger.error(error_msg) + except asyncio.TimeoutError: + error_msg = "Request timeout." + data[MESSAGE_KEY] = error_msg + self.logger.error(error_msg) + + return data + + async def close(self) -> None: + """Close the ClientSession if it was created internally.""" + if self._session and self._close_session: + await self._session.close() + + async def __aenter__(self) -> Self: + """Enter an asynchronous context manager.""" + return self + + async def __aexit__(self, *_exc_info: Any) -> None: + """Exit an asynchronous context manager and close the session.""" + await self.close() + + +@dataclass +class EmonReader(EmonRequest): + """ + Extended client for interacting with specific Emoncms endpoints. + + Provides additional methods for fetching feed, input, and user data. 
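+
+    Example (illustrative; URL and API key below are placeholders):
+        >>> async with EmonReader("http://emoncms.local", "mykey") as api:
+        ...     feeds = await api.async_list_feeds()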
+ """ + + async def async_get_uuid(self) -> Optional[str]: + """ + Retrieve the unique UUID for the authenticated user. + + Returns: + Optional[str]: The UUID string if available, otherwise None. + """ + result = await self.async_request("/user/getuuid.json") + if result[SUCCESS_KEY] and isinstance(result[MESSAGE_KEY], str): + return result[MESSAGE_KEY] + self.logger.warning( + "UUID retrieval failed: %s", result[MESSAGE_KEY] + ) + return None + + async def async_list_feeds(self) -> Optional[List[Dict[str, Any]]]: + """ + Retrieve the list of feeds. + + Returns: + Optional[List[Dict[str, Any]]]: A list of feed dictionaries + or None if retrieval fails. + """ + feed_data = await self.async_request("/feed/list.json") + if feed_data[SUCCESS_KEY]: + return feed_data[MESSAGE_KEY] + self.logger.warning( + "Failed to list feeds: %s", feed_data[MESSAGE_KEY]) + return None + + async def async_get_feed_fields( + self, + feed_id: int + ) -> Optional[Dict[str, Any]]: + """ + Get all fields for a specific feed by ID. + + Args: + feed_id (int): The ID of the feed to retrieve. + + Returns: + Optional[Dict[str, Any]]: + A dictionary of feed fields or None if the feed does not exist. + """ + if feed_id < 0: + raise ValueError("Feed ID must be a non-negative integer.") + params = {"id": feed_id} + response = await self.async_request("/feed/aget.json", params=params) + if response[SUCCESS_KEY]: + return response[MESSAGE_KEY] + self.logger.warning( + "Failed to get feed fields: %s", response[MESSAGE_KEY]) + return None + + async def async_get_feed_meta( + self, + feed_id: int + ) -> Optional[Dict[str, Any]]: + """ + Get metadata for a specific feed by ID. + + Args: + feed_id (int): The ID of the feed to retrieve metadata for. + + Returns: + Optional[Dict[str, Any]]: A dictionary of metadata + or None if the feed does not exist. + """ + if feed_id < 0: + raise ValueError("Feed ID must be a non-negative integer.") + params = {"id": feed_id} + feed_data = await self.async_request( + "/feed/getmeta.json", + params=params) + if feed_data[SUCCESS_KEY]: + return feed_data[MESSAGE_KEY] + self.logger.warning( + "Failed to get feed meta: %s", feed_data[MESSAGE_KEY]) + return None + + async def async_get_last_value_feed( + self, + feed_id: int + ) -> Optional[Dict[str, Any]]: + """ + Get the last time and value for a specific feed by ID. + + Args: + feed_id (int): The ID of the feed to retrieve. + + Returns: + Optional[Dict[str, Any]]: + A dictionary containing the last time and value + or None if the feed does not exist. + """ + if feed_id < 0: + raise ValueError("Feed ID must be a non-negative integer.") + params = {"id": feed_id} + feed_data = await self.async_request( + "/feed/timevalue.json", + params=params) + if feed_data[SUCCESS_KEY]: + return feed_data[MESSAGE_KEY] + self.logger.warning( + "Failed to get last feed value: %s", feed_data[MESSAGE_KEY]) + return None + + async def async_list_inputs( + self, + node: Optional[str] = None + ) -> Optional[List[Dict[str, Any]]]: + """ + Retrieve a list of inputs, optionally filtered by node. + + Args: + node (Optional[str]): + The node name to filter inputs by. If not provided, + all inputs are retrieved. + + Returns: + Optional[List[Dict[str, Any]]]: A list of input dictionaries + or None if retrieval fails. 
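+
+        Example (illustrative; "emontx" is a placeholder node name):
+            >>> inputs = await api.async_list_inputs(node="emontx")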
+        """
+        path = f"/input/get/{quote(node)}" if node else "/input/get"
+        feed_data = await self.async_request(path)
+        if feed_data[SUCCESS_KEY]:
+            return feed_data[MESSAGE_KEY]
+        self.logger.warning(
+            "Failed to list inputs: %s", feed_data[MESSAGE_KEY])
+        return None
+
+    async def async_list_inputs_fields(
+        self,
+        get_type: InputGetType = InputGetType.PROCESS_LIST
+    ) -> Optional[List[Dict[str, Any]]]:
+        """
+        Retrieve a list of input fields, optionally filtered by get_type.
+
+        Args:
+            get_type (InputGetType): The type of output to retrieve.
+                - InputGetType.PROCESS_LIST:
+                    Retrieve the list of inputs
+                    with id and process_list values.
+                - InputGetType.EXTENDED:
+                    Retrieve the list of inputs with all fields.
+
+        Returns:
+            Optional[List[Dict[str, Any]]]: A list of input dictionaries
+            or None if retrieval fails.
+        """
+        if get_type == InputGetType.PROCESS_LIST:
+            path = "/input/getinputs"
+        else:
+            path = "/input/list"
+
+        feed_data = await self.async_request(path)
+        if feed_data[SUCCESS_KEY]:
+            return feed_data[MESSAGE_KEY]
+        self.logger.warning(
+            "Failed to list inputs fields: %s", feed_data[MESSAGE_KEY])
+        return None
+
+    async def async_get_input_fields(
+        self,
+        node: str,
+        name: str
+    ) -> Optional[Dict[str, Any]]:
+        """
+        Fetch specific input details from a node and input name.
+
+        Args:
+            node (str): The name of the node containing the input.
+            name (str): The name of the input to retrieve.
+
+        Returns:
+            Optional[Dict[str, Any]]:
+                A dictionary of input details
+                or None if the input does not exist.
+        """
+        if not node or not name:
+            raise ValueError("Node and name must be non-empty strings.")
+        path = f"/input/get/{quote(node)}/{quote(name)}"
+        response = await self.async_request(path)
+        if response[SUCCESS_KEY]:
+            return response[MESSAGE_KEY]
+        self.logger.warning(
+            "Failed to get input fields: %s", response[MESSAGE_KEY])
+        return None
+
+
+@dataclass
+class EmoncmsWrite(EmonRequest):
+    """Emoncms write client API."""
+
+    async def async_create_feed(self,
+                                name: str,
+                                tag: str,
+                                engine: Optional[int] = None,
+                                options: Optional[dict] = None
+                                ) -> Optional[dict[str, Any]]:
+        """
+        Create a new feed.
+
+        On error, the server returns a dict such as:
+        - {"success": false, "message": "Error Message"}
+
+        Error messages are:
+        - [Bad tag Name]: invalid characters in feed tag
+        - [Bad name]: invalid characters in feed name
+        - [invalid engine number]: ABORTED: Engine id x is not supported.
+
+        On success, the server returns a dict such as:
+        - {"success": true, "feedid": 1, "result": true}
+
+        [See valid engines here](https://github.com/emoncms/emoncms/blob/master/Lib/enum.php#L40)
+
+        :Example:
+        >>> await api.async_create_feed(name="tmp", tag="test")
+        :param name: Name of the new feed.
+        :param tag: Feed related tag or node.
+        :param engine: Engine used to store data.
+        :param options: Dict of engine options.
+        :return: The response message on success, otherwise None.
+ """ + result = None + if Ut.is_valid_node(name)\ + and Ut.is_valid_node(tag): + params = { + "apikey": self.api_key, + "tag": tag, + "name": name, + "engine": engine, + "options": options + } + feed_data = await self.async_request( + "/feed/create.json", params=params) + if feed_data[SUCCESS_KEY]: + return feed_data[MESSAGE_KEY] + return result diff --git a/emon_tools/fina_plot.py b/emon_tools/fina_plot.py index dc6df3b..6527d76 100644 --- a/emon_tools/fina_plot.py +++ b/emon_tools/fina_plot.py @@ -1,9 +1,9 @@ """ fina_plot_helper.py -This module provides utility classes and methods for visualizing financial and statistical data -using matplotlib and pandas. It includes tools for plotting grids, time-series data, and statistical -summaries in a clean and efficient manner. +This module provides utility classes and methods for visualizing data +using matplotlib and pandas. It includes tools for plotting grids, +time-series data, and statistical summaries in a clean and efficient manner. Dependencies: - numpy @@ -17,7 +17,8 @@ import pandas as pd except ImportError as e: raise ImportError( - "Pandas is required for this module. Install it with `pip install pandas`." + "Pandas is required for this module." + "Install it with `pip install pandas`." ) from e try: @@ -30,12 +31,13 @@ ) from e - class FinaPlot: """ - A base class for plot helpers, providing utility functions to enhance matplotlib visualizations. + A base class for plot helpers, providing utility functions + to enhance matplotlib visualizations. - This class offers methods to set up grid styles and other shared plotting configurations. + This class offers methods to set up grid styles + and other shared plotting configurations. """ @staticmethod def auto_plot_grid( @@ -50,14 +52,20 @@ def auto_plot_grid( Configure and enable gridlines for the current matplotlib plot. Parameters: - x_grid (bool): Whether to enable gridlines for the x-axis. Defaults to False. - x_major (bool): Whether to enable major ticks on the x-axis. Defaults to True. - x_minor (bool): Whether to enable minor ticks on the x-axis. Defaults to True. - y_grid (bool): Whether to enable gridlines for the y-axis. Defaults to False. - y_major (bool): Whether to enable major ticks on the y-axis. Defaults to True. - y_minor (bool): Whether to enable minor ticks on the y-axis. Defaults to True. + x_grid (bool): + Whether to enable gridlines for the x-axis. Defaults to False. + x_major (bool): + Whether to enable major ticks on the x-axis. Defaults to True. + x_minor (bool): + Whether to enable minor ticks on the x-axis. Defaults to True. + y_grid (bool): + Whether to enable gridlines for the y-axis. Defaults to False. + y_major (bool): + Whether to enable major ticks on the y-axis. Defaults to True. + y_minor (bool): + Whether to enable minor ticks on the y-axis. Defaults to True. """ - ax=plt.gca() + ax = plt.gca() ax.grid(True) if x_grid: if x_major: @@ -85,8 +93,10 @@ def plot(data: Union[np.ndarray, pd.DataFrame]): Plot a dataset using matplotlib. Parameters: - data (Union[np.ndarray, pd.DataFrame]): The data to be plotted. Can be a NumPy array - (with time and values) or a Pandas DataFrame with 'values'. + data (Union[np.ndarray, pd.DataFrame]): + The data to be plotted. + Can be a NumPy array (with time and values) + or a Pandas DataFrame with 'values'. Raises: ImportError: If matplotlib is not installed. 
@@ -106,7 +116,8 @@ def plot(data: Union[np.ndarray, pd.DataFrame]): class PlotStats(FinaPlot): """ - A class for visualizing statistical summaries, tailored for FinaStats and FinaDfStats objects. + A class for visualizing statistical summaries, + tailored for FinaStats and FinaDfStats objects. """ @staticmethod @@ -116,7 +127,7 @@ def plot_values(data: Union[np.ndarray, pd.DataFrame]): Parameters: data (Union[np.ndarray, pd.DataFrame]): - The statistical data to plot. Can be a NumPy array + The statistical data to plot. Can be a NumPy array or a Pandas DataFrame containing columns for mean, min, and max values. @@ -153,11 +164,14 @@ def plot_values(data: Union[np.ndarray, pd.DataFrame]): @staticmethod def plot_integrity(data: Union[np.ndarray, pd.DataFrame]): """ - Plot data integrity statistics, including daily points and finite values. + Plot data integrity statistics, + including daily points and finite values. Parameters: - data (Union[np.ndarray, pd.DataFrame]): The integrity data to plot. Can be a NumPy array - or a Pandas DataFrame containing columns for finite values and total points. + data (Union[np.ndarray, pd.DataFrame]): + The integrity data to plot. Can be a NumPy array + or a Pandas DataFrame containing columns + for finite values and total points. Raises: ImportError: If matplotlib is not installed. @@ -168,7 +182,7 @@ def plot_integrity(data: Union[np.ndarray, pd.DataFrame]): plt.figure(figsize=(18, 6)) plt.subplot(1, 1, 1) - ax=plt.gca() + ax = plt.gca() ax.grid(which='minor', alpha=0.2) ax.grid(which='major', alpha=0.5) ax.set_ylabel("Daily Points", color=color_y1) @@ -182,12 +196,19 @@ def plot_integrity(data: Union[np.ndarray, pd.DataFrame]): ax2 = ax.twinx() ax2.set_ylabel("Daily Points (%)", color=color_y2) ax2.tick_params(axis="y", labelcolor=color_y2) - ax2.plot(data[:, 0], data[:, 1], - label='Daily mean nb_finite values', - lw=3, - color=color_y2) - ax2.fill_between(data[:, 0], 0, data[:, 2], alpha=0.2, label='Daily min-max nb_points', - color=color_fill) + ax2.plot( + data[:, 0], + data[:, 1], + label='Daily mean nb_finite values', + lw=3, + color=color_y2) + ax2.fill_between( + data[:, 0], + 0, + data[:, 2], + alpha=0.2, + label='Daily min-max nb_points', + color=color_fill) else: ax.plot(data.index, data['nb_finite'] * 100 / data['nb_total'], label='Daily nb_finite (%)', @@ -196,10 +217,12 @@ def plot_integrity(data: Union[np.ndarray, pd.DataFrame]): ax2 = ax.twinx() ax2.set_ylabel("Daily Points (%)", color=color_y2) ax2.tick_params(axis="y", labelcolor=color_y2) - ax2.plot(data.index, data['nb_finite'], - label='Daily mean nb_finite values', - lw=3, - color=color_y2) + ax2.plot( + data.index, + data['nb_finite'], + label='Daily mean nb_finite values', + lw=3, + color=color_y2) ax2.fill_between( data.index, 0, diff --git a/emon_tools/fina_reader.py b/emon_tools/fina_reader.py index 9641f55..ac18f5f 100644 --- a/emon_tools/fina_reader.py +++ b/emon_tools/fina_reader.py @@ -49,7 +49,8 @@ def __init__(self, Raises: ValueError: - If any parameter is invalid or start_time is not less than end_time. + If any parameter is invalid + or start_time is not less than end_time. """ self.interval = interval self.start_time = start_time @@ -87,7 +88,10 @@ def interval(self, value: int): Raises: ValueError: If the value is not a positive integer. 
""" - self._interval = Utils.validate_integer(value, "interval", positive=True) + self._interval = Utils.validate_integer( + value, + "interval", + positive=True) @property def start_time(self) -> int: @@ -230,11 +234,13 @@ def __init__(self, feed_id: int, data_dir: str): def _sanitize_path(self, filename: str) -> str: """ - Ensure that the file path is within the allowed directory and has a valid extension. + Ensure that the file path is within the allowed directory + and has a valid extension. """ filepath = abspath(path_join(self._data_dir, filename)) if not filepath.startswith(self._data_dir): - raise ValueError("Attempt to access files outside the allowed directory.") + raise ValueError( + "Attempt to access files outside the allowed directory.") if splitext(filepath)[1] not in self.VALID_FILE_EXTENSIONS: raise ValueError("Invalid file extension.") return filepath @@ -245,7 +251,9 @@ def _validate_file_size(self, filepath: str, expected_size: int = 1024): """ file_size = getsize(filepath) if file_size > expected_size: - raise ValueError(f"File size exceeds the limit: {file_size} / {expected_size} bytes.") + raise ValueError( + "File size exceeds the limit: " + f"{file_size} / {expected_size} bytes.") def _get_base_path(self) -> str: return path_join(self._data_dir, str(self._feed_id)) @@ -289,13 +297,20 @@ def _validate_read_params( ValueError: If parameters are invalid. """ npoints = Utils.validate_integer(npoints, "npoints", positive=True) - self.chunk_size = Utils.validate_integer(chunk_size, "chunk_size", positive=True) + self.chunk_size = Utils.validate_integer( + chunk_size, + "chunk_size", + positive=True) if not isinstance(start_pos, int) or start_pos < 0: - raise ValueError(f"start_pos ({start_pos}) must be an integer upper or equal to zero.") + raise ValueError( + f"start_pos ({start_pos}) " + "must be an integer upper or equal to zero.") if start_pos >= npoints: - raise ValueError(f"start_pos ({start_pos}) exceeds total npoints ({npoints}).") + raise ValueError( + f"start_pos ({start_pos}) " + f"exceeds total npoints ({npoints}).") if window is not None: window = Utils.validate_integer(window, "window", positive=True) @@ -445,15 +460,18 @@ def read_file( Parameters: npoints (int): Total number of points in the file. - start_pos (int): Starting position (index) in the file. Defaults to 0. - chunk_size (int): Number of values to read in each chunk. Defaults to 1024. - window (Optional[int]): + start_pos (int): + Starting position (index) in the file. Defaults to 0. + chunk_size (int): + Number of values to read in each chunk. Defaults to 1024. + window (Optional[int]): Maximum number of points to read. If None, reads all available points. - set_pos (bool): Whether to automatically increment the position after reading. + set_pos (bool): + Whether to automatically increment the position after reading. Yields: - Tuple[np.ndarray, np.ndarray]: + Tuple[np.ndarray, np.ndarray]: - Array of positions (indices). - Array of corresponding data values. 
@@ -474,11 +492,14 @@ def read_file( try: with open(data_path, "rb") as file: - with mmap.mmap(file.fileno(), 0, access=mmap.ACCESS_READ) as mm: + with mmap.mmap( + file.fileno(), 0, access=mmap.ACCESS_READ) as mm: while self._pos < start_pos + total_points: # Calculate current chunk size remaining_points = start_pos + total_points - self._pos - current_chunk_size = min(self.chunk_size, remaining_points) + current_chunk_size = min( + self.chunk_size, + remaining_points) # Compute offsets and read data offset = self._pos * 4 @@ -487,13 +508,17 @@ def read_file( if len(chunk_data) != current_chunk_size * 4: raise ValueError( - f"Failed to read expected chunk at position {self._pos}. " - f"Expected {current_chunk_size * 4} bytes, got {len(chunk_data)}." + "Failed to read expected chunk " + f"at position {self._pos}. " + f"Expected {current_chunk_size * 4} bytes, " + f"got {len(chunk_data)}." ) # Convert to values and yield values = np.frombuffer(chunk_data, dtype='>> from fina_time_series import FinaDataFrame >>> fina_data = FinaDataFrame("path_to_data_file") - >>> fina_result = fina_data.get_fina_time_series(start=0, step=60, window=3600) + >>> fina_result = fina_data.get_fina_time_series( + start=0, step=60, window=3600) >>> df = fina_result.df() >>> print(df) @@ -41,15 +45,15 @@ import pandas as pd except ImportError as e: raise ImportError( - "Pandas is required for this module. Install it with `pip install pandas`." + "Pandas is required for this module. " + "Install it with `pip install pandas`." ) from e - class FinaDataFrame(FinaData): """ - Extension of FinaData with additional methods to retrieve and handle time-series data - as Pandas DataFrames. + Extension of FinaData with additional methods to retrieve + and handle time-series data as Pandas DataFrames. Methods: - get_fina_time_series: @@ -57,7 +61,8 @@ class FinaDataFrame(FinaData): - get_fina_time_series_by_date: Retrieve time-series data by date range and return as a DataFrame. - set_data_frame: - Static method to convert arrays of time and values into a Pandas DataFrame. + Static method to convert arrays of time and values + into a Pandas DataFrame. """ def __init__(self, feed_id: int, data_dir: str): """ @@ -85,11 +90,13 @@ def get_fina_df_time_series( window (int): The total duration of the time window in seconds. Returns: - pd.DataFrame: A DataFrame with time as the index and data values as a column. + pd.DataFrame: + A DataFrame with time as the index and data values as a column. Raises: - ValueError: If the shape of the times and values arrays do not match. - + ValueError: + If the shape of the times and values arrays do not match. + Example: >>> fina_data.get_fina_time_series(start=0, step=60, window=3600) """ @@ -104,21 +111,24 @@ def get_fina_time_series_by_date( date_format: str = "%Y-%m-%d %H:%M:%S" ) -> pd.DataFrame: """ - Retrieve time series data by specifying a date range and convert it to a Pandas DataFrame. + Retrieve time series data by specifying a date range + and convert it to a Pandas DataFrame. Parameters: start_date (str): The start date as a string. end_date (str): The end date as a string. step (int): The interval between data points in seconds. date_format (str, optional): - The format of the input date strings. Defaults to "%Y-%m-%d %H:%M:%S". + The format of the input date strings. + Defaults to "%Y-%m-%d %H:%M:%S". Returns: - pd.DataFrame: A DataFrame with time as the index and data values as a column. + pd.DataFrame: + A DataFrame with time as the index and data values as a column. 
Raises: ValueError: If the parsed start or end date is invalid. - + Example: >>> fina_data.get_fina_time_series_by_date( "2023-01-01 00:00:00", @@ -144,15 +154,18 @@ def set_data_frame(times: np.ndarray, Parameters: times (np.ndarray): Array of time values (Unix timestamps). - values (np.ndarray): Array of data values corresponding to the times. + values (np.ndarray): + Array of data values corresponding to the times. Returns: - Optional[pd.DataFrame]: A DataFrame with time as the index and data values as a column. + Optional[pd.DataFrame]: + A DataFrame with time as the index and data values as a column. Raises: ValueError: - If the input arrays are not of the same shape or not instances of np.ndarray. - + If the input arrays are not of the same shape + or not instances of np.ndarray. + Example: >>> times = np.array([1672531199, 1672531259, 1672531319]) >>> values = np.array([1.0, 2.0, 3.0]) @@ -175,12 +188,15 @@ def set_data_frame(times: np.ndarray, class FinaDfStats(FinaStats): """ - Extension of FinaStats with methods to compute and return statistics as Pandas DataFrames. + Extension of FinaStats with methods to compute + and return statistics as Pandas DataFrames. Methods: - - get_df_stats: Compute daily statistics and return them as a Pandas DataFrame. + - get_df_stats: + Compute daily statistics and return them as a Pandas DataFrame. - get_df_stats_by_date: - Compute statistics for a specific date range and return as a DataFrame. + Compute statistics for a specific date range + and return as a DataFrame. - get_stats_labels: Get column labels for the statistics DataFrame. - get_integrity_labels: Get column labels for integrity stats. - get_values_labels: Get column labels for value stats. @@ -195,21 +211,27 @@ def get_df_stats( stats_type: StatsType = StatsType.VALUES ) -> List[List[Union[float, int]]]: """ - Compute statistics for data within a specified range and return them as a Pandas DataFrame. + Compute statistics for data within a specified range + and return them as a Pandas DataFrame. Parameters: - start_time (Optional[int]): The start time in seconds (Unix timestamp). Defaults to 0. - steps_window (int): The number of steps in the window. Defaults to -1 (all data). + start_time (Optional[int]): + The start time in seconds (Unix timestamp). Defaults to 0. + steps_window (int): + The number of steps in the window. Defaults to -1 (all data). max_size (int): Maximum size of the dataset. Defaults to 10,000. min_value (Optional[Union[int, float]]): Minimum valid value for filtering. Defaults to None. max_value (Optional[Union[int, float]]): Maximum valid value for filtering. Defaults to None. - stats_type (StatsType): Type of statistics to compute. Defaults to StatsType.VALUES. + stats_type (StatsType): + Type of statistics to compute. Defaults to StatsType.VALUES. Returns: - pd.DataFrame: A DataFrame containing computed statistics with time as the index. - + pd.DataFrame: + A DataFrame containing computed statistics + with time as the index. + Example: >>> stats_df = fina_stats.get_df_stats( start_time=0, @@ -229,7 +251,8 @@ def get_df_stats( ) return df.set_index(pd.to_datetime(df['time'], unit='s', utc=True)) - def get_df_stats_by_date(self, + def get_df_stats_by_date( + self, start_date: str, end_date: str, date_format: str = "%Y-%m-%d %H:%M:%S", @@ -246,17 +269,21 @@ def get_df_stats_by_date(self, start_date (str): The start date as a string. end_date (str): The end date as a string. date_format (str, optional): - The format of the input date strings. 
Defaults to "%Y-%m-%d %H:%M:%S". + The format of the input date strings. + Defaults to "%Y-%m-%d %H:%M:%S". max_size (int): Maximum size of the dataset. Defaults to 10,000. min_value (Optional[Union[int, float]]): Minimum valid value for filtering. Defaults to None. max_value (Optional[Union[int, float]]): Maximum valid value for filtering. Defaults to None. - stats_type (StatsType): Type of statistics to compute. Defaults to StatsType.VALUES. + stats_type (StatsType): + Type of statistics to compute. Defaults to StatsType.VALUES. Returns: - pd.DataFrame: A DataFrame containing computed statistics with time as the index. - + pd.DataFrame: + A DataFrame containing computed statistics + with time as the index. + Example: >>> stats_df = fina_stats.get_df_stats_by_date( "2023-01-01 00:00:00", @@ -276,16 +303,20 @@ def get_df_stats_by_date(self, return df.set_index(pd.to_datetime(df['time'], unit='s', utc=True)) @staticmethod - def get_stats_labels(stats_type: StatsType = StatsType.VALUES) -> List[str]: + def get_stats_labels( + stats_type: StatsType = StatsType.VALUES + ) -> List[str]: """ - Get the column labels for the statistics DataFrame based on the type of statistics. + Get the column labels for the statistics DataFrame + based on the type of statistics. Parameters: - stats_type (StatsType): The type of statistics (VALUES or INTEGRITY). + stats_type (StatsType): + The type of statistics (VALUES or INTEGRITY). Returns: List[str]: A list of column labels for the DataFrame. - + Example: >>> FinaDfStats.get_stats_labels(stats_type=StatsType.VALUES) """ @@ -300,7 +331,7 @@ def get_integrity_labels(): Returns: List[str]: A list of column labels for integrity stats. - + Example: >>> FinaDfStats.get_integrity_labels() """ @@ -315,7 +346,7 @@ def get_values_labels(): Returns: List[str]: A list of column labels for value stats. - + Example: >>> FinaDfStats.get_values_labels() """ diff --git a/emon_tools/fina_utils.py b/emon_tools/fina_utils.py index 475a791..0ff12a4 100644 --- a/emon_tools/fina_utils.py +++ b/emon_tools/fina_utils.py @@ -31,7 +31,8 @@ def validate_number( value (Union[int, float]): The value to validate. field_name (str): The name of the field for error messages. positive (bool, optional): - If True, ensures the value is strictly positive. Defaults to False. + If True, ensures the value is strictly positive. + Defaults to False. non_neg (bool, optional): If True, ensures the value is non-negative. Defaults to False. @@ -39,7 +40,8 @@ def validate_number( Union[int, float]: The validated value. Raises: - ValueError: If the value is not a number or does not meet the specified constraints. + ValueError: If the value is not a number + or does not meet the specified constraints. """ if not isinstance(value, (int, float)): raise ValueError(f"{field_name} must be a number.") @@ -63,7 +65,8 @@ def validate_integer( value (int): The value to validate. field_name (str): The name of the field for error messages. positive (bool, optional): - If True, ensures the value is strictly positive. Defaults to False. + If True, ensures the value is strictly positive. + Defaults to False. non_neg (bool, optional): If True, ensures the value is non-negative. Defaults to False. @@ -71,7 +74,8 @@ def validate_integer( int: The validated integer. Raises: - ValueError: If the value is not an integer or does not meet the specified constraints. + ValueError: If the value is not an integer + or does not meet the specified constraints. 
""" if not isinstance(value, int): raise ValueError(f"{field_name} must be an integer.") @@ -98,16 +102,20 @@ def validate_timestamp( Raises: ValueError: - If the input is not a positive number or exceeds the valid UNIX timestamp range. + If the input is not a positive number + or exceeds the valid UNIX timestamp range. """ # Validate the timestamp is a number and non-negative - Utils.validate_number(timestamp, f"{field_name} timestamp", non_neg=True) + Utils.validate_number( + timestamp, f"{field_name} timestamp", non_neg=True) - # Validate the timestamp is within a reasonable range for UNIX timestamps + # Validate the timestamp is within + # a reasonable range for UNIX timestamps max_timestamp = 2147480000 # Near the year 2038 problem threshold if not 0 <= timestamp <= max_timestamp: raise ValueError( - f"{field_name} must be a valid UNIX timestamp between 0 and {max_timestamp}." + f"{field_name} must be a valid UNIX timestamp " + f"between 0 and {max_timestamp}." ) # Attempt conversion to datetime to ensure validity @@ -164,7 +172,8 @@ def get_utc_datetime_from_string(dt_value: str, return naive_datetime.replace(tzinfo=dt.timezone.utc) except ValueError as e: raise ValueError( - f"Error parsing date '{dt_value}' with the format '{date_format}': {e}" + f"Error parsing date '{dt_value}' " + f"with the format '{date_format}': {e}" ) from e @staticmethod @@ -174,19 +183,24 @@ def get_dates_interval_from_timestamp( date_format: str = "%Y-%m-%d %H:%M:%S" ) -> Tuple[str, str]: """ - Generate formatted start and end date strings based on a start timestamp and window size. + Generate formatted start and end date strings + based on a start timestamp and window size. Parameters: start (int): The starting UNIX timestamp in seconds. window (int): The duration of the interval in seconds. date_format (str): - Format string for the output datetime. Defaults to "%Y-%m-%d %H:%M:%S". + Format string for the output datetime. + Defaults to "%Y-%m-%d %H:%M:%S". Returns: - Tuple[str, str]: A tuple containing the formatted start and end dates as strings. + Tuple[str, str]: + A tuple containing the formatted + start and end dates as strings. Raises: - ValueError: If `start` or `window` are not integers or are negative. + ValueError: If `start` or `window` + are not integers or are negative. """ if not isinstance(start, int) or not isinstance(window, int): raise ValueError("'start' and 'window' must be integers.") @@ -213,7 +227,8 @@ def get_window_by_dates(start_date: str, end_date (str): The end date as a string. interval (int): The time interval in seconds for each data point. date_format (str): - The format of the input date strings. Defaults to "%Y-%m-%d %H:%M:%S". + The format of the input date strings. + Defaults to "%Y-%m-%d %H:%M:%S". Returns: Tuple[int, int]: @@ -221,8 +236,10 @@ def get_window_by_dates(start_date: str, - Window size as the number of intervals. Raises: - ValueError: If the `start_date` is later than or equal to `end_date`. - ValueError: If the `interval` is not a positive integer. + ValueError: + If the `start_date` is later than or equal to `end_date`. + ValueError: + If the `interval` is not a positive integer. 
""" # Convert date strings to UTC datetime objects start_dt = Utils.get_utc_datetime_from_string(start_date, date_format) @@ -230,7 +247,8 @@ def get_window_by_dates(start_date: str, # Validate date range if start_dt >= end_dt: - raise ValueError("The start date must be earlier than the end date.") + raise ValueError( + "The start date must be earlier than the end date.") # Validate interval interval = Utils.validate_integer(interval, 'interval', positive=True) @@ -247,14 +265,16 @@ def get_dates_by_window(start: int, window: int ) -> Tuple[dt.datetime, dt.datetime]: """ - Calculate start and end UTC datetimes based on a start timestamp and window size. + Calculate start and end UTC datetimes + based on a start timestamp and window size. Parameters: start (int): Start timestamp in seconds since epoch. window (int): Duration of the window in seconds. Returns: - Tuple[dt.datetime, dt.datetime]: Start and end datetimes as UTC datetime objects. + Tuple[dt.datetime, dt.datetime]: + Start and end datetimes as UTC datetime objects. Raises: ValueError: If `start` or `window` is invalid. @@ -278,7 +298,8 @@ def filter_values_by_range( max_value: Optional[Union[int, float]] = None ) -> np.ndarray: """ - Filter an array of values by replacing those outside a specified range with NaN. + Filter an array of values by replacing + those outside a specified range with NaN. Parameters: values (np.ndarray): The ndarray of values to filter. @@ -288,7 +309,9 @@ def filter_values_by_range( Maximum valid value. Values above this will be set to NaN. Returns: - np.ndarray: The filtered array with values outside the specified range replaced by NaN. + np.ndarray: + The filtered array with values outside + the specified range replaced by NaN. Raises: ValueError: If `values` is not a numpy ndarray. 
diff --git a/requirements-dev.txt b/requirements-dev.txt index 1c92538..2cfeb04 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -3,4 +3,6 @@ pytest-cov>=6.0.0 coverage>=7.6.9 numpy>=2.2.0 pandas>=2.2.3 -matplotlib>=3.9.3 \ No newline at end of file +matplotlib>=3.9.3 +aiohttp>=3.8.1 +pytest-aiohttp>=1.0.5 \ No newline at end of file diff --git a/setup.cfg b/setup.cfg index e5fb7e4..bc41d69 100644 --- a/setup.cfg +++ b/setup.cfg @@ -26,6 +26,10 @@ test = pytest>=8.3.4 pytest-cov>=6.0.0 coverage>=7.6.9 + aiohttp>=3.8.1 + pytest-aiohttp>=1.0.5 + pandas>=2.2.3 + matplotlib>=3.9.3 notebook = jupyter>=1.0.0 pandas>=2.2.3 @@ -34,4 +38,6 @@ time_series = pandas>=2.2.3 plot = pandas>=2.2.3 - matplotlib>=3.9.3 \ No newline at end of file + matplotlib>=3.9.3 +api = + aiohttp>=3.8.1 \ No newline at end of file diff --git a/tests/test_async_emon_reader.py b/tests/test_async_emon_reader.py new file mode 100644 index 0000000..cf81217 --- /dev/null +++ b/tests/test_async_emon_reader.py @@ -0,0 +1,159 @@ +"""Tests for async_request method.""" + +from unittest.mock import AsyncMock, patch +import pytest +from emon_tools.emon_api import EmonReader, InputGetType + +API_KEY = "12345" +BASE_URL = "http://localhost:8080" + +MOCK_UUID = "abcd1234-5678-90ef-ghij-klmnopqrstuv" +MOCK_FEEDS = [ + { + "id": "1", + "userid": "1", + "name": "Cellule_Tcircuit", + "tag": " sofrel_circuit_Cellule", + "public": "0", + "size": "35811340", + "engine": "5", + "processList": "", + "unit": "", + "time": 1665509570, + "value": 17.690000534058, + } +] +MOCK_INPUT_LIST = { + "V": {"time": None, "value": None, "processList": "1:88"}, + "I": {"time": None, "value": None, "processList": "1:89"}, + "P": {"time": None, "value": None, "processList": "1:90"}, +} + +MOCK_INPUT_LIST_PROCESS = [ + {"id": "1", "processList": "1:88"} +] + +MOCK_INPUT_LIST_FIELDS = [ + { + "id": "1", "node_id": "emontx", "name": "temp", + "description": "", "processList": "1:88", + "time": None, "value": None} +] + +MOCK_INPUT_DETAILS = {"time": None, "value": None, "processList": "1:88"} + + +@pytest.fixture +def emon_reader(): + """Fixture to initialize an EmonReader instance.""" + return EmonReader(BASE_URL, API_KEY) + + +@pytest.mark.asyncio +async def test_async_get_uuid(emon_reader): + """Test retrieving UUID.""" + with patch.object( + emon_reader, "async_request", new=AsyncMock()) as mock_request: + mock_request.return_value = {"success": True, "message": MOCK_UUID} + uuid = await emon_reader.async_get_uuid() + assert uuid == MOCK_UUID + mock_request.assert_called_once_with("/user/getuuid.json") + + +@pytest.mark.asyncio +async def test_async_list_feeds(emon_reader): + """Test listing feeds.""" + with patch.object( + emon_reader, "async_request", new=AsyncMock()) as mock_request: + mock_request.return_value = {"success": True, "message": MOCK_FEEDS} + feeds = await emon_reader.async_list_feeds() + assert feeds == MOCK_FEEDS + mock_request.assert_called_once_with("/feed/list.json") + + +@pytest.mark.asyncio +async def test_async_get_feed_fields(emon_reader): + """Test retrieving feed fields.""" + with patch.object( + emon_reader, "async_request", new=AsyncMock()) as mock_request: + mock_request.return_value = {"success": True, "message": MOCK_FEEDS[0]} + fields = await emon_reader.async_get_feed_fields(1) + assert fields == MOCK_FEEDS[0] + mock_request.assert_called_once_with( + "/feed/aget.json", params={"id": 1}) + + +@pytest.mark.asyncio +async def test_async_get_feed_meta(emon_reader): + """Test retrieving feed metadata.""" + with 
patch.object( + emon_reader, "async_request", new=AsyncMock()) as mock_request: + mock_request.return_value = {"success": True, "message": MOCK_FEEDS[0]} + meta = await emon_reader.async_get_feed_meta(1) + assert meta == MOCK_FEEDS[0] + mock_request.assert_called_once_with( + "/feed/getmeta.json", params={"id": 1}) + + +@pytest.mark.asyncio +async def test_async_get_last_value_feed(emon_reader): + """Test retrieving last value of a feed.""" + with patch.object( + emon_reader, "async_request", new=AsyncMock()) as mock_request: + mock_request.return_value = {"success": True, "message": MOCK_FEEDS[0]} + timevalue = await emon_reader.async_get_last_value_feed(1) + assert timevalue == MOCK_FEEDS[0] + mock_request.assert_called_once_with( + "/feed/timevalue.json", params={"id": 1}) + + +@pytest.mark.asyncio +async def test_async_list_inputs(emon_reader): + """Test listing inputs.""" + with patch.object( + emon_reader, "async_request", new=AsyncMock()) as mock_request: + mock_request.return_value = { + "success": True, "message": MOCK_INPUT_LIST} + inputs = await emon_reader.async_list_inputs() + assert inputs == MOCK_INPUT_LIST + mock_request.assert_called_once_with("/input/get") + + +@pytest.mark.asyncio +async def test_async_list_inputs_process_fields(emon_reader): + """Test listing inputs.""" + with patch.object( + emon_reader, "async_request", new=AsyncMock()) as mock_request: + mock_request.return_value = { + "success": True, "message": MOCK_INPUT_LIST_PROCESS} + inputs = await emon_reader.async_list_inputs_fields( + get_type=InputGetType.PROCESS_LIST + ) + assert inputs == MOCK_INPUT_LIST_PROCESS + mock_request.assert_called_once_with("/input/getinputs") + + +@pytest.mark.asyncio +async def test_async_list_inputs_extended_fields(emon_reader): + """Test listing inputs.""" + with patch.object( + emon_reader, "async_request", new=AsyncMock()) as mock_request: + mock_request.return_value = { + "success": True, "message": MOCK_INPUT_LIST_FIELDS} + inputs = await emon_reader.async_list_inputs_fields( + get_type=InputGetType.EXTENDED + ) + assert inputs == MOCK_INPUT_LIST_FIELDS + mock_request.assert_called_once_with("/input/list") + + +@pytest.mark.asyncio +async def test_async_get_input_fields(emon_reader): + """Test retrieving input fields.""" + with patch.object( + emon_reader, "async_request", new=AsyncMock()) as mock_request: + mock_request.return_value = { + "success": True, "message": MOCK_INPUT_DETAILS} + input_details = await emon_reader.async_get_input_fields("test", "V") + assert input_details == MOCK_INPUT_DETAILS + mock_request.assert_called_once_with("/input/get/test/V") diff --git a/tests/test_async_emon_request.py b/tests/test_async_emon_request.py new file mode 100644 index 0000000..d6bd1f4 --- /dev/null +++ b/tests/test_async_emon_request.py @@ -0,0 +1,116 @@ +from aiohttp import web, ClientSession +from aiohttp.client_exceptions import ClientError +from unittest.mock import AsyncMock, patch +from emon_tools.emon_api import EmonRequest +import pytest + +API_KEY = "12345" +VALID_URL = "http://localhost:8080" +INVALID_URL = "http://localhost:9999" +INVALID_API_KEY = "" + + +@pytest.fixture +def mock_emon_request(): + """Fixture for creating a mock EmonRequest instance.""" + return EmonRequest(url=VALID_URL, api_key=API_KEY) + +async def mock_handler(request): + """Mock handler for the aiohttp server.""" + if "apikey" not in request.query or request.query["apikey"] != API_KEY: + raise web.HTTPUnauthorized(text="Invalid API key") + return web.json_response({"success": True, 
"message": "Mock response"}) + +@pytest.fixture +def aiohttp_server_mock(loop, aiohttp_server): + """Fixture to mock an aiohttp server.""" + app = web.Application() + app.router.add_get("/valid-path", mock_handler) + return loop.run_until_complete(aiohttp_server(app)) + +@pytest.mark.asyncio +async def test_async_request_success(aiohttp_server_mock, mock_emon_request): + """Test async_request with valid URL and API key.""" + mock_emon_request.url = str(aiohttp_server_mock.make_url("/")) + response = await mock_emon_request.async_request("/valid-path") + assert response["success"] is True + assert response["message"] == "Mock response" + +@pytest.mark.asyncio +async def test_async_request_invalid_path(mock_emon_request): + """Test async_request with an invalid path.""" + with pytest.raises(ValueError, match="Path must be a non-empty string."): + await mock_emon_request.async_request("") + +@pytest.mark.asyncio +async def test_async_request_invalid_api_key(aiohttp_server_mock, mock_emon_request): + """Test async_request with an invalid API key.""" + mock_emon_request.url = str(aiohttp_server_mock.make_url("/")) + mock_emon_request.api_key = INVALID_API_KEY + response = await mock_emon_request.async_request("/valid-path") + assert response["success"] is False + assert "unauthorized" in response["message"] + +@pytest.mark.asyncio +async def test_async_request_timeout(mock_emon_request): + """Test async_request handling of timeouts.""" + mock_emon_request.url = INVALID_URL + mock_emon_request.request_timeout = 0.0001 + response = await mock_emon_request.async_request("/valid-path") + assert response["success"] is False + assert response["message"] == "Request timeout." + +@pytest.mark.asyncio +async def test_async_request_client_error(mock_emon_request): + """Test async_request handling of ClientError.""" + with patch("aiohttp.ClientSession.get", side_effect=ClientError("Mock client error")): + response = await mock_emon_request.async_request("/valid-path") + assert response["success"] is False + assert "client error" in response["message"] + +@pytest.mark.asyncio +async def test_close_session(mock_emon_request): + """Test closing of the aiohttp session.""" + session_mock = AsyncMock() + mock_emon_request._session = session_mock + mock_emon_request._close_session = True + await mock_emon_request.close() + session_mock.close.assert_awaited_once() + +@pytest.mark.asyncio +async def test_get_session_creation(mock_emon_request): + """Test session creation when no session exists.""" + assert mock_emon_request._session is None + session = mock_emon_request.session + assert session is not None + assert isinstance(session, ClientSession) + +@pytest.mark.asyncio +async def test_get_session_reuse(mock_emon_request): + """Test reusing an existing session.""" + session = mock_emon_request.session + assert mock_emon_request.session is session + +@pytest.mark.asyncio +async def test_validate_api_key_valid(mock_emon_request): + """Test API key validation with a valid key.""" + validated_key = mock_emon_request._validate_api_key(API_KEY) + assert validated_key == API_KEY + +@pytest.mark.asyncio +async def test_validate_api_key_invalid(mock_emon_request): + """Test API key validation with an invalid key.""" + with pytest.raises(ValueError, match="API key must be a non-empty alphanumeric string."): + mock_emon_request._validate_api_key(INVALID_API_KEY) + +@pytest.mark.asyncio +async def test_validate_url_valid(mock_emon_request): + """Test URL validation with a valid URL.""" + validated_url = 
mock_emon_request._sanitize_url(VALID_URL) + assert validated_url == VALID_URL + +@pytest.mark.asyncio +async def test_validate_url_invalid(mock_emon_request): + """Test URL validation with an invalid URL.""" + with pytest.raises(ValueError, match="URL must be a non-empty string."): + mock_emon_request._sanitize_url("") diff --git a/tests/test_fina_plot.py b/tests/test_fina_plot.py index 3876fa9..9b750c1 100644 --- a/tests/test_fina_plot.py +++ b/tests/test_fina_plot.py @@ -6,14 +6,12 @@ matplotlib. """ # pylint: disable=unused-argument,protected-access,unused-import -from unittest.mock import patch, mock_open, MagicMock -from struct import pack +# flake8: disable=F401 +from unittest.mock import patch import numpy as np import pandas as pd import pytest from emon_tools.fina_time_series import FinaDataFrame -from emon_tools.fina_time_series import FinaDfStats -from emon_tools.fina_plot import FinaPlot from emon_tools.fina_plot import PlotData from emon_tools.fina_plot import PlotStats @@ -28,7 +26,8 @@ def sample_data(self): """ Provide a fixture for sample time and value data. """ - times = np.array([1640995200, 1640995300, 1640995400]) # Example timestamps + # Example timestamps + times = np.array([1640995200, 1640995300, 1640995400]) values = np.array([1.5, 2.3, 3.7]) # Example values return np.vstack((times, values)).T @@ -37,7 +36,8 @@ def sample_data_frame(self): """ Provide a fixture for sample time and value data. """ - times = np.array([1640995200, 1640995300, 1640995400]) # Example timestamps + # Example timestamps + times = np.array([1640995200, 1640995300, 1640995400]) values = np.array([1.5, 2.3, 3.7]) # Example values return FinaDataFrame.set_data_frame(times, values) @@ -105,7 +105,8 @@ def sample_df_values(self): """ Provide a fixture for sample time and value data. """ - times = np.array([1640995200, 1640995300, 1640995400]) # Example timestamps + # Example timestamps + times = np.array([1640995200, 1640995300, 1640995400]) mins = np.array([1.5, 2.3, 3.7]) means = np.array([2, 3.3, 4.7]) maxs = np.array([3, 4.3, 5.7]) @@ -134,7 +135,11 @@ def test_plot_values_without_pandas(self, mock_show, sample_data_values): mock_show.assert_called_once() @patch("emon_tools.fina_plot.plt.show") - def test_plot_integrity_with_pandas(self, mock_show, sample_data_integrity): + def test_plot_integrity_with_pandas( + self, + mock_show, + sample_data_integrity + ): """ Test the plot method when pandas and matplotlib are available. """ @@ -142,7 +147,11 @@ def test_plot_integrity_with_pandas(self, mock_show, sample_data_integrity): mock_show.assert_called_once() @patch("emon_tools.fina_plot.plt.show") - def test_plot_integrity_without_pandas(self, mock_show, sample_df_integrity): + def test_plot_integrity_without_pandas( + self, + mock_show, + sample_df_integrity + ): """ Test the plot method when only matplotlib is available. """ diff --git a/tests/test_fina_reader.py b/tests/test_fina_reader.py index 2f43f65..caf1d1f 100644 --- a/tests/test_fina_reader.py +++ b/tests/test_fina_reader.py @@ -21,7 +21,8 @@ class TestFinaReader: @pytest.fixture def tmp_path_override(self, tmp_path): """ - Provide a fixture for a valid temporary path to simulate data directory. + Provide a fixture for a valid temporary path + to simulate data directory. """ data_dir = tmp_path / "test_data" data_dir.mkdir() @@ -48,7 +49,8 @@ def test_initialization_invalid_feed_id(self, tmp_path_override, feed_id): """ Test initializing FinaReader with invalid feed_id values. 
""" - with pytest.raises(ValueError, match="feed_id must be a positive integer."): + match_error = "feed_id must be a positive integer." + with pytest.raises(ValueError, match=match_error): FinaReader(feed_id=feed_id, data_dir=tmp_path_override) def test_initialization_invalid_data_dir(self): @@ -66,30 +68,35 @@ def test_setters(self, valid_fina_reader, tmp_path_override): valid_fina_reader.feed_id = 2 assert valid_fina_reader.feed_id == 2 - with pytest.raises(ValueError, match="feed_id must be a positive integer."): + match_error = "feed_id must be a positive integer." + with pytest.raises(ValueError, match=match_error): valid_fina_reader.feed_id = -1 # data_dir setter valid_fina_reader.data_dir = tmp_path_override assert valid_fina_reader.data_dir == tmp_path_override - with pytest.raises(ValueError, match="data_dir must be a valid directory."): + match_error = "data_dir must be a valid directory." + with pytest.raises(ValueError, match=match_error): valid_fina_reader.data_dir = "invalid_dir" # pos setter valid_fina_reader.pos = 10 assert valid_fina_reader.pos == 10 - with pytest.raises(ValueError, match="pos must be a positive integer."): + match_error = "pos must be a positive integer." + with pytest.raises(ValueError, match=match_error): valid_fina_reader.pos = -1 @patch("emon_tools.fina_reader.isfile", return_value=False) def test_get_meta_path_invalid(self, mock_isfile, valid_fina_reader): """ - Test that _get_meta_path raises FileNotFoundError when the meta file does not exist. + Test that _get_meta_path raises FileNotFoundError + when the meta file does not exist. """ # Simulate accessing the meta file path - with pytest.raises(FileNotFoundError, match="Meta file does not exist"): + match_error = "Meta file does not exist" + with pytest.raises(FileNotFoundError, match=match_error): valid_fina_reader._get_meta_path() # Assert that os.path.isfile was called with the expected file path @@ -120,7 +127,9 @@ def test_read_meta(self, assert meta.end_time == 1000990 # Invalid interval - @patch("builtins.open", new_callable=mock_open, read_data=pack("<2I", 0, 1000000)) + @patch("builtins.open", + new_callable=mock_open, + read_data=pack("<2I", 0, 1000000)) @patch("emon_tools.fina_reader.isfile", return_value=True) @patch("emon_tools.fina_reader.getsize", return_value=400) def test_read_meta_invalid(self, @@ -131,9 +140,11 @@ def test_read_meta_invalid(self, """ Test reading invalid metadata from the meta file. """ + match_error = ("Error reading meta file: " + "interval must be a positive integer.") with pytest.raises( - OSError, - match="Error reading meta file: interval must be a positive integer."): + OSError, + match=match_error): valid_fina_reader.read_meta() # Less than 8 bytes @@ -160,7 +171,9 @@ def test_read_meta_corrupted_meta_file( "rb" ) - @patch("builtins.open", new_callable=mock_open, read_data=pack("