diff --git a/CHANGELOG.md b/CHANGELOG.md index 4f6dd4c6..c50aa1b5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,15 +11,13 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Ability to modify Database Jobs - New classes to interact with the Partition API - - [pyslurm.Partition](https://pyslurm.github.io/23.2/reference/partition/#pyslurm.Partition) - - [pyslurm.Partitions](https://pyslurm.github.io/23.2/reference/partition/#pyslurm.Partitions) + - [pyslurm.Partition][] + - [pyslurm.Partitions][] - New attributes for a Database Job: - - extra - - failed_node -- Now possible to initialize a [pyslurm.db.Jobs][] collection with existing job - ids or pyslurm.db.Job objects -- Added `as_dict` function to all Collections + - `extra` + - `failed_node` - Added a new Base Class [MultiClusterMap][pyslurm.xcollections.MultiClusterMap] that some Collections inherit from. +- Added `to_json` function to all Collections ### Fixed @@ -29,9 +27,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - the Job was older than a day ### Changed - -- `JobSearchFilter` has been renamed to `JobFilter` -- Renamed `as_dict` Function of some classes to `to_dict` + +- Improved Docs +- Renamed `JobSearchFilter` to [pyslurm.db.JobFilter][] +- Renamed `as_dict` function of some classes to `to_dict` ## [23.2.1](https://github.com/PySlurm/pyslurm/releases/tag/v23.2.1) - 2023-05-18 diff --git a/docs/reference/index.md b/docs/reference/index.md index 35a6c678..af0ef05e 100644 --- a/docs/reference/index.md +++ b/docs/reference/index.md @@ -25,7 +25,7 @@ The `pyslurm` package is a wrapper around the Slurm C-API it! -## Functionality already reworked: +## Reworked Classes * Job API * [pyslurm.Job][] diff --git a/pyslurm/core/job/job.pxd b/pyslurm/core/job/job.pxd index 4eb89bde..616db4c9 100644 --- a/pyslurm/core/job/job.pxd +++ b/pyslurm/core/job/job.pxd @@ -69,10 +69,10 @@ cdef class Jobs(MultiClusterMap): """A [`Multi Cluster`][pyslurm.xcollections.MultiClusterMap] collection of [pyslurm.Job][] objects. Args: - jobs (Union[list, dict], optional=None): + jobs (Union[list[int], dict[int, pyslurm.Job], str], optional=None): Jobs to initialize this collection with. frozen (bool, optional=False): - Control whether this collection is "frozen" when reloading Job + Control whether this collection is `frozen` when reloading Job information. Attributes: diff --git a/pyslurm/core/job/job.pyx b/pyslurm/core/job/job.pyx index e2915608..8ccc7f66 100644 --- a/pyslurm/core/job/job.pyx +++ b/pyslurm/core/job/job.pyx @@ -89,6 +89,14 @@ cdef class Jobs(MultiClusterMap): Raises: RPCError: When getting all the Jobs from the slurmctld failed. + + Examples: + >>> import pyslurm + >>> jobs = pyslurm.Jobs.load() + >>> print(jobs) + pyslurm.Jobs({1: pyslurm.Job(1), 2: pyslurm.Job(2)}) + >>> print(jobs[1]) + pyslurm.Job(1) """ cdef: dict passwd = {} @@ -134,6 +142,9 @@ cdef class Jobs(MultiClusterMap): def reload(self): """Reload the information for jobs in a collection. + Returns: + (pyslurm.Partitions): Returns self + Raises: RPCError: When getting the Jobs from the slurmctld failed. 
""" @@ -203,7 +214,7 @@ cdef class Job: self._dealloc_impl() def __repr__(self): - return f'{self.__class__.__name__}({self.id})' + return f'pyslurm.{self.__class__.__name__}({self.id})' @staticmethod def load(job_id): @@ -233,15 +244,13 @@ cdef class Job: """ cdef: job_info_msg_t *info = NULL - Job wrap = Job.__new__(Job) + Job wrap = None try: verify_rpc(slurm_load_job(&info, job_id, slurm.SHOW_DETAIL)) if info and info.record_count: - # Copy info - wrap._alloc_impl() - memcpy(wrap.ptr, &info.job_array[0], sizeof(slurm_job_info_t)) + wrap = Job.from_ptr(&info.job_array[0]) info.record_count = 0 if not slurm.IS_JOB_PENDING(wrap.ptr): diff --git a/pyslurm/core/job/step.pyx b/pyslurm/core/job/step.pyx index 54cb8f59..4227e901 100644 --- a/pyslurm/core/job/step.pyx +++ b/pyslurm/core/job/step.pyx @@ -59,6 +59,10 @@ cdef class JobSteps(dict): elif steps is not None: raise TypeError("Invalid Type: {type(steps)}") + def __repr__(self): + data = super().__repr__() + return f'pyslurm.{self.__class__.__name__}({data})' + @staticmethod def load(job): """Load the Job Steps from the system. @@ -69,6 +73,14 @@ cdef class JobSteps(dict): Returns: (pyslurm.JobSteps): JobSteps of the Job + + Examples: + >>> import pyslurm + >>> steps = pyslurm.JobSteps.load(1) + >>> print(steps) + pyslurm.JobSteps({'batch': pyslurm.JobStep('batch')}) + >>> print(steps[1]) + pyslurm.JobStep('batch') """ cdef: Job _job @@ -187,7 +199,7 @@ cdef class JobStep: JobStep.__dict__[name].__set__(self, val) def __repr__(self): - return f'{self.__class__.__name__}({self.id})' + return f'pyslurm.{self.__class__.__name__}({self.id})' @staticmethod def load(job_id, step_id): @@ -214,7 +226,7 @@ cdef class JobStep: """ cdef: job_step_info_response_msg_t *info = NULL - JobStep wrap = JobStep.__new__(JobStep) + JobStep wrap = None job_id = job_id.id if isinstance(job_id, Job) else job_id rc = slurm_get_job_steps(0, job_id, dehumanize_step_id(step_id), @@ -222,9 +234,7 @@ cdef class JobStep: verify_rpc(rc) if info and info.job_step_count == 1: - # Copy new info - wrap._alloc_impl() - memcpy(wrap.ptr, &info.job_steps[0], sizeof(job_step_info_t)) + wrap = JobStep.from_ptr(&info.job_steps[0]) info.job_step_count = 0 slurm_free_job_step_info_response_msg(info) else: diff --git a/pyslurm/core/job/submission.pxd b/pyslurm/core/job/submission.pxd index fdedc8ed..1005a24e 100644 --- a/pyslurm/core/job/submission.pxd +++ b/pyslurm/core/job/submission.pxd @@ -495,9 +495,9 @@ cdef class JobSubmitDescription: standard_in (str): Path to a File acting as standard_in for the batch-script. This is the same as -i/--input from sbatch. - standard_in (str): - Path to a File acting as standard_in for the batch-script. - This is the same as -i/--input from sbatch. + standard_error (str): + Path to a File acting as standard_error for the batch-script. + This is the same as -e/--error from sbatch. standard_output (str): Path to a File to write the Jobs standard_output. This is the same as -o/--output from sbatch. diff --git a/pyslurm/core/job/submission.pyx b/pyslurm/core/job/submission.pyx index df33992b..0c9e699c 100644 --- a/pyslurm/core/job/submission.pyx +++ b/pyslurm/core/job/submission.pyx @@ -73,6 +73,9 @@ cdef class JobSubmitDescription: slurm_init_job_desc_msg(self.ptr) + def __repr__(self): + return f'pyslurm.{self.__class__.__name__}' + def submit(self): """Submit a batch job description. @@ -87,9 +90,12 @@ cdef class JobSubmitDescription: >>> desc = pyslurm.JobSubmitDescription( ... name="test-job", ... cpus_per_task=1, - ... 
time_limit="10-00:00:00") + ... time_limit="10-00:00:00", + ... script="/path/to/your/submit_script.sh") >>> >>> job_id = desc.submit() + >>> print(job_id) + 99 """ cdef submit_response_msg_t *resp = NULL @@ -112,9 +118,9 @@ cdef class JobSubmitDescription: Args: overwrite (bool): - If set to True, the value from an option found in the + If set to `True`, the value from an option found in the environment will override the current value of the attribute - in this instance. Default is False + in this instance. Default is `False` Examples: Lets consider you want to set the name of the Job, its Account @@ -141,13 +147,13 @@ cdef class JobSubmitDescription: self._parse_env(overwrite) def load_sbatch_options(self, overwrite=False): - """Load values from #SBATCH options in the batch script. + """Load values from `#SBATCH` options in the batch script. Args: overwrite (bool): - If set to True, the value from an option found in the in the + If set to `True`, the value from an option found in the in the batch script will override the current value of the attribute - in this instance. Default is False + in this instance. Default is `False` """ if not self.script: raise ValueError("You need to set the 'script' attribute first.") diff --git a/pyslurm/core/node.pxd b/pyslurm/core/node.pxd index 5167de78..d889b723 100644 --- a/pyslurm/core/node.pxd +++ b/pyslurm/core/node.pxd @@ -62,7 +62,7 @@ cdef class Nodes(MultiClusterMap): """A [`Multi Cluster`][pyslurm.xcollections.MultiClusterMap] collection of [pyslurm.Node][] objects. Args: - nodes (Union[list, dict, str], optional=None): + nodes (Union[list[str], dict[str, Node], str], optional=None): Nodes to initialize this collection with. Attributes: @@ -161,8 +161,7 @@ cdef class Node: free_memory (int): Free Memory in Mebibytes on the node. memory_reserved_for_system (int): - Raw Memory in Mebibytes reserved for the System not usable by - Jobs. + Memory in Mebibytes reserved for the System not usable by Jobs. temporary_disk (int): Amount of temporary disk space this node has, in Mebibytes. weight (int): @@ -210,9 +209,10 @@ cdef class Node: external_sensors (dict): External Sensor info for the Node. The dict returned contains the following information: - * joules_total (int) - * current_watts (int) - * temperature (int) + + * `joules_total` (int) + * `current_watts` (int) + * `temperature` (int) state (str): State the node is currently in. next_state (str): diff --git a/pyslurm/core/node.pyx b/pyslurm/core/node.pyx index eac1bfef..5f8c7e2d 100644 --- a/pyslurm/core/node.pyx +++ b/pyslurm/core/node.pyx @@ -121,13 +121,16 @@ cdef class Nodes(MultiClusterMap): return nodes def reload(self): - """Reload the information for nodes in a collection. + """Reload the information for Nodes in a collection. !!! note Only information for nodes which are already in the collection at the time of calling this method will be reloaded. + Returns: + (pyslurm.Nodes): Returns self + Raises: RPCError: When getting the Nodes from the slurmctld failed. """ @@ -246,7 +249,7 @@ cdef class Node: Node.__dict__[name].__set__(self, val) def __repr__(self): - return f'{self.__class__.__name__}({self.name})' + return f'pyslurm.{self.__class__.__name__}({self.name})' @staticmethod cdef Node from_ptr(node_info_t *in_ptr): @@ -271,6 +274,10 @@ cdef class Node: Implements the slurm_load_node_single RPC. + Args: + name (str): + The name of the Node to load. + Returns: (pyslurm.Node): Returns a new Node instance. 
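A minimal usage sketch of the Node API touched in the hunks above (the node name `node001` and the printed values are placeholders, and a reachable slurmctld is assumed; this is illustrative only, not part of the patch):

```python
# Sketch only -- assumes a running cluster; "node001" is a placeholder name.
import pyslurm

# Load a single node by name (slurm_load_node_single under the hood).
node = pyslurm.Node.load("node001")
print(node)                   # pyslurm.Node(node001)
print(node.free_memory)       # free memory on the node, in Mebibytes
print(node.external_sensors)  # e.g. {'joules_total': ..., 'current_watts': ..., 'temperature': ...}

# Load the whole collection; reload() now documents that it returns self,
# so it can be chained directly.
nodes = pyslurm.Nodes.load()
print(len(nodes.reload()))
```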
@@ -285,7 +292,7 @@ cdef class Node: cdef: node_info_msg_t *node_info = NULL partition_info_msg_t *part_info = NULL - Node wrap = Node.__new__(Node) + Node wrap = None try: verify_rpc(slurm_load_node_single(&node_info, @@ -294,9 +301,7 @@ cdef class Node: slurm_populate_node_partitions(node_info, part_info) if node_info and node_info.record_count: - # Copy info - wrap._alloc_impl() - memcpy(wrap.info, &node_info.node_array[0], sizeof(node_info_t)) + wrap = Node.from_ptr(&node_info.node_array[0]) node_info.record_count = 0 else: raise RPCError(msg=f"Node '{name}' does not exist") diff --git a/pyslurm/core/partition.pyx b/pyslurm/core/partition.pyx index e1a1b6b1..ba0bf559 100644 --- a/pyslurm/core/partition.pyx +++ b/pyslurm/core/partition.pyx @@ -186,7 +186,7 @@ cdef class Partition: self._dealloc_impl() def __repr__(self): - return f'{self.__class__.__name__}({self.name})' + return f'pyslurm.{self.__class__.__name__}({self.name})' @staticmethod cdef Partition from_ptr(partition_info_t *in_ptr): diff --git a/pyslurm/db/assoc.pyx b/pyslurm/db/assoc.pyx index 4e535a46..93617669 100644 --- a/pyslurm/db/assoc.pyx +++ b/pyslurm/db/assoc.pyx @@ -206,7 +206,7 @@ cdef class Association: slurmdb_init_assoc_rec(self.ptr, 0) def __repr__(self): - return f'{self.__class__.__name__}({self.id})' + return f'pyslurm.db.{self.__class__.__name__}({self.id})' @staticmethod cdef Association from_ptr(slurmdb_assoc_rec_t *in_ptr): diff --git a/pyslurm/db/connection.pyx b/pyslurm/db/connection.pyx index 67ef7603..935f921a 100644 --- a/pyslurm/db/connection.pyx +++ b/pyslurm/db/connection.pyx @@ -48,6 +48,10 @@ cdef class Connection: def __dealloc__(self): self.close() + def __repr__(self): + state = "open" if self.is_open else "closed" + return f'pyslurm.db.{self.__class__.__name__} is {state}' + @staticmethod def open(): """Open a new connection to the slurmdbd @@ -61,6 +65,8 @@ cdef class Connection: Examples: >>> import pyslurm >>> connection = pyslurm.db.Connection.open() + >>> print(connection.is_open) + True """ cdef Connection conn = Connection.__new__(Connection) conn.ptr = slurmdb_connection_get(&conn.flags) @@ -77,6 +83,8 @@ cdef class Connection: >>> connection = pyslurm.db.Connection.open() >>> ... >>> connection.close() + >>> print(connection.is_open) + False """ if self.is_open: slurmdb_connection_close(&self.ptr) diff --git a/pyslurm/db/job.pxd b/pyslurm/db/job.pxd index bf21c003..70ef0311 100644 --- a/pyslurm/db/job.pxd +++ b/pyslurm/db/job.pxd @@ -163,7 +163,8 @@ cdef class Job: job_id (int, optional=0): An Integer representing a Job-ID. cluster (str, optional=None): - Name of the Cluster for this Job. + Name of the Cluster for this Job. Default is the name of the local + Cluster. Other Parameters: admin_comment (str): diff --git a/pyslurm/db/job.pyx b/pyslurm/db/job.pyx index 905f206a..6679a77c 100644 --- a/pyslurm/db/job.pyx +++ b/pyslurm/db/job.pyx @@ -192,7 +192,7 @@ cdef class Jobs(MultiClusterMap): def __init__(self, jobs=None): super().__init__(data=jobs, - typ="Jobs", + typ="db.Jobs", val_type=Job, id_attr=Job.id, key_type=int) @@ -208,7 +208,8 @@ cdef class Jobs(MultiClusterMap): A search filter that the slurmdbd will apply when retrieving Jobs from the database. db_connection (pyslurm.db.Connection): - An open database connection. + An open database connection. By default if none is specified, + one will be opened automatically. Returns: (pyslurm.db.Jobs): A Collection of database Jobs. 
@@ -223,6 +224,10 @@ cdef class Jobs(MultiClusterMap): >>> import pyslurm >>> db_jobs = pyslurm.db.Jobs.load() + >>> print(db_jobs) + pyslurm.db.Jobs({1: pyslurm.db.Job(1), 2: pyslurm.db.Job(2)}) + >>> print(db_jobs[1]) + pyslurm.db.Job(1) Now with a Job Filter, so only Jobs that have specific Accounts are returned: @@ -339,13 +344,20 @@ cdef class Jobs(MultiClusterMap): >>> changes = pyslurm.db.Job(comment="A comment for the job") >>> modified_jobs = pyslurm.db.Jobs.modify( ... db_filter, changes, db_conn) - >>> - >>> # Now you can first examine which Jobs have been modified + + Now you can first examine which Jobs have been modified: + >>> print(modified_jobs) [9999] - >>> # And then you can actually commit (or even rollback) the - >>> # changes + + And then you can actually commit the changes: + >>> db_conn.commit() + + You can also explicitly rollback these changes instead of + committing, so they will not become active: + + >>> db_conn.rollback() """ cdef: JobFilter cond @@ -444,7 +456,8 @@ cdef class Job: job_id (int): ID of the Job to be loaded. cluster (str): - Name of the Cluster to search in. + Name of the Cluster to search in. Default is the local + Cluster. with_script (bool): Whether the Job-Script should also be loaded. Mutually exclusive with `with_env`. @@ -520,7 +533,7 @@ cdef class Job: return out def __repr__(self): - return f'{self.__class__.__name__}({self.id})' + return f'pyslurm.db.{self.__class__.__name__}({self.id})' def modify(self, changes, db_connection=None): """Modify a Slurm database Job. diff --git a/pyslurm/db/qos.pyx b/pyslurm/db/qos.pyx index 299c0ed9..09819611 100644 --- a/pyslurm/db/qos.pyx +++ b/pyslurm/db/qos.pyx @@ -157,7 +157,7 @@ cdef class QualityOfService: return wrap def __repr__(self): - return f'{self.__class__.__name__}({self.name})' + return f'pyslurm.db.{self.__class__.__name__}({self.name})' def to_dict(self): """Database QualityOfService information formatted as a dictionary. diff --git a/pyslurm/db/step.pyx b/pyslurm/db/step.pyx index e39af066..ee809f01 100644 --- a/pyslurm/db/step.pyx +++ b/pyslurm/db/step.pyx @@ -37,6 +37,13 @@ from pyslurm.utils.helpers import ( from pyslurm.core.job.util import cpu_freq_int_to_str +cdef class JobSteps(dict): + + def __repr__(self): + data = super().__repr__() + return f'pyslurm.db.{self.__class__.__name__}({data})' + + cdef class JobStep: def __cinit__(self): @@ -67,6 +74,9 @@ cdef class JobStep: out["stats"] = self.stats.to_dict() return out + def __repr__(self): + return f'pyslurm.db.{self.__class__.__name__}({self.id})' + @property def num_nodes(self): nnodes = u32_parse(self.ptr.nnodes) diff --git a/pyslurm/xcollections.pxd b/pyslurm/xcollections.pxd index 24007da7..98dfa713 100644 --- a/pyslurm/xcollections.pxd +++ b/pyslurm/xcollections.pxd @@ -35,25 +35,26 @@ cdef class MultiClusterMap: This class enables collections to hold data from multiple Clusters if applicable. For quite a few Entities in Slurm it is possible to gather data from - multiple Clusters. For example, with `squeue`, you can easily list Jobs + multiple Clusters. For example, with `sacct`, you can easily query Jobs running on different Clusters - provided your Cluster is joined in a Federation or simply part of a multi Cluster Setup. - Collections like `pyslurm.Jobs` inherit from this Class to enable holding - such data from multiple Clusters. 
- Internally, the data is structured in a `dict` like this (with - `pyslurm.Jobs` as an example): + Collections like [pyslurm.db.Jobs][] inherit from this Class to enable + holding such data from multiple Clusters. Internally, the data is + structured in a `dict` like this (with [pyslurm.db.Jobs][] as an example): ```python data = { - "LOCAL_CLUSTER": - 1: pyslurm.Job, - 2: pyslurm.Job, + "LOCAL_CLUSTER": { + 1: pyslurm.db.Job(1), + 2: pyslurm.db.Job(2), ... - "OTHER_REMOTE_CLUSTER": - 100: pyslurm.Job, - 101, pyslurm.Job + }, + "OTHER_REMOTE_CLUSTER": { + 100: pyslurm.db.Job(100), + 101, pyslurm.db.Job(101) ... + }, ... } ``` @@ -70,15 +71,18 @@ cdef class MultiClusterMap: job = data[1] ``` - `job` would then hold the instance for Job 1 from the `LOCAL_CLUSTER` - data. + `job` would then hold the instance for `pyslurm.db.Job(1)` from the + `LOCAL_CLUSTER` data. + Alternatively, data can also be accessed like this: ```python job = data["OTHER_REMOTE_CLUSTER"][100] ``` - Here, you are directly specifying which Cluster data you want to access. + Here, you are directly specifying which Cluster data you want to access, + and you will get the instance for `pyslurm.db.Job(100)` from the + `OTHER_REMOTE_CLUSTER` data. Similarly, every method (where applicable) from a standard dict is extended with multi-cluster functionality (check out the examples on the diff --git a/pyslurm/xcollections.pyx b/pyslurm/xcollections.pyx index 8be67d29..b483cb40 100644 --- a/pyslurm/xcollections.pyx +++ b/pyslurm/xcollections.pyx @@ -251,6 +251,26 @@ cdef class MultiClusterMap: def _item_id(self, item): return self._id_attr.__get__(item) + def _iter_clusters_dict(self, other): + for key in other: + try: + iterator = iter(other[key]) + except TypeError as e: + try: + cluster = self._get_cluster() + except KeyError: + cluster = LOCAL_CLUSTER + + if not cluster in self.data: + self.data[cluster] = {} + yield (cluster, other) + break + else: + cluster = key + if not cluster in self.data: + self.data[cluster] = {} + yield (cluster, other[cluster]) + def __eq__(self, other): if isinstance(other, self.__class__): return self.data == other.data @@ -281,7 +301,8 @@ cdef class MultiClusterMap: return sum(len(data) for data in self.data.values()) def __repr__(self): - return f'{self._typ}([{", ".join(map(repr, self.values()))}])' + data = ", ".join(map(repr, self.data.values())) + return f'pyslurm.{self._typ}({data})' def __contains__(self, item): if isinstance(item, self._val_type): @@ -309,6 +330,44 @@ cdef class MultiClusterMap: def __copy__(self): return self.copy() + def __or__(self, other): + if isinstance(other, MultiClusterMap): + if isinstance(self, dict): + return NotImplemented + + out = self.copy() + out |= other + return out + elif isinstance(other, dict): + out = self.copy() + for cluster, data in self._iter_clusters_dict(other): + out.data[cluster] = self.data[cluster] | data + return out + return NotImplemented + + def __ror__(self, other): + if isinstance(other, MultiClusterMap): + out = other.copy() + out |= self + return out + elif isinstance(other, dict): + out = self.copy() + for cluster, data in self._iter_clusters_dict(other): + out.data[cluster] = data | self.data[cluster] + return out + return NotImplemented + + def __ior__(self, other): + if isinstance(other, MultiClusterMap): + for cluster in other.clusters(): + if not cluster in self.data: + self.data[cluster] = {} + self.data[cluster] |= other.data[cluster] + else: + for cluster, data in self._iter_clusters_dict(other): + self.data[cluster] |= 
data + return self + def copy(self): """Return a Copy of this instance.""" out = self.__class__.__new__(self.__class__) @@ -353,9 +412,9 @@ cdef class MultiClusterMap: """An Item to add to the collection Note that a collection can only hold its specific type. - For example, a collection of `pyslurm.Jobs` can only hold - `pyslurm.Job` objects. Trying to add anything other than the accepted - type will raise a TypeError. + For example, a collection of [pyslurm.db.Jobs][] can only hold + [pyslurm.db.Job][] objects. Trying to add anything other than the + accepted type will raise a TypeError. Args: item (Any): @@ -366,13 +425,15 @@ cdef class MultiClusterMap: the collection was added. Examples: - Add a `pyslurm.Job` instance to the `Jobs` collection. - - >>> data = pyslurm.Jobs() - >>> job = pyslurm.Job(1) - >>> data.add(job) - >>> print(data) - Jobs([Job(1)]) + Add a `pyslurm.db.Job` instance to the `pyslurm.db.Jobs` + collection. + + >>> import pyslurm + >>> jobs = pyslurm.db.Jobs() + >>> job = pyslurm.db.Job(1) + >>> jobs.add(job) + >>> print(jobs) + pyslurm.db.Jobs({1: pyslurm.db.Job(1)}) """ if item.cluster not in self.data: self.data[item.cluster] = {} @@ -500,37 +561,16 @@ cdef class MultiClusterMap: return item - def _update(self, data): - for key in data: - try: - iterator = iter(data[key]) - except TypeError as e: - cluster = self._get_cluster() - if not cluster in self.data: - self.data[cluster] = {} - self.data[cluster].update(data) - break - else: - cluster = key - if not cluster in self.data: - self.data[cluster] = {} - self.data[cluster].update(data[cluster]) -# col = data[cluster] -# if hasattr(col, "keys") and callable(col.keys): -# for k in col.keys(): - -# else: -# for item in col: -# k, v = item - - def update(self, data={}, **kwargs): """Update the collection. This functions like `dict`'s `update` method. 
""" - self._update(data) - self._update(kwargs) + for cluster, data in self._iter_clusters_dict(data): + self.data[cluster].update(data) + + for cluster, data in self._iter_clusters_dict(kwargs): + self.data[cluster].update(data) def multi_reload(cur, frozen=True): diff --git a/tests/unit/test_collection.py b/tests/unit/test_collection.py index ccb27779..a29e4f86 100644 --- a/tests/unit/test_collection.py +++ b/tests/unit/test_collection.py @@ -37,8 +37,8 @@ def _create_collection(self): 2: pyslurm.db.Job(2), }, OTHER_CLUSTER: { - 1: pyslurm.db.Job(1, cluster="other_cluster"), - 10: pyslurm.db.Job(10, cluster="other_cluster"), + 1: pyslurm.db.Job(1, cluster=OTHER_CLUSTER), + 10: pyslurm.db.Job(10, cluster=OTHER_CLUSTER), } } col = pyslurm.db.Jobs() @@ -326,3 +326,73 @@ def cpus(self): expected = 0 assert sum_property(object_dict, TestObject.cpus) == expected + + def test_ior(self): + col = self._create_collection() + col_len = len(col) + + other_data = { + LOCAL_CLUSTER: { + 3: pyslurm.db.Job(3), + 2: pyslurm.db.Job(2), + }, + "test_cluster": { + 1000: pyslurm.db.Job(1000, cluster="test_cluster"), + 1001: pyslurm.db.Job(1001, cluster="test_cluster"), + } + } + other_col = pyslurm.db.Jobs() + other_col.update(other_data) + + col |= other_col + assert isinstance(col, pyslurm.xcollections.MultiClusterMap) + assert isinstance(col, pyslurm.db.Jobs) + assert len(col.clusters()) == 3 + assert len(col) == col_len+3 + + dict_data = { + 10: pyslurm.db.Job(10), + 11: pyslurm.db.Job(11), + } + + col |= dict_data + assert isinstance(col, pyslurm.xcollections.MultiClusterMap) + assert isinstance(col, pyslurm.db.Jobs) + assert len(col.clusters()) == 3 + assert len(col[LOCAL_CLUSTER]) == 5 + assert len(col) == col_len+5 + + def test_or(self): + col = self._create_collection() + col_len = len(col) + + other_data = { + LOCAL_CLUSTER: { + 3: pyslurm.db.Job(3), + 2: pyslurm.db.Job(2), + }, + "test_cluster": { + 1000: pyslurm.db.Job(1000, cluster="test_cluster"), + 1001: pyslurm.db.Job(1001, cluster="test_cluster"), + } + } + other_col = pyslurm.db.Jobs() + other_col.update(other_data) + + _col = col | other_col + assert isinstance(_col, pyslurm.xcollections.MultiClusterMap) + assert isinstance(_col, pyslurm.db.Jobs) + assert len(_col.clusters()) == 3 + assert len(_col) == col_len+3 + + dict_data = { + 10: pyslurm.db.Job(10), + 11: pyslurm.db.Job(11), + } + + _col = _col | dict_data + assert isinstance(_col, pyslurm.xcollections.MultiClusterMap) + assert isinstance(_col, pyslurm.db.Jobs) + assert len(_col.clusters()) == 3 + assert len(_col[LOCAL_CLUSTER]) == 5 + assert len(_col) == col_len+5