From 3d509aa59b134f6732c4d5967c85984edcc14fde Mon Sep 17 00:00:00 2001 From: Simon Alibert Date: Thu, 5 Dec 2024 12:27:53 +0100 Subject: [PATCH 01/94] Add draccus, create MainConfig --- lerobot/common/datasets/factory.py | 114 ++++----- lerobot/common/datasets/transforms.py | 115 ++++++++++ lerobot/common/envs/__init__.py | 1 + lerobot/common/envs/configs.py | 30 +++ lerobot/common/logger.py | 38 ++- lerobot/common/policies/__init__.py | 4 + .../common/policies/act/configuration_act.py | 17 +- .../diffusion/configuration_diffusion.py | 17 +- .../policies/tdmpc/configuration_tdmpc.py | 22 +- .../policies/vqbet/configuration_vqbet.py | 17 +- lerobot/configs/default.py | 217 ++++++++++++++++++ lerobot/configs/policies.py | 42 ++++ lerobot/scripts/train.py | 140 +++++------ poetry.lock | 88 ++++++- pyproject.toml | 1 + 15 files changed, 715 insertions(+), 148 deletions(-) create mode 100644 lerobot/common/envs/__init__.py create mode 100644 lerobot/common/envs/configs.py create mode 100644 lerobot/common/policies/__init__.py create mode 100644 lerobot/configs/default.py create mode 100644 lerobot/configs/policies.py diff --git a/lerobot/common/datasets/factory.py b/lerobot/common/datasets/factory.py index f6164ed1dc..c5919ccb7e 100644 --- a/lerobot/common/datasets/factory.py +++ b/lerobot/common/datasets/factory.py @@ -16,27 +16,46 @@ import logging import torch -from omegaconf import ListConfig, OmegaConf -from lerobot.common.datasets.lerobot_dataset import LeRobotDataset, MultiLeRobotDataset -from lerobot.common.datasets.transforms import get_image_transforms +from lerobot.common.datasets.lerobot_dataset import ( + LeRobotDataset, + LeRobotDatasetMetadata, + MultiLeRobotDataset, +) +from lerobot.common.datasets.transforms import ImageTransforms +from lerobot.configs.default import MainConfig +from lerobot.configs.policies import PretrainedConfig +IMAGENET_STATS = { + "mean": [[[0.485]], [[0.456]], [[0.406]]], # (c,1,1) + "std": [[[0.229]], [[0.224]], [[0.225]]], # (c,1,1) +} -def resolve_delta_timestamps(cfg): + +def resolve_delta_timestamps( + cfg: PretrainedConfig, ds_meta: LeRobotDatasetMetadata +) -> dict[str, list] | None: """Resolves delta_timestamps config key (in-place) by using `eval`. Doesn't do anything if delta_timestamps is not specified or has already been resolve (as evidenced by the data type of its values). """ - delta_timestamps = cfg.training.get("delta_timestamps") - if delta_timestamps is not None: - for key in delta_timestamps: - if isinstance(delta_timestamps[key], str): - # TODO(rcadene, alexander-soare): remove `eval` to avoid exploit - cfg.training.delta_timestamps[key] = eval(delta_timestamps[key]) + delta_timestamps = {} + for key in ds_meta.features: + if key == "next.reward" and cfg.reward_delta_indices is not None: + delta_timestamps[key] = [i / ds_meta.fps for i in cfg.reward_delta_indices] + if key == "action" and cfg.action_delta_indices is not None: + delta_timestamps[key] = [i / ds_meta.fps for i in cfg.action_delta_indices] + if key.startswith("observation.") and cfg.observation_delta_indices is not None: + delta_timestamps[key] = [i / ds_meta.fps for i in cfg.observation_delta_indices] + + if len(delta_timestamps) == 0: + delta_timestamps = None + + return delta_timestamps -def make_dataset(cfg, split: str = "train") -> LeRobotDataset | MultiLeRobotDataset: +def make_dataset(cfg: MainConfig, split: str = "train") -> LeRobotDataset | MultiLeRobotDataset: """ Args: cfg: A Hydra config as per the LeRobot config scheme. 
@@ -50,67 +69,58 @@ def make_dataset(cfg, split: str = "train") -> LeRobotDataset | MultiLeRobotData Returns: The LeRobotDataset. """ - if not isinstance(cfg.dataset_repo_id, (str, ListConfig)): - raise ValueError( - "Expected cfg.dataset_repo_id to be either a single string to load one dataset or a list of " - "strings to load multiple datasets." - ) - # A soft check to warn if the environment matches the dataset. Don't check if we are using a real world env (dora). - if cfg.env.name != "dora": - if isinstance(cfg.dataset_repo_id, str): - dataset_repo_ids = [cfg.dataset_repo_id] # single dataset + if cfg.env.type != "dora": + if isinstance(cfg.dataset.repo_id, str): + dataset_repo_ids = [cfg.dataset.repo_id] # single dataset + elif isinstance(cfg.dataset.repo_id, list): + dataset_repo_ids = cfg.dataset.repo_id # multiple datasets else: - dataset_repo_ids = cfg.dataset_repo_id # multiple datasets + raise ValueError( + "Expected cfg.dataset.repo_id to be either a single string to load one dataset or a list of " + "strings to load multiple datasets." + ) for dataset_repo_id in dataset_repo_ids: - if cfg.env.name not in dataset_repo_id: + if cfg.env.type not in dataset_repo_id: logging.warning( f"There might be a mismatch between your training dataset ({dataset_repo_id=}) and your " - f"environment ({cfg.env.name=})." + f"environment ({cfg.env.type=})." ) - resolve_delta_timestamps(cfg) - - image_transforms = None - if cfg.training.image_transforms.enable: - cfg_tf = cfg.training.image_transforms - image_transforms = get_image_transforms( - brightness_weight=cfg_tf.brightness.weight, - brightness_min_max=cfg_tf.brightness.min_max, - contrast_weight=cfg_tf.contrast.weight, - contrast_min_max=cfg_tf.contrast.min_max, - saturation_weight=cfg_tf.saturation.weight, - saturation_min_max=cfg_tf.saturation.min_max, - hue_weight=cfg_tf.hue.weight, - hue_min_max=cfg_tf.hue.min_max, - sharpness_weight=cfg_tf.sharpness.weight, - sharpness_min_max=cfg_tf.sharpness.min_max, - max_num_transforms=cfg_tf.max_num_transforms, - random_order=cfg_tf.random_order, - ) + image_transforms = ( + ImageTransforms(cfg.dataset.image_transforms) if cfg.dataset.image_transforms.enable else None + ) - if isinstance(cfg.dataset_repo_id, str): + if isinstance(cfg.dataset.repo_id, str): # TODO (aliberts): add 'episodes' arg from config after removing hydra + ds_meta = LeRobotDatasetMetadata(cfg.dataset.repo_id, local_files_only=cfg.dataset.local_files_only) + delta_timestamps = resolve_delta_timestamps(cfg.policy, ds_meta) dataset = LeRobotDataset( - cfg.dataset_repo_id, - delta_timestamps=cfg.training.get("delta_timestamps"), + cfg.dataset.repo_id, + episodes=cfg.dataset.episodes, + delta_timestamps=delta_timestamps, image_transforms=image_transforms, video_backend=cfg.video_backend, + local_files_only=cfg.dataset.local_files_only, ) else: dataset = MultiLeRobotDataset( - cfg.dataset_repo_id, - delta_timestamps=cfg.training.get("delta_timestamps"), + cfg.dataset.repo_id, + # TODO(aliberts): add proper support for multi dataset + # delta_timestamps=delta_timestamps, image_transforms=image_transforms, video_backend=cfg.video_backend, ) - if cfg.get("override_dataset_stats"): - for key, stats_dict in cfg.override_dataset_stats.items(): - for stats_type, listconfig in stats_dict.items(): - # example of stats_type: min, max, mean, std - stats = OmegaConf.to_container(listconfig, resolve=True) + if cfg.dataset.use_imagenet_stats: + for key in dataset.meta.camera_keys: + for stats_type, stats in IMAGENET_STATS.items(): 
dataset.meta.stats[key][stats_type] = torch.tensor(stats, dtype=torch.float32) + # for key, stats_dict in cfg.override_dataset_stats.items(): + # for stats_type, listconfig in stats_dict.items(): + # # example of stats_type: min, max, mean, std + # stats = OmegaConf.to_container(listconfig, resolve=True) + # dataset.meta.stats[key][stats_type] = torch.tensor(stats, dtype=torch.float32) return dataset diff --git a/lerobot/common/datasets/transforms.py b/lerobot/common/datasets/transforms.py index 899f0d66c9..696ccb02e8 100644 --- a/lerobot/common/datasets/transforms.py +++ b/lerobot/common/datasets/transforms.py @@ -14,6 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. import collections +from dataclasses import dataclass, field from typing import Any, Callable, Dict, Sequence import torch @@ -137,6 +138,7 @@ def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any: return self._call_kernel(F.adjust_sharpness, inpt, sharpness_factor=sharpness_factor) +# TODO(aliberts): Remove def get_image_transforms( brightness_weight: float = 1.0, brightness_min_max: tuple[float, float] | None = None, @@ -195,3 +197,116 @@ def check_value(name, weight, min_max): else: # TODO(rcadene, aliberts): add v2.ToDtype float16? return RandomSubsetApply(transforms, p=weights, n_subset=n_subset, random_order=random_order) + + +@dataclass +class ImageTransformConfig: + """ + For each transform, the following parameters are available: + weight: This represents the multinomial probability (with no replacement) + used for sampling the transform. If the sum of the weights is not 1, + they will be normalized. + type: The name of the class used. This is either a class available under torchvision.transforms.v2 or a + custom transform defined here. + kwargs: Lower & upper bound respectively used for sampling the transform's parameter + (following uniform distribution) when it's applied. + """ + + weight: int = 1.0 + type: str = "Identity" + kwargs: Dict[str, Any] = field(default_factory=dict) + + +@dataclass +class ImageTransformsConfig: + """ + These transforms are all using standard torchvision.transforms.v2 + You can find out how these transformations affect images here: + https://pytorch.org/vision/0.18/auto_examples/transforms/plot_transforms_illustrations.html + We use a custom RandomSubsetApply container to sample them. + """ + + # Set this flag to `true` to enable transforms during training + enable: bool = False + # This is the maximum number of transforms (sampled from these below) that will be applied to each frame. + # It's an integer in the interval [1, number_of_available_transforms]. + max_num_transforms: int = 3 + # By default, transforms are applied in Torchvision's suggested order (shown below). + # Set this to True to apply them in a random order. 
+ random_order: bool = False + tfs: list[ImageTransformConfig] = field( + default_factory=lambda: [ + ImageTransformConfig( + weight=1.0, + type="ColorJitter", + kwargs={"brightness": (0.8, 1.2)}, + ), + ImageTransformConfig( + weight=1.0, + type="ColorJitter", + kwargs={"contrast": (0.8, 1.2)}, + ), + ImageTransformConfig( + weight=1.0, + type="ColorJitter", + kwargs={"saturation": (0.5, 1.5)}, + ), + ImageTransformConfig( + weight=1.0, + type="ColorJitter", + kwargs={"hue": (-0.05, 0.05)}, + ), + ImageTransformConfig( + weight=1.0, + type="SharpnessJitter", + kwargs={"sharpness": (0.5, 1.5)}, + ), + ] + ) + + +class ImageTransforms(Transform): + """A class to compose image transforms based on configuration.""" + + _registry = { + "Identity": v2.Identity, + "ColorJitter": v2.ColorJitter, + "SharpnessJitter": SharpnessJitter, + } + + def __init__(self, cfg: ImageTransformsConfig) -> None: + super().__init__() + self._cfg = cfg + + weights = [] + transforms = [] + for tf_cfg in cfg.tfs: + if tf_cfg.weight <= 0.0: + continue + + transform_cls = self._registry.get(tf_cfg.type) + if transform_cls is None: + available_transforms = ", ".join(self._registry.keys()) + raise ValueError( + f"Transform '{tf_cfg.type}' not found in the registry. " + f"Available transforms are: {available_transforms}" + ) + + # Instantiate the transform + transform_instance = transform_cls(**tf_cfg.kwargs) + transforms.append(transform_instance) + weights.append(tf_cfg.weight) + + n_subset = min(len(transforms), cfg.max_num_transforms) + if n_subset == 0 or not cfg.enable: + self.transform = v2.Identity() + else: + self.transform = RandomSubsetApply( + transforms=transforms, + p=weights, + n_subset=n_subset, + random_order=cfg.random_order, + ) + + def forward(self, *inputs: Any) -> Any: + return self.transform(*inputs) diff --git a/lerobot/common/envs/__init__.py b/lerobot/common/envs/__init__.py new file mode 100644 index 0000000000..490b487c0d --- /dev/null +++ b/lerobot/common/envs/__init__.py @@ -0,0 +1 @@ +from .configs import AlohaEnv, EnvConfig, RealEnv # noqa: F401 diff --git a/lerobot/common/envs/configs.py b/lerobot/common/envs/configs.py new file mode 100644 index 0000000000..b4414dee65 --- /dev/null +++ b/lerobot/common/envs/configs.py @@ -0,0 +1,30 @@ +from dataclasses import dataclass + +import draccus + + +@dataclass +class EnvConfig(draccus.ChoiceRegistry): + task: str | None = None + state_dim: int = 18 + action_dim: int = 18 + fps: int = 30 + + @property + def type(self) -> str: + return self.get_choice_name(self.__class__) + + +@EnvConfig.register_subclass("real_world") +@dataclass +class RealEnv(EnvConfig): + pass + + +@EnvConfig.register_subclass("aloha") +@dataclass +class AlohaEnv(EnvConfig): + task: str = "AlohaInsertion-v0" + state_dim: int = 14 + action_dim: int = 14 + fps: int = 50 diff --git a/lerobot/common/logger.py b/lerobot/common/logger.py index 3bd2df89ab..b534c92b87 100644 --- a/lerobot/common/logger.py +++ b/lerobot/common/logger.py @@ -33,6 +33,7 @@ from lerobot.common.policies.policy_protocol import Policy from lerobot.common.utils.utils import get_global_random_state, set_global_random_state +from lerobot.configs.default import MainConfig def log_output_dir(out_dir): @@ -42,9 +43,9 @@ def log_output_dir(out_dir): def cfg_to_group(cfg: DictConfig, return_list: bool = False) -> list[str] | str: """Return a group name for logging. 
Optionally returns group name as list.""" lst = [ - f"policy:{cfg.policy.name}", - f"dataset:{cfg.dataset_repo_id}", - f"env:{cfg.env.name}", + f"policy:{cfg.policy.type}", + f"dataset:{cfg.dataset.repo_id}", + f"env:{cfg.env.type}", f"seed:{cfg.seed}", ] return lst if return_list else "-".join(lst) @@ -83,25 +84,18 @@ class Logger: pretrained_model_dir_name = "pretrained_model" training_state_file_name = "training_state.pth" - def __init__(self, cfg: DictConfig, log_dir: str, wandb_job_name: str | None = None): - """ - Args: - log_dir: The directory to save all logs and training outputs to. - job_name: The WandB job name. - """ + def __init__(self, cfg: MainConfig): self._cfg = cfg - self.log_dir = Path(log_dir) + self.log_dir = cfg.dir self.log_dir.mkdir(parents=True, exist_ok=True) - self.checkpoints_dir = self.get_checkpoints_dir(log_dir) - self.last_checkpoint_dir = self.get_last_checkpoint_dir(log_dir) - self.last_pretrained_model_dir = self.get_last_pretrained_model_dir(log_dir) + self.job_name = cfg.job_name + self.checkpoints_dir = self.get_checkpoints_dir(self.log_dir) + self.last_checkpoint_dir = self.get_last_checkpoint_dir(self.log_dir) + self.last_pretrained_model_dir = self.get_last_pretrained_model_dir(self.log_dir) # Set up WandB. self._group = cfg_to_group(cfg) - project = cfg.get("wandb", {}).get("project") - entity = cfg.get("wandb", {}).get("entity") - enable_wandb = cfg.get("wandb", {}).get("enable", False) - run_offline = not enable_wandb or not project + run_offline = not cfg.wandb.enable or not cfg.wandb.project if run_offline: logging.info(colored("Logs will be saved locally.", "yellow", attrs=["bold"])) self._wandb = None @@ -115,12 +109,12 @@ def __init__(self, cfg: DictConfig, log_dir: str, wandb_job_name: str | None = N wandb.init( id=wandb_run_id, - project=project, - entity=entity, - name=wandb_job_name, - notes=cfg.get("wandb", {}).get("notes"), + project=cfg.wandb.project, + entity=cfg.wandb.entity, + name=self.job_name, + notes=cfg.wandb.notes, tags=cfg_to_group(cfg, return_list=True), - dir=log_dir, + dir=self.log_dir, config=OmegaConf.to_container(cfg, resolve=True), # TODO(rcadene): try set to True save_code=False, diff --git a/lerobot/common/policies/__init__.py b/lerobot/common/policies/__init__.py new file mode 100644 index 0000000000..58db9849f3 --- /dev/null +++ b/lerobot/common/policies/__init__.py @@ -0,0 +1,4 @@ +from .act.configuration_act import ACTConfig as ACTConfig +from .diffusion.configuration_diffusion import DiffusionConfig as DiffusionConfig +from .tdmpc.configuration_tdmpc import TDMPCConfig as TDMPCConfig +from .vqbet.configuration_vqbet import VQBeTConfig as VQBeTConfig diff --git a/lerobot/common/policies/act/configuration_act.py b/lerobot/common/policies/act/configuration_act.py index a86c359c99..5c0d209340 100644 --- a/lerobot/common/policies/act/configuration_act.py +++ b/lerobot/common/policies/act/configuration_act.py @@ -15,9 +15,12 @@ # limitations under the License. from dataclasses import dataclass, field +from lerobot.configs.policies import PretrainedConfig + +@PretrainedConfig.register_subclass("act") @dataclass -class ACTConfig: +class ACTConfig(PretrainedConfig): """Configuration class for the Action Chunking Transformers policy. Defaults are configured for training on bimanual Aloha tasks like "insertion" or "transfer". 
@@ -169,3 +172,15 @@ def __post_init__(self): and "observation.environment_state" not in self.input_shapes ): raise ValueError("You must provide at least one image or the environment state among the inputs.") + + @property + def observation_delta_indices(self) -> None: + return None + + @property + def action_delta_indices(self) -> list: + return list(range(self.chunk_size)) + + @property + def reward_delta_indices(self) -> None: + return None diff --git a/lerobot/common/policies/diffusion/configuration_diffusion.py b/lerobot/common/policies/diffusion/configuration_diffusion.py index 531f49e4d7..64a66dfd15 100644 --- a/lerobot/common/policies/diffusion/configuration_diffusion.py +++ b/lerobot/common/policies/diffusion/configuration_diffusion.py @@ -16,9 +16,12 @@ # limitations under the License. from dataclasses import dataclass, field +from lerobot.configs.policies import PretrainedConfig + +@PretrainedConfig.register_subclass("diffusion") @dataclass -class DiffusionConfig: +class DiffusionConfig(PretrainedConfig): """Configuration class for DiffusionPolicy. Defaults are configured for training with PushT providing proprioceptive and single camera observations. @@ -207,3 +210,15 @@ def __post_init__(self): "The horizon should be an integer multiple of the downsampling factor (which is determined " f"by `len(down_dims)`). Got {self.horizon=} and {self.down_dims=}" ) + + @property + def observation_delta_indices(self) -> list: + return list(range(1 - self.n_obs_steps, 1)) + + @property + def action_delta_indices(self) -> list: + return list(range(1 - self.n_obs_steps, 1 - self.n_obs_steps + self.horizon)) + + @property + def reward_delta_indices(self) -> None: + return None diff --git a/lerobot/common/policies/tdmpc/configuration_tdmpc.py b/lerobot/common/policies/tdmpc/configuration_tdmpc.py index 4a5415a156..7705a9c0cf 100644 --- a/lerobot/common/policies/tdmpc/configuration_tdmpc.py +++ b/lerobot/common/policies/tdmpc/configuration_tdmpc.py @@ -16,9 +16,12 @@ # limitations under the License. from dataclasses import dataclass, field +from lerobot.configs.policies import PretrainedConfig + +@PretrainedConfig.register_subclass("tdmpc") @dataclass -class TDMPCConfig: +class TDMPCConfig(PretrainedConfig): """Configuration class for TDMPCPolicy. Defaults are configured for training with xarm_lift_medium_replay providing proprioceptive and single @@ -102,6 +105,7 @@ class TDMPCConfig: """ # Input / output structure. + n_obs_steps: int = 1 n_action_repeats: int = 2 horizon: int = 5 n_action_steps: int = 1 @@ -185,6 +189,10 @@ def __post_init__(self): f"advised that you stick with the default. See {self.__class__.__name__} docstring for more " "information." ) + if self.n_obs_steps != 1: + raise ValueError( + f"Multiple observation steps not handled yet. 
Got `nobs_steps={self.n_obs_steps}`" + ) if self.n_action_steps > 1: if self.n_action_repeats != 1: raise ValueError( @@ -194,3 +202,15 @@ def __post_init__(self): raise ValueError("If `n_action_steps > 1`, `use_mpc` must be set to `True`.") if self.n_action_steps > self.horizon: raise ValueError("`n_action_steps` must be less than or equal to `horizon`.") + + @property + def observation_delta_indices(self) -> list: + return list(range(self.horizon + 1)) + + @property + def action_delta_indices(self) -> list: + return list(range(self.horizon)) + + @property + def reward_delta_indices(self) -> None: + return list(range(self.horizon)) diff --git a/lerobot/common/policies/vqbet/configuration_vqbet.py b/lerobot/common/policies/vqbet/configuration_vqbet.py index dfe4684d26..4d8ce94cfe 100644 --- a/lerobot/common/policies/vqbet/configuration_vqbet.py +++ b/lerobot/common/policies/vqbet/configuration_vqbet.py @@ -18,9 +18,12 @@ from dataclasses import dataclass, field +from lerobot.configs.policies import PretrainedConfig + +@PretrainedConfig.register_subclass("vqbet") @dataclass -class VQBeTConfig: +class VQBeTConfig(PretrainedConfig): """Configuration class for VQ-BeT. Defaults are configured for training with PushT providing proprioceptive and single camera observations. @@ -165,3 +168,15 @@ def __post_init__(self): f"`input_shapes[{image_key}]` does not match `input_shapes[{first_image_key}]`, but we " "expect all image shapes to match." ) + + @property + def observation_delta_indices(self) -> list: + return list(range(1 - self.n_obs_steps, 1)) + + @property + def action_delta_indices(self) -> list: + return list(range(1 - self.n_obs_steps, self.n_action_pred_token + self.action_chunk_size - 1)) + + @property + def reward_delta_indices(self) -> None: + return None diff --git a/lerobot/configs/default.py b/lerobot/configs/default.py new file mode 100644 index 0000000000..54c2b9302e --- /dev/null +++ b/lerobot/configs/default.py @@ -0,0 +1,217 @@ +#!/usr/bin/env python + +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import datetime as dt +import logging +from dataclasses import dataclass, field +from pathlib import Path +from pprint import pformat + +import draccus +from deepdiff import DeepDiff + +from lerobot.common import ( + envs, # noqa: F401 + policies, # noqa: F401 +) +from lerobot.common.datasets.transforms import ImageTransformsConfig +from lerobot.configs.policies import PretrainedConfig + + +@dataclass +class OfflineConfig: + steps: int = 100_000 + + +@dataclass +class OnlineConfig: + """ + The online training look looks something like: + + ```python + for i in range(steps): + do_online_rollout_and_update_online_buffer() + for j in range(steps_between_rollouts): + batch = next(dataloader_with_offline_and_online_data) + loss = policy(batch) + loss.backward() + optimizer.step() + ``` + + Note that the online training loop adopts most of the options from the offline loop unless specified + otherwise. 
+ """ + + steps: int = 0 + # How many episodes to collect at once when we reach the online rollout part of the training loop. + rollout_n_episodes: int = 1 + # The number of environments to use in the gym.vector.VectorEnv. This ends up also being the batch size for + # the policy. Ideally you should set this to by an even divisor or online_rollout_n_episodes. + rollout_batch_size: int = 1 + # How many optimization steps (forward, backward, optimizer step) to do between running rollouts. + steps_between_rollouts: int | None = None + # The proportion of online samples (vs offline samples) to include in the online training batches. + sampling_ratio: float = 0.5 + # First seed to use for the online rollout environment. Seeds for subsequent rollouts are incremented by 1. + env_seed: int | None = None + # Sets the maximum number of frames that are stored in the online buffer for online training. The buffer is + # FIFO. + buffer_capacity: int | None = None + # The minimum number of frames to have in the online buffer before commencing online training. + # If online_buffer_seed_size > online_rollout_n_episodes, the rollout will be run multiple times until the + # seed size condition is satisfied. + buffer_seed_size: int = 0 + # Whether to run the online rollouts asynchronously. This means we can run the online training steps in + # parallel with the rollouts. This might be advised if your GPU has the bandwidth to handle training + # + eval + environment rendering simultaneously. + do_online_rollout_async: bool = False + + +@dataclass +class TrainConfig: + # Number of workers for the dataloader. + num_workers: int = 4 + batch_size: int = 8 + eval_freq: int = 20_000 + log_freq: int = 200 + save_checkpoint: bool = True + # Checkpoint is saved every `save_freq` training iterations and after the last training step. + save_freq: int = 20_000 + offline: OfflineConfig = field(default_factory=OfflineConfig) + online: OnlineConfig = field(default_factory=OnlineConfig) + + +@dataclass +class DatasetConfig: + repo_id: str | list[str] + episodes: list[int] | None = None + image_transforms: ImageTransformsConfig = field(default_factory=ImageTransformsConfig) + local_files_only: bool = False + use_imagenet_stats: bool = True + + +@dataclass +class EvalConfig: + n_episodes: int = 50 + # `batch_size` specifies the number of environments to use in a gym.vector.VectorEnv. + batch_size: int = 50 + # `use_async_envs` specifies whether to use asynchronous environments (multiprocessing). + use_async_envs: bool = False + + def __post_init__(self): + if self.batch_size > self.n_episodes: + raise ValueError( + "The eval batch size is greater than the number of eval episodes " + f"({self.batch_size} > {self.n_episodes}). As a result, {self.batch_size} " + f"eval environments will be instantiated, but only {self.n_episodes} will be used. " + "This might significantly slow down evaluation. To fix this, you should update your command " + f"to increase the number of episodes to match the batch size (e.g. `eval.n_episodes={self.batch_size}`), " + f"or lower the batch size (e.g. `eval.batch_size={self.n_episodes}`)." 
+            )
+
+
+@dataclass
+class WandBConfig:
+    enable: bool = False
+    # Set to true to disable saving an artifact despite training.save_checkpoint=True
+    disable_artifact: bool = False
+    project: str = "lerobot"
+    entity: str | None = None
+    notes: str | None = None
+
+
+@dataclass
+class MainConfig:
+    policy: PretrainedConfig
+    dataset: DatasetConfig
+    env: envs.EnvConfig = envs.RealEnv
+    # Set `dir` to where you would like to save all of the run outputs. If you run another training session
+    # with the same value for `dir` its contents will be overwritten unless you set `resume` to true.
+    dir: Path | None = None
+    job_name: str | None = None
+    # Set `resume` to true to resume a previous run. In order for this to work, you will need to make sure
+    # `dir` is the directory of an existing run with at least one checkpoint in it.
+    # Note that when resuming a run, the default behavior is to use the configuration from the checkpoint,
+    # regardless of what's provided with the training command at the time of resumption.
+    resume: bool = False
+    device: str = "cuda"  # | cpu | mps
+    # `use_amp` determines whether to use Automatic Mixed Precision (AMP) for training and evaluation. With AMP,
+    # automatic gradient scaling is used.
+    use_amp: bool = False
+    # `seed` is used for training (eg: model initialization, dataset shuffling)
+    # AND for the evaluation environments.
+    seed: int | None = None
+    # You may provide a list of datasets here. `train.py` creates them all and concatenates them. Note: only data
+    # keys common between the datasets are kept. Each dataset gets an additional transform that inserts the
+    # "dataset_index" into the returned item. The index mapping is made according to the order in which the
+    # datasets are provided.
+    video_backend: str = "pyav"
+    training: TrainConfig = field(default_factory=TrainConfig)
+    eval: EvalConfig = field(default_factory=EvalConfig)
+    wandb: WandBConfig = field(default_factory=WandBConfig)
+
+    def __post_init__(self):
+        if not self.job_name:
+            self.job_name = f"{self.env.type}_{self.policy.type}"
+
+        if not self.dir:
+            now = dt.datetime.now()
+            train_dir = f"{now:%Y-%m-%d}/{now:%H-%M-%S}_{self.job_name}"
+            self.dir = Path("outputs/train") / train_dir
+
+        if self.training.online.steps > 0 and isinstance(self.dataset.repo_id, list):
+            raise NotImplementedError("Online training with LeRobotMultiDataset is not implemented.")
+
+        # If we are resuming a run, we need to check that a checkpoint exists in the log directory, and we need
+        # to check for any differences between the provided config and the checkpoint's config.
+        checkpoint_cfg_path = self.dir / "checkpoints/last/config.yaml"
+        if self.resume:
+            if not checkpoint_cfg_path.exists():
+                raise RuntimeError(
+                    f"You have set resume=True, but there is no model checkpoint in {self.dir}"
+                )
+
+            # Get the configuration file from the last checkpoint.
+            checkpoint_cfg = self.from_checkpoint(checkpoint_cfg_path)
+
+            # # Check for differences between the checkpoint configuration and provided configuration.
+            # # Hack to resolve the delta_timestamps ahead of time in order to properly diff.
+            # resolve_delta_timestamps(cfg)
+            diff = DeepDiff(checkpoint_cfg, self)
+            # Ignore the `resume` parameter.
+            if "values_changed" in diff and "root['resume']" in diff["values_changed"]:
+                del diff["values_changed"]["root['resume']"]
+            # Log a warning about differences between the checkpoint configuration and the provided
+            # configuration.
+ if len(diff) > 0: + logging.warning( + "At least one difference was detected between the checkpoint configuration and " + f"the provided configuration: \n{pformat(diff)}\nNote that the checkpoint configuration " + "takes precedence.", + ) + # Use the checkpoint config instead of the provided config (but keep `resume` parameter). + self = checkpoint_cfg + self.resume = True + + elif checkpoint_cfg_path.exists(): + raise RuntimeError( + f"The configured output directory {checkpoint_cfg_path} already exists. If " + "you meant to resume training, please use `resume=true` in your command or yaml configuration." + ) + + @classmethod + def from_checkpoint(cls, config_path: Path): + return draccus.load(cls, config_path) diff --git a/lerobot/configs/policies.py b/lerobot/configs/policies.py new file mode 100644 index 0000000000..7c7d80c16c --- /dev/null +++ b/lerobot/configs/policies.py @@ -0,0 +1,42 @@ +from dataclasses import dataclass, field + +import draccus + + +@dataclass +class PretrainedConfig(draccus.ChoiceRegistry): + """ + Base configuration class for policy models. + + Args: + n_obs_steps: Number of environment steps worth of observations to pass to the policy (takes the + current step and additional steps going back). + input_shapes: A dictionary defining the shapes of the input data for the policy. + output_shapes: A dictionary defining the shapes of the output data for the policy. + input_normalization_modes: A dictionary with key representing the modality and the value specifies the + normalization mode to apply. + output_normalization_modes: Similar dictionary as `input_normalization_modes`, but to unnormalize to + the original scale. + """ + + n_obs_steps: int = 1 + input_shapes: dict[str, list[int]] = field(default_factory=lambda: {}) + output_shapes: dict[str, list[int]] = field(default_factory=lambda: {}) + input_normalization_modes: dict[str, str] = field(default_factory=lambda: {}) + output_normalization_modes: dict[str, str] = field(default_factory=lambda: {}) + + @property + def type(self) -> str: + return self.get_choice_name(self.__class__) + + @property + def observation_delta_indices(self) -> list | None: + raise NotImplementedError + + @property + def action_delta_indices(self) -> list | None: + raise NotImplementedError + + @property + def reward_delta_indices(self) -> list | None: + raise NotImplementedError diff --git a/lerobot/scripts/train.py b/lerobot/scripts/train.py index 9a0b7e4cbb..d8d18b6aea 100644 --- a/lerobot/scripts/train.py +++ b/lerobot/scripts/train.py @@ -18,16 +18,15 @@ from concurrent.futures import ThreadPoolExecutor from contextlib import nullcontext from copy import deepcopy +from dataclasses import asdict from pathlib import Path from pprint import pformat from threading import Lock +import draccus import hydra import numpy as np import torch -from deepdiff import DeepDiff -from omegaconf import DictConfig, ListConfig, OmegaConf -from termcolor import colored from torch import nn from torch.cuda.amp import GradScaler @@ -44,14 +43,14 @@ from lerobot.common.utils.utils import ( format_big_number, get_safe_torch_device, - init_hydra_config, init_logging, set_global_seed, ) +from lerobot.configs.default import MainConfig from lerobot.scripts.eval import eval_policy -def make_optimizer_and_scheduler(cfg, policy): +def make_optimizer_and_scheduler(cfg: MainConfig, policy): if cfg.policy.name == "act": optimizer_params_dicts = [ { @@ -234,74 +233,76 @@ def log_eval_info(logger, info, step, cfg, dataset, is_online): logger.log_dict(info, step, 
mode="eval") -def train(cfg: DictConfig, out_dir: str | None = None, job_name: str | None = None): - if out_dir is None: - raise NotImplementedError() - if job_name is None: - raise NotImplementedError() +@draccus.wrap() +def train(cfg: MainConfig, out_dir: str | None = None, job_name: str | None = None): + # if out_dir is None: + # raise NotImplementedError() + # if job_name is None: + # raise NotImplementedError() init_logging() - logging.info(pformat(OmegaConf.to_container(cfg))) - - if cfg.training.online_steps > 0 and isinstance(cfg.dataset_repo_id, ListConfig): - raise NotImplementedError("Online training with LeRobotMultiDataset is not implemented.") - - # If we are resuming a run, we need to check that a checkpoint exists in the log directory, and we need - # to check for any differences between the provided config and the checkpoint's config. - if cfg.resume: - if not Logger.get_last_checkpoint_dir(out_dir).exists(): - raise RuntimeError( - "You have set resume=True, but there is no model checkpoint in " - f"{Logger.get_last_checkpoint_dir(out_dir)}" - ) - checkpoint_cfg_path = str(Logger.get_last_pretrained_model_dir(out_dir) / "config.yaml") - logging.info( - colored( - "You have set resume=True, indicating that you wish to resume a run", - color="yellow", - attrs=["bold"], - ) - ) - # Get the configuration file from the last checkpoint. - checkpoint_cfg = init_hydra_config(checkpoint_cfg_path) - # Check for differences between the checkpoint configuration and provided configuration. - # Hack to resolve the delta_timestamps ahead of time in order to properly diff. - resolve_delta_timestamps(cfg) - diff = DeepDiff(OmegaConf.to_container(checkpoint_cfg), OmegaConf.to_container(cfg)) - # Ignore the `resume` and parameters. - if "values_changed" in diff and "root['resume']" in diff["values_changed"]: - del diff["values_changed"]["root['resume']"] - # Log a warning about differences between the checkpoint configuration and the provided - # configuration. - if len(diff) > 0: - logging.warning( - "At least one difference was detected between the checkpoint configuration and " - f"the provided configuration: \n{pformat(diff)}\nNote that the checkpoint configuration " - "takes precedence.", - ) - # Use the checkpoint config instead of the provided config (but keep `resume` parameter). - cfg = checkpoint_cfg - cfg.resume = True - elif Logger.get_last_checkpoint_dir(out_dir).exists(): - raise RuntimeError( - f"The configured output directory {Logger.get_last_checkpoint_dir(out_dir)} already exists. If " - "you meant to resume training, please use `resume=true` in your command or yaml configuration." - ) - - if cfg.eval.batch_size > cfg.eval.n_episodes: - raise ValueError( - "The eval batch size is greater than the number of eval episodes " - f"({cfg.eval.batch_size} > {cfg.eval.n_episodes}). As a result, {cfg.eval.batch_size} " - f"eval environments will be instantiated, but only {cfg.eval.n_episodes} will be used. " - "This might significantly slow down evaluation. To fix this, you should update your command " - f"to increase the number of episodes to match the batch size (e.g. `eval.n_episodes={cfg.eval.batch_size}`), " - f"or lower the batch size (e.g. `eval.batch_size={cfg.eval.n_episodes}`)." 
- ) + logging.info(pformat(asdict(cfg))) + + # if cfg.training.online_steps > 0 and isinstance(cfg.dataset_repo_id, ListConfig): + # raise NotImplementedError("Online training with LeRobotMultiDataset is not implemented.") + + # # If we are resuming a run, we need to check that a checkpoint exists in the log directory, and we need + # # to check for any differences between the provided config and the checkpoint's config. + # if cfg.resume: + # if not Logger.get_last_checkpoint_dir(out_dir).exists(): + # raise RuntimeError( + # "You have set resume=True, but there is no model checkpoint in " + # f"{Logger.get_last_checkpoint_dir(out_dir)}" + # ) + # checkpoint_cfg_path = str(Logger.get_last_pretrained_model_dir(out_dir) / "config.yaml") + # logging.info( + # colored( + # "You have set resume=True, indicating that you wish to resume a run", + # color="yellow", + # attrs=["bold"], + # ) + # ) + # # Get the configuration file from the last checkpoint. + # checkpoint_cfg = init_hydra_config(checkpoint_cfg_path) + # # Check for differences between the checkpoint configuration and provided configuration. + # # Hack to resolve the delta_timestamps ahead of time in order to properly diff. + # resolve_delta_timestamps(cfg) + # diff = DeepDiff(OmegaConf.to_container(checkpoint_cfg), OmegaConf.to_container(cfg)) + # # Ignore the `resume` and parameters. + # if "values_changed" in diff and "root['resume']" in diff["values_changed"]: + # del diff["values_changed"]["root['resume']"] + # # Log a warning about differences between the checkpoint configuration and the provided + # # configuration. + # if len(diff) > 0: + # logging.warning( + # "At least one difference was detected between the checkpoint configuration and " + # f"the provided configuration: \n{pformat(diff)}\nNote that the checkpoint configuration " + # "takes precedence.", + # ) + # # Use the checkpoint config instead of the provided config (but keep `resume` parameter). + # cfg = checkpoint_cfg + # cfg.resume = True + # elif Logger.get_last_checkpoint_dir(out_dir).exists(): + # raise RuntimeError( + # f"The configured output directory {Logger.get_last_checkpoint_dir(out_dir)} already exists. If " + # "you meant to resume training, please use `resume=true` in your command or yaml configuration." + # ) + + # if cfg.eval.batch_size > cfg.eval.n_episodes: + # raise ValueError( + # "The eval batch size is greater than the number of eval episodes " + # f"({cfg.eval.batch_size} > {cfg.eval.n_episodes}). As a result, {cfg.eval.batch_size} " + # f"eval environments will be instantiated, but only {cfg.eval.n_episodes} will be used. " + # "This might significantly slow down evaluation. To fix this, you should update your command " + # f"to increase the number of episodes to match the batch size (e.g. `eval.n_episodes={cfg.eval.batch_size}`), " + # f"or lower the batch size (e.g. `eval.batch_size={cfg.eval.n_episodes}`)." 
+ # ) # log metrics to terminal and wandb - logger = Logger(cfg, out_dir, wandb_job_name=job_name) + logger = Logger(cfg) - set_global_seed(cfg.seed) + if cfg.seed is not None: + set_global_seed(cfg.seed) # Check device is available device = get_safe_torch_device(cfg.device, log=True) @@ -666,4 +667,5 @@ def train_notebook(out_dir=None, job_name=None, config_name="default", config_pa if __name__ == "__main__": - train_cli() + # train_cli() + train() diff --git a/poetry.lock b/poetry.lock index 8799e67cab..f8d1cf2028 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1303,6 +1303,27 @@ files = [ [package.dependencies] pyarrow = "*" +[[package]] +name = "draccus" +version = "0.9.3" +description = "A slightly opinionated framework for simple dataclass-based configurations based on Pyrallis." +optional = false +python-versions = ">=3.8" +files = [ + {file = "draccus-0.9.3-py3-none-any.whl", hash = "sha256:04d3fe14d2b7d19290e6f7c76ff29fbfcc9b56e9e7b76d9439a18a26a1dbfe5e"}, + {file = "draccus-0.9.3.tar.gz", hash = "sha256:41db52347f5513deadfb8d512fed43bb41499ac5e63559530688c1d95a978043"}, +] + +[package.dependencies] +mergedeep = ">=1.3,<2.0" +pyyaml = ">=6.0,<7.0" +pyyaml-include = ">=1.4,<2.0" +toml = ">=0.10,<1.0" +typing-inspect = ">=0.9.0,<0.10.0" + +[package.extras] +dev = ["black", "mypy", "pre-commit", "pytest", "ruff"] + [[package]] name = "drawnow" version = "0.72.5" @@ -3505,6 +3526,17 @@ files = [ {file = "mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba"}, ] +[[package]] +name = "mergedeep" +version = "1.3.4" +description = "A deep merge function for 🐍." +optional = false +python-versions = ">=3.6" +files = [ + {file = "mergedeep-1.3.4-py3-none-any.whl", hash = "sha256:70775750742b25c0d8f36c55aed03d24c3384d17c951b3175d898bd778ef0307"}, + {file = "mergedeep-1.3.4.tar.gz", hash = "sha256:0096d52e9dad9939c3d975a774666af186eda617e6ca84df4c94dec30004f2a8"}, +] + [[package]] name = "meshio" version = "5.3.5" @@ -3719,6 +3751,17 @@ files = [ [package.dependencies] dill = ">=0.3.8" +[[package]] +name = "mypy-extensions" +version = "1.0.0" +description = "Type system extensions for programs checked with the mypy type checker." 
+optional = false +python-versions = ">=3.5" +files = [ + {file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"}, + {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, +] + [[package]] name = "nbclient" version = "0.10.0" @@ -5614,6 +5657,23 @@ files = [ {file = "pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e"}, ] +[[package]] +name = "pyyaml-include" +version = "1.4.1" +description = "Extending PyYAML with a custom constructor for including YAML files within YAML files" +optional = false +python-versions = ">=3.6" +files = [ + {file = "pyyaml-include-1.4.1.tar.gz", hash = "sha256:1a96e33a99a3e56235f5221273832464025f02ff3d8539309a3bf00dec624471"}, + {file = "pyyaml_include-1.4.1-py3-none-any.whl", hash = "sha256:323c7f3a19c82fbc4d73abbaab7ef4f793e146a13383866831631b26ccc7fb00"}, +] + +[package.dependencies] +PyYAML = ">=6.0,<7.0" + +[package.extras] +toml = ["toml"] + [[package]] name = "pyzmq" version = "26.2.0" @@ -6798,6 +6858,17 @@ webencodings = ">=0.4" doc = ["sphinx", "sphinx_rtd_theme"] test = ["pytest", "ruff"] +[[package]] +name = "toml" +version = "0.10.2" +description = "Python Library for Tom's Obvious, Minimal Language" +optional = false +python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" +files = [ + {file = "toml-0.10.2-py2.py3-none-any.whl", hash = "sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b"}, + {file = "toml-0.10.2.tar.gz", hash = "sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f"}, +] + [[package]] name = "tomli" version = "2.0.2" @@ -7035,6 +7106,21 @@ files = [ {file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"}, ] +[[package]] +name = "typing-inspect" +version = "0.9.0" +description = "Runtime inspection utilities for typing module." 
+optional = false +python-versions = "*" +files = [ + {file = "typing_inspect-0.9.0-py3-none-any.whl", hash = "sha256:9ee6fc59062311ef8547596ab6b955e1b8aa46242d854bfc78f4f6b0eff35f9f"}, + {file = "typing_inspect-0.9.0.tar.gz", hash = "sha256:b23fc42ff6f6ef6954e4852c1fb512cdd18dbea03134f91f856a95ccc9461f78"}, +] + +[package.dependencies] +mypy-extensions = ">=0.3.0" +typing-extensions = ">=3.7.4" + [[package]] name = "tzdata" version = "2024.2" @@ -7569,4 +7655,4 @@ xarm = ["gym-xarm"] [metadata] lock-version = "2.0" python-versions = ">=3.10,<3.13" -content-hash = "41344f0eb2d06d9a378abcd10df8205aa3926ff0a08ac5ab1a0b1bcae7440fd8" +content-hash = "0ef550e648ac7eae32b18584d4facb7a83cf1e0ee9a6705daf89783fb56db8fb" diff --git a/pyproject.toml b/pyproject.toml index 59c2de8bcf..2e7e1328f2 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -71,6 +71,7 @@ pyrender = {git = "https://github.com/mmatl/pyrender.git", markers = "sys_platfo hello-robot-stretch-body = {version = ">=0.7.27", markers = "sys_platform == 'linux'", optional = true} pyserial = {version = ">=3.5", optional = true} jsonlines = ">=4.0.0" +draccus = "^0.9.3" [tool.poetry.extras] From 82f197baab92bc2a191b4c8f0ee80c106728ae73 Mon Sep 17 00:00:00 2001 From: Simon Alibert Date: Wed, 18 Dec 2024 12:56:41 +0100 Subject: [PATCH 02/94] WIP refactor train.py and ACT --- lerobot/common/datasets/factory.py | 9 +- lerobot/common/datasets/transforms.py | 2 +- lerobot/common/envs/configs.py | 16 +- lerobot/common/envs/factory.py | 16 +- lerobot/common/logger.py | 19 +- lerobot/common/optim/__init__.py | 1 + lerobot/common/optim/factory.py | 62 +++++ lerobot/common/optim/optimizers.py | 44 ++++ lerobot/common/policies/__init__.py | 1 + .../common/policies/act/configuration_act.py | 48 ++-- lerobot/common/policies/act/modeling_act.py | 113 ++++---- lerobot/common/policies/factory.py | 72 ++--- lerobot/common/policies/normalize.py | 85 +++--- lerobot/common/policies/policy_protocol.py | 5 + lerobot/common/policies/utils.py | 18 ++ lerobot/common/utils/utils.py | 8 +- lerobot/configs/default.py | 34 ++- lerobot/configs/policies.py | 143 +++++++++- lerobot/scripts/train.py | 249 ++++-------------- 19 files changed, 544 insertions(+), 401 deletions(-) create mode 100644 lerobot/common/optim/__init__.py create mode 100644 lerobot/common/optim/factory.py create mode 100644 lerobot/common/optim/optimizers.py diff --git a/lerobot/common/datasets/factory.py b/lerobot/common/datasets/factory.py index c5919ccb7e..9e2730eee2 100644 --- a/lerobot/common/datasets/factory.py +++ b/lerobot/common/datasets/factory.py @@ -14,6 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging +from pprint import pformat import torch @@ -101,7 +102,7 @@ def make_dataset(cfg: MainConfig, split: str = "train") -> LeRobotDataset | Mult episodes=cfg.dataset.episodes, delta_timestamps=delta_timestamps, image_transforms=image_transforms, - video_backend=cfg.video_backend, + video_backend=cfg.dataset.video_backend, local_files_only=cfg.dataset.local_files_only, ) else: @@ -110,7 +111,11 @@ def make_dataset(cfg: MainConfig, split: str = "train") -> LeRobotDataset | Mult # TODO(aliberts): add proper support for multi dataset # delta_timestamps=delta_timestamps, image_transforms=image_transforms, - video_backend=cfg.video_backend, + video_backend=cfg.dataset.video_backend, + ) + logging.info( + "Multiple datasets were provided. 
Applied the following index mapping to the provided datasets: " + f"{pformat(dataset.repo_id_to_index , indent=2)}" ) if cfg.dataset.use_imagenet_stats: diff --git a/lerobot/common/datasets/transforms.py b/lerobot/common/datasets/transforms.py index 696ccb02e8..6c4304d25a 100644 --- a/lerobot/common/datasets/transforms.py +++ b/lerobot/common/datasets/transforms.py @@ -212,7 +212,7 @@ class ImageTransformConfig: (following uniform distribution) when it's applied. """ - weight: int = 1.0 + weight: float = 1.0 type: str = "Identity" kwargs: Dict[str, Any] = field(default_factory=dict) diff --git a/lerobot/common/envs/configs.py b/lerobot/common/envs/configs.py index b4414dee65..9449e3deba 100644 --- a/lerobot/common/envs/configs.py +++ b/lerobot/common/envs/configs.py @@ -1,10 +1,17 @@ -from dataclasses import dataclass +from dataclasses import dataclass, field import draccus +@dataclass +class GymConfig: + obs_type: str = "pixels_agent_pos" + render_mode: str = "rgb_array" + + @dataclass class EnvConfig(draccus.ChoiceRegistry): + n_envs: int | None = None task: str | None = None state_dim: int = 18 action_dim: int = 18 @@ -28,3 +35,10 @@ class AlohaEnv(EnvConfig): state_dim: int = 14 action_dim: int = 14 fps: int = 50 + episode_length: int = 400 + gym: dict = field( + default_factory=lambda: { + "obs_type": "pixels_agent_pos", + "render_mode": "rgb_array", + } + ) diff --git a/lerobot/common/envs/factory.py b/lerobot/common/envs/factory.py index 54f24ea84b..2ad60409ef 100644 --- a/lerobot/common/envs/factory.py +++ b/lerobot/common/envs/factory.py @@ -16,34 +16,36 @@ import importlib import gymnasium as gym -from omegaconf import DictConfig +from lerobot.configs.default import MainConfig -def make_env(cfg: DictConfig, n_envs: int | None = None) -> gym.vector.VectorEnv | None: + +def make_env(cfg: MainConfig) -> gym.vector.VectorEnv | None: """Makes a gym vector environment according to the evaluation config. n_envs can be used to override eval.batch_size in the configuration. Must be at least 1. """ + n_envs = cfg.training.online.rollout_batch_size if n_envs is not None and n_envs < 1: raise ValueError("`n_envs must be at least 1") - if cfg.env.name == "real_world": + if cfg.env.type == "real_world": return - package_name = f"gym_{cfg.env.name}" + package_name = f"gym_{cfg.env.type}" try: importlib.import_module(package_name) except ModuleNotFoundError as e: print( - f"{package_name} is not installed. Please install it with `pip install 'lerobot[{cfg.env.name}]'`" + f"{package_name} is not installed. 
Please install it with `pip install 'lerobot[{cfg.env.type}]'`" ) raise e gym_handle = f"{package_name}/{cfg.env.task}" - gym_kwgs = dict(cfg.env.get("gym", {})) + gym_kwgs = getattr(cfg.env, "gym", {}) - if cfg.env.get("episode_length"): + if getattr(cfg.env, "episode_length", None): gym_kwgs["max_episode_steps"] = cfg.env.episode_length # batched version of the env that returns an observation of shape (b, c) diff --git a/lerobot/common/logger.py b/lerobot/common/logger.py index b534c92b87..7a1622f3ea 100644 --- a/lerobot/common/logger.py +++ b/lerobot/common/logger.py @@ -24,6 +24,7 @@ from glob import glob from pathlib import Path +import draccus import torch from huggingface_hub.constants import SAFETENSORS_SINGLE_FILE from omegaconf import DictConfig, OmegaConf @@ -34,6 +35,7 @@ from lerobot.common.policies.policy_protocol import Policy from lerobot.common.utils.utils import get_global_random_state, set_global_random_state from lerobot.configs.default import MainConfig +from lerobot.configs.policies import FeatureType, NormalizationMode def log_output_dir(out_dir): @@ -151,10 +153,13 @@ def save_model(self, save_dir: Path, policy: Policy, wandb_artifact_name: str | Optionally also upload the model to WandB. """ + self.checkpoints_dir.mkdir(parents=True, exist_ok=True) + # register_features_types() policy.save_pretrained(save_dir) - # Also save the full Hydra config for the env configuration. - OmegaConf.save(self._cfg, save_dir / "config.yaml") + # Also save the full config for the env configuration. + with open(save_dir / "config.yaml", "w") as f: + draccus.dump(self._cfg, f) if self._wandb and not self._cfg.wandb.disable_artifact: # note wandb artifact does not accept ":" or "/" in its name artifact = self._wandb.Artifact(wandb_artifact_name, type="model") @@ -221,7 +226,7 @@ def load_last_training_state(self, optimizer: Optimizer, scheduler: LRScheduler set_global_random_state({k: training_state[k] for k in get_global_random_state()}) return training_state["step"] - def log_dict(self, d, step, mode="train"): + def log_dict(self, d: dict, step: int, mode: str = "train"): assert mode in {"train", "eval"} # TODO(alexander-soare): Add local text log. if self._wandb is not None: @@ -238,3 +243,11 @@ def log_video(self, video_path: str, step: int, mode: str = "train"): assert self._wandb is not None wandb_video = self._wandb.Video(video_path, fps=self._cfg.fps, format="mp4") self._wandb.log({f"{mode}/video": wandb_video}, step=step) + + +def register_features_types(): + draccus.decode.register(FeatureType, lambda x: FeatureType[x]) + draccus.encode.register(FeatureType, lambda x: x.name) + + draccus.decode.register(NormalizationMode, lambda x: NormalizationMode[x]) + draccus.encode.register(NormalizationMode, lambda x: x.name) diff --git a/lerobot/common/optim/__init__.py b/lerobot/common/optim/__init__.py new file mode 100644 index 0000000000..e1e6596639 --- /dev/null +++ b/lerobot/common/optim/__init__.py @@ -0,0 +1 @@ +from .optimizers import OptimizerConfig as OptimizerConfig diff --git a/lerobot/common/optim/factory.py b/lerobot/common/optim/factory.py new file mode 100644 index 0000000000..cfa10fb690 --- /dev/null +++ b/lerobot/common/optim/factory.py @@ -0,0 +1,62 @@ +#!/usr/bin/env python + +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import torch +from torch.optim import Optimizer +from torch.optim.lr_scheduler import LRScheduler + +from lerobot.common.policies import Policy +from lerobot.configs.default import MainConfig + + +def make_optimizer_and_scheduler(cfg: MainConfig, policy: Policy) -> tuple[Optimizer, LRScheduler | None]: + params = policy.get_optim_params() if cfg.use_policy_optimizer_preset else policy.parameters() + optimizer = cfg.optimizer.build(params) + + if not cfg.use_policy_optimizer_preset: + lr_scheduler = None + return optimizer, lr_scheduler + + if cfg.policy.type == "act": + lr_scheduler = None + elif cfg.policy.type == "diffusion": + from diffusers.optimization import get_scheduler + + lr_scheduler = get_scheduler( + cfg.training.lr_scheduler, + optimizer=optimizer, + num_warmup_steps=cfg.training.lr_warmup_steps, + num_training_steps=cfg.training.offline_steps, + ) + elif cfg.policy.type == "tdmpc": + optimizer = torch.optim.Adam(params, cfg.training.lr) + lr_scheduler = None + elif cfg.policy.type == "vqbet": + # from lerobot.common.policies.vqbet.modeling_vqbet import VQBeTOptimizer, VQBeTScheduler + # optimizer = VQBeTOptimizer(policy, cfg) + optimizer = torch.optim.Adam( + params, + cfg.optimizer.lr, + cfg.optimizer.adam_betas, + cfg.optimizer.adam_eps, + ) + from lerobot.common.policies.vqbet.modeling_vqbet import VQBeTScheduler + + lr_scheduler = VQBeTScheduler(optimizer, cfg) + else: + raise NotImplementedError() + + return optimizer, lr_scheduler diff --git a/lerobot/common/optim/optimizers.py b/lerobot/common/optim/optimizers.py new file mode 100644 index 0000000000..b085211227 --- /dev/null +++ b/lerobot/common/optim/optimizers.py @@ -0,0 +1,44 @@ +import abc +from dataclasses import asdict, dataclass + +import draccus +import torch + + +@dataclass +class OptimizerConfig(draccus.ChoiceRegistry, abc.ABC): + lr: float = 1e-5 + betas: tuple[float, float] = (0.9, 0.999) + eps: float = 1e-8 + weight_decay: float = 1e-4 + grad_clip_norm: float = 10.0 + + @property + def type(self) -> str: + return self.get_choice_name(self.__class__) + + @classmethod + def default_choice_name(cls) -> str | None: + return "adam" + + @abc.abstractmethod + def build(self) -> torch.optim.Optimizer: + raise NotImplementedError + + +@OptimizerConfig.register_subclass("adam") +@dataclass +class AdamConfig(OptimizerConfig): + def build(self, params: dict) -> torch.optim.Optimizer: + kwargs = asdict(self) + kwargs.pop("grad_clip_norm") + return torch.optim.Adam(params, **kwargs) + + +@OptimizerConfig.register_subclass("adamw") +@dataclass +class AdamWConfig(OptimizerConfig): + def build(self, params: dict) -> torch.optim.Optimizer: + kwargs = asdict(self) + kwargs.pop("grad_clip_norm") + return torch.optim.AdamW(params, **kwargs) diff --git a/lerobot/common/policies/__init__.py b/lerobot/common/policies/__init__.py index 58db9849f3..5d1e24e551 100644 --- a/lerobot/common/policies/__init__.py +++ b/lerobot/common/policies/__init__.py @@ -1,4 +1,5 @@ from .act.configuration_act import ACTConfig as ACTConfig from .diffusion.configuration_diffusion import DiffusionConfig as DiffusionConfig +from 
.policy_protocol import Policy as Policy from .tdmpc.configuration_tdmpc import TDMPCConfig as TDMPCConfig from .vqbet.configuration_vqbet import VQBeTConfig as VQBeTConfig diff --git a/lerobot/common/policies/act/configuration_act.py b/lerobot/common/policies/act/configuration_act.py index 5c0d209340..b22989f3d5 100644 --- a/lerobot/common/policies/act/configuration_act.py +++ b/lerobot/common/policies/act/configuration_act.py @@ -15,7 +15,8 @@ # limitations under the License. from dataclasses import dataclass, field -from lerobot.configs.policies import PretrainedConfig +from lerobot.common.optim.optimizers import AdamWConfig +from lerobot.configs.policies import FeatureType, NormalizationMode, PretrainedConfig @PretrainedConfig.register_subclass("act") @@ -93,28 +94,11 @@ class ACTConfig(PretrainedConfig): chunk_size: int = 100 n_action_steps: int = 100 - input_shapes: dict[str, list[int]] = field( + normalization_mapping: dict[FeatureType, NormalizationMode] = field( default_factory=lambda: { - "observation.images.top": [3, 480, 640], - "observation.state": [14], - } - ) - output_shapes: dict[str, list[int]] = field( - default_factory=lambda: { - "action": [14], - } - ) - - # Normalization / Unnormalization - input_normalization_modes: dict[str, str] = field( - default_factory=lambda: { - "observation.images.top": "mean_std", - "observation.state": "mean_std", - } - ) - output_normalization_modes: dict[str, str] = field( - default_factory=lambda: { - "action": "mean_std", + FeatureType.VISUAL: NormalizationMode.MEAN_STD, + FeatureType.STATE: NormalizationMode.MEAN_STD, + FeatureType.ACTION: NormalizationMode.MEAN_STD, } ) @@ -147,6 +131,15 @@ class ACTConfig(PretrainedConfig): dropout: float = 0.1 kl_weight: float = 10.0 + # Optimizer presets + optimizer_preset: AdamWConfig = field( + default_factory=lambda: AdamWConfig( + lr=1e-5, + weight_decay=1e-4, + ) + ) + lr_backbone: float = 1e-5 + def __post_init__(self): """Input validation (not exhaustive).""" if not self.vision_backbone.startswith("resnet"): @@ -167,11 +160,12 @@ def __post_init__(self): raise ValueError( f"Multiple observation steps not handled yet. Got `nobs_steps={self.n_obs_steps}`" ) - if ( - not any(k.startswith("observation.image") for k in self.input_shapes) - and "observation.environment_state" not in self.input_shapes - ): - raise ValueError("You must provide at least one image or the environment state among the inputs.") + # TODO(aliberts): move this check into the policy init + # if ( + # not any(k.startswith("observation.image") for k in self.input_shapes) + # and "observation.environment_state" not in self.input_shapes + # ): + # raise ValueError("You must provide at least one image or the environment state among the inputs.") @property def observation_delta_indices(self) -> None: diff --git a/lerobot/common/policies/act/modeling_act.py b/lerobot/common/policies/act/modeling_act.py index 418863a143..c181b02d08 100644 --- a/lerobot/common/policies/act/modeling_act.py +++ b/lerobot/common/policies/act/modeling_act.py @@ -54,7 +54,7 @@ class ACTPolicy( def __init__( self, - config: ACTConfig | None = None, + config: ACTConfig, dataset_stats: dict[str, dict[str, Tensor]] | None = None, ): """ @@ -65,29 +65,40 @@ def __init__( that they will be passed with a call to `load_state_dict` before the policy is used. 
""" super().__init__() - if config is None: - config = ACTConfig() - self.config: ACTConfig = config + self.config = config - self.normalize_inputs = Normalize( - config.input_shapes, config.input_normalization_modes, dataset_stats - ) - self.normalize_targets = Normalize( - config.output_shapes, config.output_normalization_modes, dataset_stats - ) - self.unnormalize_outputs = Unnormalize( - config.output_shapes, config.output_normalization_modes, dataset_stats - ) + self.normalize_inputs = Normalize(config.input_features, dataset_stats) + self.normalize_targets = Normalize(config.output_features, dataset_stats) + self.unnormalize_outputs = Unnormalize(config.output_features, dataset_stats) self.model = ACT(config) - self.expected_image_keys = [k for k in config.input_shapes if k.startswith("observation.image")] - if config.temporal_ensemble_coeff is not None: self.temporal_ensembler = ACTTemporalEnsembler(config.temporal_ensemble_coeff, config.chunk_size) self.reset() + def get_optim_params(self) -> dict: + # TODO(aliberts, rcadene): As of now, lr_backbone == lr + # Should we remove this and just `return self.parameters()`? + return [ + { + "params": [ + p + for n, p in self.named_parameters() + if not n.startswith("model.backbone") and p.requires_grad + ] + }, + { + "params": [ + p + for n, p in self.named_parameters() + if n.startswith("model.backbone") and p.requires_grad + ], + "lr": self.config.lr_backbone, + }, + ] + def reset(self): """This should be called whenever the environment is reset.""" if self.config.temporal_ensemble_coeff is not None: @@ -106,9 +117,11 @@ def select_action(self, batch: dict[str, Tensor]) -> Tensor: self.eval() batch = self.normalize_inputs(batch) - if len(self.expected_image_keys) > 0: + if self.config.image_features: batch = dict(batch) # shallow copy so that adding a key doesn't modify the original - batch["observation.images"] = torch.stack([batch[k] for k in self.expected_image_keys], dim=-4) + batch["observation.images"] = torch.stack( + [batch[ft.key] for ft in self.config.image_features], dim=-4 + ) # If we are doing temporal ensembling, do online updates where we keep track of the number of actions # we are ensembling over. @@ -134,9 +147,11 @@ def select_action(self, batch: dict[str, Tensor]) -> Tensor: def forward(self, batch: dict[str, Tensor]) -> dict[str, Tensor]: """Run the batch through the model and compute the loss for training or validation.""" batch = self.normalize_inputs(batch) - if len(self.expected_image_keys) > 0: + if self.config.image_features: batch = dict(batch) # shallow copy so that adding a key doesn't modify the original - batch["observation.images"] = torch.stack([batch[k] for k in self.expected_image_keys], dim=-4) + batch["observation.images"] = torch.stack( + [batch[ft.key] for ft in self.config.image_features], dim=-4 + ) batch = self.normalize_targets(batch) actions_hat, (mu_hat, log_sigma_x2_hat) = self.model(batch) @@ -288,31 +303,31 @@ class ACT(nn.Module): """ def __init__(self, config: ACTConfig): - super().__init__() - self.config = config # BERT style VAE encoder with input tokens [cls, robot_state, *action_sequence]. # The cls token forms parameters of the latent's distribution (like this [*means, *log_variances]). 
- self.use_robot_state = "observation.state" in config.input_shapes - self.use_images = any(k.startswith("observation.image") for k in config.input_shapes) - self.use_env_state = "observation.environment_state" in config.input_shapes + super().__init__() + self.config = config + if self.config.use_vae: self.vae_encoder = ACTEncoder(config, is_vae_encoder=True) self.vae_encoder_cls_embed = nn.Embedding(1, config.dim_model) # Projection layer for joint-space configuration to hidden dimension. - if self.use_robot_state: + if self.config.robot_state_feature: self.vae_encoder_robot_state_input_proj = nn.Linear( - config.input_shapes["observation.state"][0], config.dim_model + self.config.robot_state_feature.shape[0], config.dim_model ) # Projection layer for action (joint-space target) to hidden dimension. self.vae_encoder_action_input_proj = nn.Linear( - config.output_shapes["action"][0], config.dim_model + self.config.action_feature.shape[0], + config.dim_model, + # config.output_shapes["action"][0], config.dim_model ) # Projection layer from the VAE encoder's output to the latent distribution's parameter space. self.vae_encoder_latent_output_proj = nn.Linear(config.dim_model, config.latent_dim * 2) # Fixed sinusoidal positional embedding for the input to the VAE encoder. Unsqueeze for batch # dimension. num_input_token_encoder = 1 + config.chunk_size - if self.use_robot_state: + if self.config.robot_state_feature: num_input_token_encoder += 1 self.register_buffer( "vae_encoder_pos_enc", @@ -320,7 +335,7 @@ def __init__(self, config: ACTConfig): ) # Backbone for image feature extraction. - if self.use_images: + if self.config.image_features: backbone_model = getattr(torchvision.models, config.vision_backbone)( replace_stride_with_dilation=[False, False, config.replace_final_stride_with_dilation], weights=config.pretrained_backbone_weights, @@ -337,27 +352,27 @@ def __init__(self, config: ACTConfig): # Transformer encoder input projections. The tokens will be structured like # [latent, (robot_state), (env_state), (image_feature_map_pixels)]. - if self.use_robot_state: + if self.config.robot_state_feature: self.encoder_robot_state_input_proj = nn.Linear( - config.input_shapes["observation.state"][0], config.dim_model + self.config.robot_state_feature.shape[0], config.dim_model ) - if self.use_env_state: + if self.config.env_state_feature: self.encoder_env_state_input_proj = nn.Linear( - config.input_shapes["observation.environment_state"][0], config.dim_model + self.config.env_state_feature.shape[0], config.dim_model ) self.encoder_latent_input_proj = nn.Linear(config.latent_dim, config.dim_model) - if self.use_images: + if self.config.image_features: self.encoder_img_feat_input_proj = nn.Conv2d( backbone_model.fc.in_features, config.dim_model, kernel_size=1 ) # Transformer encoder positional embeddings. n_1d_tokens = 1 # for the latent - if self.use_robot_state: + if self.config.robot_state_feature: n_1d_tokens += 1 - if self.use_env_state: + if self.config.env_state_feature: n_1d_tokens += 1 self.encoder_1d_feature_pos_embed = nn.Embedding(n_1d_tokens, config.dim_model) - if self.use_images: + if self.config.image_features: self.encoder_cam_feat_pos_embed = ACTSinusoidalPositionEmbedding2d(config.dim_model // 2) # Transformer decoder. @@ -365,7 +380,7 @@ def __init__(self, config: ACTConfig): self.decoder_pos_embed = nn.Embedding(config.chunk_size, config.dim_model) # Final action regression head on the output of the transformer's decoder. 
- self.action_head = nn.Linear(config.dim_model, config.output_shapes["action"][0]) + self.action_head = nn.Linear(config.dim_model, self.config.action_feature.shape[0]) self._reset_parameters() @@ -380,13 +395,13 @@ def forward(self, batch: dict[str, Tensor]) -> tuple[Tensor, tuple[Tensor, Tenso `batch` should have the following structure: { - "observation.state" (optional): (B, state_dim) batch of robot states. + [robot_state_feature] (optional): (B, state_dim) batch of robot states. - "observation.images": (B, n_cameras, C, H, W) batch of images. + [image_features]: (B, n_cameras, C, H, W) batch of images. AND/OR - "observation.environment_state": (B, env_dim) batch of environment states. + [env_state_feature]: (B, env_dim) batch of environment states. - "action" (optional, only if training with VAE): (B, chunk_size, action dim) batch of actions. + [action_feature] (optional, only if training with VAE): (B, chunk_size, action dim) batch of actions. } Returns: @@ -396,7 +411,7 @@ def forward(self, batch: dict[str, Tensor]) -> tuple[Tensor, tuple[Tensor, Tenso """ if self.config.use_vae and self.training: assert ( - "action" in batch + self.config.action_feature.key in batch ), "actions must be provided when using the variational objective in training mode." batch_size = ( @@ -411,12 +426,12 @@ def forward(self, batch: dict[str, Tensor]) -> tuple[Tensor, tuple[Tensor, Tenso cls_embed = einops.repeat( self.vae_encoder_cls_embed.weight, "1 d -> b 1 d", b=batch_size ) # (B, 1, D) - if self.use_robot_state: + if self.config.robot_state_feature: robot_state_embed = self.vae_encoder_robot_state_input_proj(batch["observation.state"]) robot_state_embed = robot_state_embed.unsqueeze(1) # (B, 1, D) action_embed = self.vae_encoder_action_input_proj(batch["action"]) # (B, S, D) - if self.use_robot_state: + if self.config.robot_state_feature: vae_encoder_input = [cls_embed, robot_state_embed, action_embed] # (B, S+2, D) else: vae_encoder_input = [cls_embed, action_embed] @@ -430,7 +445,7 @@ def forward(self, batch: dict[str, Tensor]) -> tuple[Tensor, tuple[Tensor, Tenso # sequence depending whether we use the input states or not (cls and robot state) # False means not a padding token. cls_joint_is_pad = torch.full( - (batch_size, 2 if self.use_robot_state else 1), + (batch_size, 2 if self.config.robot_state_feature else 1), False, device=batch["observation.state"].device, ) @@ -463,16 +478,16 @@ def forward(self, batch: dict[str, Tensor]) -> tuple[Tensor, tuple[Tensor, Tenso encoder_in_tokens = [self.encoder_latent_input_proj(latent_sample)] encoder_in_pos_embed = list(self.encoder_1d_feature_pos_embed.weight.unsqueeze(1)) # Robot state token. - if self.use_robot_state: + if self.config.robot_state_feature: encoder_in_tokens.append(self.encoder_robot_state_input_proj(batch["observation.state"])) # Environment state token. - if self.use_env_state: + if self.config.env_state_feature: encoder_in_tokens.append( self.encoder_env_state_input_proj(batch["observation.environment_state"]) ) # Camera observation features and positional embeddings. - if self.use_images: + if self.config.image_features: all_cam_features = [] all_cam_pos_embeds = [] diff --git a/lerobot/common/policies/factory.py b/lerobot/common/policies/factory.py index 5cb2fd5269..2d4a2c07d1 100644 --- a/lerobot/common/policies/factory.py +++ b/lerobot/common/policies/factory.py @@ -13,89 +13,56 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. -import inspect -import logging -from omegaconf import DictConfig, OmegaConf +from torch import nn +from lerobot.common.datasets.lerobot_dataset import LeRobotDatasetMetadata from lerobot.common.policies.policy_protocol import Policy -from lerobot.common.utils.utils import get_safe_torch_device +from lerobot.configs.default import MainConfig -def _policy_cfg_from_hydra_cfg(policy_cfg_class, hydra_cfg): - expected_kwargs = set(inspect.signature(policy_cfg_class).parameters) - if not set(hydra_cfg.policy).issuperset(expected_kwargs): - logging.warning( - f"Hydra config is missing arguments: {set(expected_kwargs).difference(hydra_cfg.policy)}" - ) - - # OmegaConf.to_container returns lists where sequences are found, but our dataclasses use tuples to avoid - # issues with mutable defaults. This filter changes all lists to tuples. - def list_to_tuple(item): - return tuple(item) if isinstance(item, list) else item - - policy_cfg = policy_cfg_class( - **{ - k: list_to_tuple(v) - for k, v in OmegaConf.to_container(hydra_cfg.policy, resolve=True).items() - if k in expected_kwargs - } - ) - return policy_cfg - - -def get_policy_and_config_classes(name: str) -> tuple[Policy, object]: +def get_policy_class(name: str) -> Policy: """Get the policy's class and config class given a name (matching the policy class' `name` attribute).""" if name == "tdmpc": - from lerobot.common.policies.tdmpc.configuration_tdmpc import TDMPCConfig from lerobot.common.policies.tdmpc.modeling_tdmpc import TDMPCPolicy - return TDMPCPolicy, TDMPCConfig + return TDMPCPolicy elif name == "diffusion": - from lerobot.common.policies.diffusion.configuration_diffusion import DiffusionConfig from lerobot.common.policies.diffusion.modeling_diffusion import DiffusionPolicy - return DiffusionPolicy, DiffusionConfig + return DiffusionPolicy elif name == "act": - from lerobot.common.policies.act.configuration_act import ACTConfig from lerobot.common.policies.act.modeling_act import ACTPolicy - return ACTPolicy, ACTConfig + return ACTPolicy elif name == "vqbet": - from lerobot.common.policies.vqbet.configuration_vqbet import VQBeTConfig from lerobot.common.policies.vqbet.modeling_vqbet import VQBeTPolicy - return VQBeTPolicy, VQBeTConfig + return VQBeTPolicy else: raise NotImplementedError(f"Policy with name {name} is not implemented.") def make_policy( - hydra_cfg: DictConfig, pretrained_policy_name_or_path: str | None = None, dataset_stats=None + cfg: MainConfig, ds_meta: LeRobotDatasetMetadata, pretrained_policy_name_or_path: str | None = None ) -> Policy: """Make an instance of a policy class. Args: - hydra_cfg: A parsed Hydra configuration (see scripts). If `pretrained_policy_name_or_path` is - provided, only `hydra_cfg.policy.name` is used while everything else is ignored. + cfg (MainConfig): A MainConfig instance (see scripts). If `pretrained_policy_name_or_path` is + provided, only `cfg.policy.type` is used while everything else is ignored. + ds_meta (LeRobotDatasetMetadata): Dataset metadata to take input/output shapes and statistics to use + for (un)normalization of inputs/outputs in the policy. pretrained_policy_name_or_path: Either the repo ID of a model hosted on the Hub or a path to a directory containing weights saved using `Policy.save_pretrained`. Note that providing this - argument overrides everything in `hydra_cfg.policy` apart from `hydra_cfg.policy.name`. 
- dataset_stats: Dataset statistics to use for (un)normalization of inputs/outputs in the policy. Must - be provided when initializing a new policy, and must not be provided when loading a pretrained - policy. Therefore, this argument is mutually exclusive with `pretrained_policy_name_or_path`. + argument overrides everything in `hydra_cfg.policy` apart from `hydra_cfg.policy.type`. """ - if not (pretrained_policy_name_or_path is None) ^ (dataset_stats is None): - raise ValueError( - "Exactly one of `pretrained_policy_name_or_path` and `dataset_stats` must be provided." - ) - - policy_cls, policy_cfg_class = get_policy_and_config_classes(hydra_cfg.policy.name) + policy_cls = get_policy_class(cfg.policy.type) + cfg.policy.parse_features_from_dataset(ds_meta) - policy_cfg = _policy_cfg_from_hydra_cfg(policy_cfg_class, hydra_cfg) if pretrained_policy_name_or_path is None: # Make a fresh policy. - policy = policy_cls(policy_cfg, dataset_stats) + policy = policy_cls(cfg.policy, ds_meta.stats) else: # Load a pretrained policy and override the config if needed (for example, if there are inference-time # hyperparameters that we want to vary). @@ -103,9 +70,10 @@ def make_policy( # pretrained weights which are then loaded into a fresh policy with the desired config. This PR in # huggingface_hub should make it possible to avoid the hack: # https://github.com/huggingface/huggingface_hub/pull/2274. - policy = policy_cls(policy_cfg) + policy = policy_cls(cfg.policy) policy.load_state_dict(policy_cls.from_pretrained(pretrained_policy_name_or_path).state_dict()) - policy.to(get_safe_torch_device(hydra_cfg.device)) + policy.to(cfg.device) + assert isinstance(policy, nn.Module) return policy diff --git a/lerobot/common/policies/normalize.py b/lerobot/common/policies/normalize.py index f2e1179c0e..2d0d9ca425 100644 --- a/lerobot/common/policies/normalize.py +++ b/lerobot/common/policies/normalize.py @@ -16,10 +16,11 @@ import torch from torch import Tensor, nn +from lerobot.configs.policies import FeatureType, NormalizationMode, PolicyFeature + def create_stats_buffers( - shapes: dict[str, list[int]], - modes: dict[str, str], + features: list[PolicyFeature], stats: dict[str, dict[str, Tensor]] | None = None, ) -> dict[str, dict[str, nn.ParameterDict]]: """ @@ -34,16 +35,16 @@ def create_stats_buffers( """ stats_buffers = {} - for key, mode in modes.items(): - assert mode in ["mean_std", "min_max"] + for ft in features: + assert isinstance(ft.normalization_mode, NormalizationMode) - shape = tuple(shapes[key]) + shape = tuple(ft.shape) - if "image" in key: + if ft.type is FeatureType.VISUAL: # sanity checks - assert len(shape) == 3, f"number of dimensions of {key} != 3 ({shape=}" + assert len(shape) == 3, f"number of dimensions of {ft.key} != 3 ({shape=}" c, h, w = shape - assert c < h and c < w, f"{key} is not channel first ({shape=})" + assert c < h and c < w, f"{ft.key} is not channel first ({shape=})" # override image shape to be invariant to height and width shape = (c, 1, 1) @@ -52,7 +53,7 @@ def create_stats_buffers( # we assert they are not infinity anymore. 
buffer = {} - if mode == "mean_std": + if ft.normalization_mode is NormalizationMode.MEAN_STD: mean = torch.ones(shape, dtype=torch.float32) * torch.inf std = torch.ones(shape, dtype=torch.float32) * torch.inf buffer = nn.ParameterDict( @@ -61,7 +62,7 @@ def create_stats_buffers( "std": nn.Parameter(std, requires_grad=False), } ) - elif mode == "min_max": + elif ft.normalization_mode is NormalizationMode.MIN_MAX: min = torch.ones(shape, dtype=torch.float32) * torch.inf max = torch.ones(shape, dtype=torch.float32) * torch.inf buffer = nn.ParameterDict( @@ -76,14 +77,14 @@ def create_stats_buffers( # tensors anywhere (for example, when we use the same stats for normalization and # unnormalization). See the logic here # https://github.com/huggingface/safetensors/blob/079781fd0dc455ba0fe851e2b4507c33d0c0d407/bindings/python/py_src/safetensors/torch.py#L97. - if mode == "mean_std": - buffer["mean"].data = stats[key]["mean"].clone() - buffer["std"].data = stats[key]["std"].clone() - elif mode == "min_max": - buffer["min"].data = stats[key]["min"].clone() - buffer["max"].data = stats[key]["max"].clone() - - stats_buffers[key] = buffer + if ft.normalization_mode is NormalizationMode.MEAN_STD: + buffer["mean"].data = stats[ft.key]["mean"].clone() + buffer["std"].data = stats[ft.key]["std"].clone() + elif ft.normalization_mode is NormalizationMode.MIN_MAX: + buffer["min"].data = stats[ft.key]["min"].clone() + buffer["max"].data = stats[ft.key]["max"].clone() + + stats_buffers[ft.key] = buffer return stats_buffers @@ -99,8 +100,7 @@ class Normalize(nn.Module): def __init__( self, - shapes: dict[str, list[int]], - modes: dict[str, str], + features: list[PolicyFeature], stats: dict[str, dict[str, Tensor]] | None = None, ): """ @@ -122,10 +122,9 @@ def __init__( dataset is not needed to get the stats, since they are already in the policy state_dict. 
""" super().__init__() - self.shapes = shapes - self.modes = modes + self.features = features self.stats = stats - stats_buffers = create_stats_buffers(shapes, modes, stats) + stats_buffers = create_stats_buffers(features, stats) for key, buffer in stats_buffers.items(): setattr(self, "buffer_" + key.replace(".", "_"), buffer) @@ -133,26 +132,26 @@ def __init__( @torch.no_grad def forward(self, batch: dict[str, Tensor]) -> dict[str, Tensor]: batch = dict(batch) # shallow copy avoids mutating the input batch - for key, mode in self.modes.items(): - buffer = getattr(self, "buffer_" + key.replace(".", "_")) + for ft in self.features: + buffer = getattr(self, "buffer_" + ft.key.replace(".", "_")) - if mode == "mean_std": + if ft.normalization_mode is NormalizationMode.MEAN_STD: mean = buffer["mean"] std = buffer["std"] assert not torch.isinf(mean).any(), _no_stats_error_str("mean") assert not torch.isinf(std).any(), _no_stats_error_str("std") - batch[key] = (batch[key] - mean) / (std + 1e-8) - elif mode == "min_max": + batch[ft.key] = (batch[ft.key] - mean) / (std + 1e-8) + elif ft.normalization_mode is NormalizationMode.MIN_MAX: min = buffer["min"] max = buffer["max"] assert not torch.isinf(min).any(), _no_stats_error_str("min") assert not torch.isinf(max).any(), _no_stats_error_str("max") # normalize to [0,1] - batch[key] = (batch[key] - min) / (max - min + 1e-8) + batch[ft.key] = (batch[ft.key] - min) / (max - min + 1e-8) # normalize to [-1, 1] - batch[key] = batch[key] * 2 - 1 + batch[ft.key] = batch[ft.key] * 2 - 1 else: - raise ValueError(mode) + raise ValueError(ft.normalization_mode) return batch @@ -164,8 +163,7 @@ class Unnormalize(nn.Module): def __init__( self, - shapes: dict[str, list[int]], - modes: dict[str, str], + features: list[PolicyFeature], stats: dict[str, dict[str, Tensor]] | None = None, ): """ @@ -187,11 +185,10 @@ def __init__( dataset is not needed to get the stats, since they are already in the policy state_dict. 
""" super().__init__() - self.shapes = shapes - self.modes = modes + self.features = features self.stats = stats # `self.buffer_observation_state["mean"]` contains `torch.tensor(state_dim)` - stats_buffers = create_stats_buffers(shapes, modes, stats) + stats_buffers = create_stats_buffers(features, stats) for key, buffer in stats_buffers.items(): setattr(self, "buffer_" + key.replace(".", "_"), buffer) @@ -199,22 +196,22 @@ def __init__( @torch.no_grad def forward(self, batch: dict[str, Tensor]) -> dict[str, Tensor]: batch = dict(batch) # shallow copy avoids mutating the input batch - for key, mode in self.modes.items(): - buffer = getattr(self, "buffer_" + key.replace(".", "_")) + for ft in self.features: + buffer = getattr(self, "buffer_" + ft.key.replace(".", "_")) - if mode == "mean_std": + if ft.normalization_mode is NormalizationMode.MEAN_STD: mean = buffer["mean"] std = buffer["std"] assert not torch.isinf(mean).any(), _no_stats_error_str("mean") assert not torch.isinf(std).any(), _no_stats_error_str("std") - batch[key] = batch[key] * std + mean - elif mode == "min_max": + batch[ft.key] = batch[ft.key] * std + mean + elif ft.normalization_mode is NormalizationMode.MIN_MAX: min = buffer["min"] max = buffer["max"] assert not torch.isinf(min).any(), _no_stats_error_str("min") assert not torch.isinf(max).any(), _no_stats_error_str("max") - batch[key] = (batch[key] + 1) / 2 - batch[key] = batch[key] * (max - min) + min + batch[ft.key] = (batch[ft.key] + 1) / 2 + batch[ft.key] = batch[ft.key] * (max - min) + min else: - raise ValueError(mode) + raise ValueError(ft.normalization_mode) return batch diff --git a/lerobot/common/policies/policy_protocol.py b/lerobot/common/policies/policy_protocol.py index 4e9e87afd4..7e25b8f0e9 100644 --- a/lerobot/common/policies/policy_protocol.py +++ b/lerobot/common/policies/policy_protocol.py @@ -44,6 +44,11 @@ def __init__(self, cfg, dataset_stats: dict[str, dict[str, Tensor]] | None = Non dataset_stats: Dataset statistics to be used for normalization. """ + def get_optim_params(self) -> dict: + """ + Returns the policy-specific parameters dict to be passed on to the optimizer. + """ + def reset(self): """To be called whenever the environment is reset. diff --git a/lerobot/common/policies/utils.py b/lerobot/common/policies/utils.py index 5a62daa2a7..38da5820e8 100644 --- a/lerobot/common/policies/utils.py +++ b/lerobot/common/policies/utils.py @@ -47,3 +47,21 @@ def get_dtype_from_parameters(module: nn.Module) -> torch.dtype: Note: assumes that all parameters have the same dtype. """ return next(iter(module.parameters())).dtype + + +def get_output_shape(module: nn.Module, input_shape: tuple) -> tuple: + """ + Calculates the output shape of a PyTorch module given an input shape. + + Args: + module (nn.Module): a PyTorch module + input_shape (tuple): A tuple representing the input shape, e.g., (batch_size, channels, height, width) + + Returns: + tuple: The output shape of the module. 
+ """ + # dummy_input = torch.randn(*input_shape) + dummy_input = torch.zeros(size=input_shape) + with torch.inference_mode(): + output = module(dummy_input) + return tuple(output.shape) diff --git a/lerobot/common/utils/utils.py b/lerobot/common/utils/utils.py index 4e276e169b..8adcc2d62a 100644 --- a/lerobot/common/utils/utils.py +++ b/lerobot/common/utils/utils.py @@ -41,9 +41,9 @@ def inside_slurm(): return "SLURM_JOB_ID" in os.environ -def get_safe_torch_device(cfg_device: str, log: bool = False) -> torch.device: +def get_safe_torch_device(try_device: str, log: bool = False) -> torch.device: """Given a string, return a torch.device with checks on whether the device is available.""" - match cfg_device: + match try_device: case "cuda": assert torch.cuda.is_available() device = torch.device("cuda") @@ -55,9 +55,9 @@ def get_safe_torch_device(cfg_device: str, log: bool = False) -> torch.device: if log: logging.warning("Using CPU, this will be slow.") case _: - device = torch.device(cfg_device) + device = torch.device(try_device) if log: - logging.warning(f"Using custom {cfg_device} device.") + logging.warning(f"Using custom {try_device} device.") return device diff --git a/lerobot/configs/default.py b/lerobot/configs/default.py index 54c2b9302e..76912b1034 100644 --- a/lerobot/configs/default.py +++ b/lerobot/configs/default.py @@ -28,6 +28,7 @@ policies, # noqa: F401 ) from lerobot.common.datasets.transforms import ImageTransformsConfig +from lerobot.common.optim import OptimizerConfig from lerobot.configs.policies import PretrainedConfig @@ -39,7 +40,7 @@ class OfflineConfig: @dataclass class OnlineConfig: """ - The online training look looks something like: + The online training loop looks something like: ```python for i in range(steps): @@ -59,7 +60,7 @@ class OnlineConfig: # How many episodes to collect at once when we reach the online rollout part of the training loop. rollout_n_episodes: int = 1 # The number of environments to use in the gym.vector.VectorEnv. This ends up also being the batch size for - # the policy. Ideally you should set this to by an even divisor or online_rollout_n_episodes. + # the policy. Ideally you should set this to by an even divisor of rollout_n_episodes. rollout_batch_size: int = 1 # How many optimization steps (forward, backward, optimizer step) to do between running rollouts. steps_between_rollouts: int | None = None @@ -71,13 +72,13 @@ class OnlineConfig: # FIFO. buffer_capacity: int | None = None # The minimum number of frames to have in the online buffer before commencing online training. - # If online_buffer_seed_size > online_rollout_n_episodes, the rollout will be run multiple times until the + # If buffer_seed_size > rollout_n_episodes, the rollout will be run multiple times until the # seed size condition is satisfied. buffer_seed_size: int = 0 # Whether to run the online rollouts asynchronously. This means we can run the online training steps in # parallel with the rollouts. This might be advised if your GPU has the bandwidth to handle training # + eval + environment rendering simultaneously. - do_online_rollout_async: bool = False + do_rollout_async: bool = False @dataclass @@ -96,11 +97,16 @@ class TrainConfig: @dataclass class DatasetConfig: + # You may provide a list of datasets here. `train.py` creates them all and concatenates them. Note: only data + # keys common between the datasets are kept. Each dataset gets and additional transform that inserts the + # "dataset_index" into the returned item. 
The index mapping is made according to the order in which the + # datsets are provided. repo_id: str | list[str] episodes: list[int] | None = None image_transforms: ImageTransformsConfig = field(default_factory=ImageTransformsConfig) local_files_only: bool = False use_imagenet_stats: bool = True + video_backend: str = "pyav" @dataclass @@ -137,7 +143,7 @@ class WandBConfig: class MainConfig: policy: PretrainedConfig dataset: DatasetConfig - env: envs.EnvConfig = envs.RealEnv + env: envs.EnvConfig = field(default_factory=envs.RealEnv) # Set `dir` to where you would like to save all of the run outputs. If you run another training session # with the same value for `dir` its contents will be overwritten unless you set `resume` to true. dir: Path | None = None @@ -153,13 +159,10 @@ class MainConfig: use_amp: bool = False # `seed` is used for training (eg: model initialization, dataset shuffling) # AND for the evaluation environments. - seed: int | None = None - # You may provide a list of datasets here. `train.py` creates them all and concatenates them. Note: only data - # keys common between the datasets are kept. Each dataset gets and additional transform that inserts the - # "dataset_index" into the returned item. The index mapping is made according to the order in which the - # datsets are provided. - video_backend: str = "pyav" + seed: int | None = 1000 training: TrainConfig = field(default_factory=TrainConfig) + use_policy_optimizer_preset: bool = True + optimizer: OptimizerConfig | None = None eval: EvalConfig = field(default_factory=EvalConfig) wandb: WandBConfig = field(default_factory=WandBConfig) @@ -175,6 +178,11 @@ def __post_init__(self): if self.training.online.steps > 0 and isinstance(self.dataset.repo_id, list): raise NotImplementedError("Online training with LeRobotMultiDataset is not implemented.") + if not self.use_policy_optimizer_preset and self.optimizer is None: + raise ValueError("Either the policy optimizer preset or the optimizer must be used.") + elif self.use_policy_optimizer_preset: + self.optimizer = self.policy.optimizer_preset + # If we are resuming a run, we need to check that a checkpoint exists in the log directory, and we need # to check for any differences between the provided config and the checkpoint's config. 
checkpoint_cfg_path = self.dir / "checkpoints/last/config.yaml" @@ -214,4 +222,6 @@ def __post_init__(self): @classmethod def from_checkpoint(cls, config_path: Path): - return draccus.load(cls, config_path) + with open(config_path) as f: + cfg = draccus.load(cls, f) + return cfg diff --git a/lerobot/configs/policies.py b/lerobot/configs/policies.py index 7c7d80c16c..a0e8a3ec11 100644 --- a/lerobot/configs/policies.py +++ b/lerobot/configs/policies.py @@ -1,10 +1,38 @@ +import abc from dataclasses import dataclass, field +from enum import Enum +from pprint import pformat import draccus +from lerobot.common.datasets.lerobot_dataset import LeRobotDatasetMetadata +from lerobot.common.optim.optimizers import OptimizerConfig + + +# Note: We subclass str so that serialization is straightforward +# https://stackoverflow.com/questions/24481852/serialising-an-enum-member-to-json +class FeatureType(str, Enum): + STATE = "STATE" + VISUAL = "VISUAL" + ENV = "ENV" + ACTION = "ACTION" + + +class NormalizationMode(str, Enum): + MIN_MAX = "MIN_MAX" + MEAN_STD = "MEAN_STD" + @dataclass -class PretrainedConfig(draccus.ChoiceRegistry): +class PolicyFeature: + key: str + type: FeatureType + shape: list | tuple + normalization_mode: NormalizationMode + + +@dataclass +class PretrainedConfig(draccus.ChoiceRegistry, abc.ABC): """ Base configuration class for policy models. @@ -20,23 +48,122 @@ class PretrainedConfig(draccus.ChoiceRegistry): """ n_obs_steps: int = 1 - input_shapes: dict[str, list[int]] = field(default_factory=lambda: {}) - output_shapes: dict[str, list[int]] = field(default_factory=lambda: {}) - input_normalization_modes: dict[str, str] = field(default_factory=lambda: {}) - output_normalization_modes: dict[str, str] = field(default_factory=lambda: {}) + + robot_state_feature: PolicyFeature | None = None + env_state_feature: PolicyFeature | None = None + action_feature: PolicyFeature | None = None + image_features: list[PolicyFeature] | None = None + + normalization_mapping: dict[FeatureType, NormalizationMode] = field( + default_factory=lambda: { + FeatureType.STATE: NormalizationMode.MEAN_STD, + FeatureType.VISUAL: NormalizationMode.MEAN_STD, + FeatureType.ENV: NormalizationMode.MEAN_STD, + FeatureType.ACTION: NormalizationMode.MEAN_STD, + } + ) + + optimizer_preset: OptimizerConfig | None = None @property def type(self) -> str: return self.get_choice_name(self.__class__) - @property + @abc.abstractproperty def observation_delta_indices(self) -> list | None: raise NotImplementedError - @property + @abc.abstractproperty def action_delta_indices(self) -> list | None: raise NotImplementedError - @property + @abc.abstractproperty def reward_delta_indices(self) -> list | None: raise NotImplementedError + + @property + def input_features(self) -> list[PolicyFeature]: + input_features = [] + for ft in [self.robot_state_feature, self.env_state_feature, *self.image_features]: + if ft is not None: + input_features.append(ft) + + return input_features + + @property + def output_features(self) -> list[PolicyFeature]: + return [self.action_feature] + + def parse_features_from_dataset(self, ds_meta: LeRobotDatasetMetadata): + # TODO(aliberts): Implement PolicyFeature in LeRobotDataset and remove the need for this + robot_state_features = [] + env_state_features = [] + action_features = [] + image_features = [] + + for key in ds_meta.features: + if key in ds_meta.camera_keys: + shape = ds_meta.features[key]["shape"] + names = ds_meta.features[key]["names"] + if len(shape) != 3: + raise ValueError(f"Number of 
dimensions of {key} != 3 (shape={shape})") + if names[2] == "channel": # (h, w, c) -> (c, h, w) + shape = (shape[2], shape[0], shape[1]) + image_features.append( + PolicyFeature( + key=key, + type=FeatureType.VISUAL, + shape=shape, + normalization_mode=self.normalization_mapping[FeatureType.VISUAL], + ) + ) + elif key == "observation.environment_state": + env_state_features.append( + PolicyFeature( + key=key, + type=FeatureType.ENV, + shape=ds_meta.features[key]["shape"], + normalization_mode=self.normalization_mapping[FeatureType.ENV], + ) + ) + elif key.startswith("observation"): + robot_state_features.append( + PolicyFeature( + key=key, + type=FeatureType.STATE, + shape=ds_meta.features[key]["shape"], + normalization_mode=self.normalization_mapping[FeatureType.STATE], + ) + ) + elif key == "action": + action_features.append( + PolicyFeature( + key=key, + type=FeatureType.ACTION, + shape=ds_meta.features[key]["shape"], + normalization_mode=self.normalization_mapping[FeatureType.ACTION], + ) + ) + + if len(robot_state_features) > 1: + raise ValueError( + "Found multiple features for the robot's state. Please select only one or concatenate them." + f"Robot state features found:\n{pformat(robot_state_features)}" + ) + + if len(env_state_features) > 1: + raise ValueError( + "Found multiple features for the env's state. Please select only one or concatenate them." + f"Env state features found:\n{pformat(env_state_features)}" + ) + + if len(action_features) > 1: + raise ValueError( + "Found multiple features for the action. Please select only one or concatenate them." + f"Action features found:\n{pformat(action_features)}" + ) + + self.robot_state_feature = robot_state_features[0] if len(robot_state_features) == 1 else None + self.env_state_feature = env_state_features[0] if len(env_state_features) == 1 else None + self.action_feature = action_features[0] if len(action_features) == 1 else None + self.image_features = image_features diff --git a/lerobot/scripts/train.py b/lerobot/scripts/train.py index d8d18b6aea..80ef41eaa6 100644 --- a/lerobot/scripts/train.py +++ b/lerobot/scripts/train.py @@ -19,24 +19,22 @@ from contextlib import nullcontext from copy import deepcopy from dataclasses import asdict -from pathlib import Path from pprint import pformat from threading import Lock import draccus -import hydra import numpy as np import torch -from torch import nn -from torch.cuda.amp import GradScaler +from torch.amp import GradScaler from lerobot.common.datasets.factory import make_dataset, resolve_delta_timestamps -from lerobot.common.datasets.lerobot_dataset import MultiLeRobotDataset +from lerobot.common.datasets.lerobot_dataset import LeRobotDataset from lerobot.common.datasets.online_buffer import OnlineBuffer, compute_sampler_weights from lerobot.common.datasets.sampler import EpisodeAwareSampler from lerobot.common.datasets.utils import cycle from lerobot.common.envs.factory import make_env from lerobot.common.logger import Logger, log_output_dir +from lerobot.common.optim.factory import make_optimizer_and_scheduler from lerobot.common.policies.factory import make_policy from lerobot.common.policies.policy_protocol import PolicyWithUpdate from lerobot.common.policies.utils import get_device_from_parameters @@ -50,59 +48,6 @@ from lerobot.scripts.eval import eval_policy -def make_optimizer_and_scheduler(cfg: MainConfig, policy): - if cfg.policy.name == "act": - optimizer_params_dicts = [ - { - "params": [ - p - for n, p in policy.named_parameters() - if not n.startswith("model.backbone") 
and p.requires_grad - ] - }, - { - "params": [ - p - for n, p in policy.named_parameters() - if n.startswith("model.backbone") and p.requires_grad - ], - "lr": cfg.training.lr_backbone, - }, - ] - optimizer = torch.optim.AdamW( - optimizer_params_dicts, lr=cfg.training.lr, weight_decay=cfg.training.weight_decay - ) - lr_scheduler = None - elif cfg.policy.name == "diffusion": - optimizer = torch.optim.Adam( - policy.diffusion.parameters(), - cfg.training.lr, - cfg.training.adam_betas, - cfg.training.adam_eps, - cfg.training.adam_weight_decay, - ) - from diffusers.optimization import get_scheduler - - lr_scheduler = get_scheduler( - cfg.training.lr_scheduler, - optimizer=optimizer, - num_warmup_steps=cfg.training.lr_warmup_steps, - num_training_steps=cfg.training.offline_steps, - ) - elif policy.name == "tdmpc": - optimizer = torch.optim.Adam(policy.parameters(), cfg.training.lr) - lr_scheduler = None - elif cfg.policy.name == "vqbet": - from lerobot.common.policies.vqbet.modeling_vqbet import VQBeTOptimizer, VQBeTScheduler - - optimizer = VQBeTOptimizer(policy, cfg) - lr_scheduler = VQBeTScheduler(optimizer, cfg) - else: - raise NotImplementedError() - - return optimizer, lr_scheduler - - def update_policy( policy, batch, @@ -160,7 +105,9 @@ def update_policy( return info -def log_train_info(logger: Logger, info, step, cfg, dataset, is_online): +def log_train_info( + logger: Logger, info: dict, step: int, cfg: MainConfig, dataset: LeRobotDataset, is_online: bool +): loss = info["loss"] grad_norm = info["grad_norm"] lr = info["lr"] @@ -234,70 +181,10 @@ def log_eval_info(logger, info, step, cfg, dataset, is_online): @draccus.wrap() -def train(cfg: MainConfig, out_dir: str | None = None, job_name: str | None = None): - # if out_dir is None: - # raise NotImplementedError() - # if job_name is None: - # raise NotImplementedError() - +def train(cfg: MainConfig): init_logging() logging.info(pformat(asdict(cfg))) - # if cfg.training.online_steps > 0 and isinstance(cfg.dataset_repo_id, ListConfig): - # raise NotImplementedError("Online training with LeRobotMultiDataset is not implemented.") - - # # If we are resuming a run, we need to check that a checkpoint exists in the log directory, and we need - # # to check for any differences between the provided config and the checkpoint's config. - # if cfg.resume: - # if not Logger.get_last_checkpoint_dir(out_dir).exists(): - # raise RuntimeError( - # "You have set resume=True, but there is no model checkpoint in " - # f"{Logger.get_last_checkpoint_dir(out_dir)}" - # ) - # checkpoint_cfg_path = str(Logger.get_last_pretrained_model_dir(out_dir) / "config.yaml") - # logging.info( - # colored( - # "You have set resume=True, indicating that you wish to resume a run", - # color="yellow", - # attrs=["bold"], - # ) - # ) - # # Get the configuration file from the last checkpoint. - # checkpoint_cfg = init_hydra_config(checkpoint_cfg_path) - # # Check for differences between the checkpoint configuration and provided configuration. - # # Hack to resolve the delta_timestamps ahead of time in order to properly diff. - # resolve_delta_timestamps(cfg) - # diff = DeepDiff(OmegaConf.to_container(checkpoint_cfg), OmegaConf.to_container(cfg)) - # # Ignore the `resume` and parameters. - # if "values_changed" in diff and "root['resume']" in diff["values_changed"]: - # del diff["values_changed"]["root['resume']"] - # # Log a warning about differences between the checkpoint configuration and the provided - # # configuration. 
- # if len(diff) > 0: - # logging.warning( - # "At least one difference was detected between the checkpoint configuration and " - # f"the provided configuration: \n{pformat(diff)}\nNote that the checkpoint configuration " - # "takes precedence.", - # ) - # # Use the checkpoint config instead of the provided config (but keep `resume` parameter). - # cfg = checkpoint_cfg - # cfg.resume = True - # elif Logger.get_last_checkpoint_dir(out_dir).exists(): - # raise RuntimeError( - # f"The configured output directory {Logger.get_last_checkpoint_dir(out_dir)} already exists. If " - # "you meant to resume training, please use `resume=true` in your command or yaml configuration." - # ) - - # if cfg.eval.batch_size > cfg.eval.n_episodes: - # raise ValueError( - # "The eval batch size is greater than the number of eval episodes " - # f"({cfg.eval.batch_size} > {cfg.eval.n_episodes}). As a result, {cfg.eval.batch_size} " - # f"eval environments will be instantiated, but only {cfg.eval.n_episodes} will be used. " - # "This might significantly slow down evaluation. To fix this, you should update your command " - # f"to increase the number of episodes to match the batch size (e.g. `eval.n_episodes={cfg.eval.batch_size}`), " - # f"or lower the batch size (e.g. `eval.batch_size={cfg.eval.n_episodes}`)." - # ) - # log metrics to terminal and wandb logger = Logger(cfg) @@ -310,33 +197,26 @@ def train(cfg: MainConfig, out_dir: str | None = None, job_name: str | None = No torch.backends.cudnn.benchmark = True torch.backends.cuda.matmul.allow_tf32 = True - logging.info("make_dataset") + logging.info("Creating dataset") offline_dataset = make_dataset(cfg) - if isinstance(offline_dataset, MultiLeRobotDataset): - logging.info( - "Multiple datasets were provided. Applied the following index mapping to the provided datasets: " - f"{pformat(offline_dataset.repo_id_to_index , indent=2)}" - ) # Create environment used for evaluating checkpoints during training on simulation data. # On real-world data, no need to create an environment as evaluations are done outside train.py, # using the eval.py instead, with gym_dora environment and dora-rs. 
eval_env = None - if cfg.training.eval_freq > 0: - logging.info("make_env") + if cfg.training.eval_freq > 0 and cfg.env.type != "real_world": + logging.info("Creating env") eval_env = make_env(cfg) - logging.info("make_policy") + logging.info("Creating policy") policy = make_policy( - hydra_cfg=cfg, - dataset_stats=offline_dataset.meta.stats if not cfg.resume else None, + cfg=cfg, + ds_meta=offline_dataset.meta, pretrained_policy_name_or_path=str(logger.last_pretrained_model_dir) if cfg.resume else None, ) - assert isinstance(policy, nn.Module) - # Create optimizer and scheduler - # Temporary hack to move optimizer out of policy + logging.info("Creating optimizer and scheduler") optimizer, lr_scheduler = make_optimizer_and_scheduler(cfg, policy) - grad_scaler = GradScaler(enabled=cfg.use_amp) + grad_scaler = GradScaler(device, enabled=cfg.use_amp) step = 0 # number of policy updates (forward + backward + optim) @@ -346,18 +226,18 @@ def train(cfg: MainConfig, out_dir: str | None = None, job_name: str | None = No num_learnable_params = sum(p.numel() for p in policy.parameters() if p.requires_grad) num_total_params = sum(p.numel() for p in policy.parameters()) - log_output_dir(out_dir) + log_output_dir(cfg.dir) logging.info(f"{cfg.env.task=}") - logging.info(f"{cfg.training.offline_steps=} ({format_big_number(cfg.training.offline_steps)})") - logging.info(f"{cfg.training.online_steps=}") + logging.info(f"{cfg.training.offline.steps=} ({format_big_number(cfg.training.offline.steps)})") + logging.info(f"{cfg.training.online.steps=}") logging.info(f"{offline_dataset.num_frames=} ({format_big_number(offline_dataset.num_frames)})") logging.info(f"{offline_dataset.num_episodes=}") logging.info(f"{num_learnable_params=} ({format_big_number(num_learnable_params)})") logging.info(f"{num_total_params=} ({format_big_number(num_total_params)})") # Note: this helper will be used in offline and online training loops. 
- def evaluate_and_checkpoint_if_needed(step, is_online): - _num_digits = max(6, len(str(cfg.training.offline_steps + cfg.training.online_steps))) + def evaluate_and_checkpoint_if_needed(step: int, is_online: bool): + _num_digits = max(6, len(str(cfg.training.offline.steps + cfg.training.online.steps))) step_identifier = f"{step:0{_num_digits}d}" if cfg.training.eval_freq > 0 and step % cfg.training.eval_freq == 0: @@ -368,7 +248,7 @@ def evaluate_and_checkpoint_if_needed(step, is_online): eval_env, policy, cfg.eval.n_episodes, - videos_dir=Path(out_dir) / "eval" / f"videos_step_{step_identifier}", + videos_dir=cfg.dir / "eval" / f"videos_step_{step_identifier}", max_episodes_rendered=4, start_seed=cfg.seed, ) @@ -379,7 +259,7 @@ def evaluate_and_checkpoint_if_needed(step, is_online): if cfg.training.save_checkpoint and ( step % cfg.training.save_freq == 0 - or step == cfg.training.offline_steps + cfg.training.online_steps + or step == cfg.training.offline.steps + cfg.training.online.steps ): logging.info(f"Checkpoint policy after step {step}") # Note: Save with step as the identifier, and format it to have at least 6 digits but more if @@ -394,11 +274,11 @@ def evaluate_and_checkpoint_if_needed(step, is_online): logging.info("Resume training") # create dataloader for offline training - if cfg.training.get("drop_n_last_frames"): + if getattr(cfg.policy, "drop_n_last_frames", None): shuffle = False sampler = EpisodeAwareSampler( offline_dataset.episode_data_index, - drop_n_last_frames=cfg.training.drop_n_last_frames, + drop_n_last_frames=cfg.policy.drop_n_last_frames, shuffle=True, ) else: @@ -417,7 +297,7 @@ def evaluate_and_checkpoint_if_needed(step, is_online): policy.train() offline_step = 0 - for _ in range(step, cfg.training.offline_steps): + for _ in range(step, cfg.training.offline.steps): if offline_step == 0: logging.info("Start offline training on a fixed dataset") @@ -432,7 +312,7 @@ def evaluate_and_checkpoint_if_needed(step, is_online): policy, batch, optimizer, - cfg.training.grad_clip_norm, + cfg.optimizer.grad_clip_norm, grad_scaler=grad_scaler, lr_scheduler=lr_scheduler, use_amp=cfg.use_amp, @@ -450,7 +330,7 @@ def evaluate_and_checkpoint_if_needed(step, is_online): step += 1 offline_step += 1 # noqa: SIM113 - if cfg.training.online_steps == 0: + if cfg.training.online.steps == 0: if eval_env: eval_env.close() logging.info("End of training") @@ -459,8 +339,8 @@ def evaluate_and_checkpoint_if_needed(step, is_online): # Online training. # Create an env dedicated to online episodes collection from policy rollout. 
- online_env = make_env(cfg, n_envs=cfg.training.online_rollout_batch_size) - resolve_delta_timestamps(cfg) + online_env = make_env(cfg) + detla_timestamps = resolve_delta_timestamps(cfg.policy, offline_dataset.meta) online_buffer_path = logger.log_dir / "online_buffer" if cfg.resume and not online_buffer_path.exists(): # If we are resuming a run, we default to the data shapes and buffer capacity from the saved online @@ -474,31 +354,37 @@ def evaluate_and_checkpoint_if_needed(step, is_online): online_dataset = OnlineBuffer( online_buffer_path, data_spec={ - **{k: {"shape": v, "dtype": np.dtype("float32")} for k, v in policy.config.input_shapes.items()}, - **{k: {"shape": v, "dtype": np.dtype("float32")} for k, v in policy.config.output_shapes.items()}, + **{ + ft.key: {"shape": ft.shape, "dtype": np.dtype("float32")} + for ft in policy.config.input_features + }, + **{ + ft.key: {"shape": ft.shape, "dtype": np.dtype("float32")} + for ft in policy.config.output_features + }, "next.reward": {"shape": (), "dtype": np.dtype("float32")}, "next.done": {"shape": (), "dtype": np.dtype("?")}, "next.success": {"shape": (), "dtype": np.dtype("?")}, }, - buffer_capacity=cfg.training.online_buffer_capacity, + buffer_capacity=cfg.training.online.buffer_capacity, fps=online_env.unwrapped.metadata["render_fps"], - delta_timestamps=cfg.training.delta_timestamps, + delta_timestamps=detla_timestamps, ) # If we are doing online rollouts asynchronously, deepcopy the policy to use for online rollouts (this # makes it possible to do online rollouts in parallel with training updates). - online_rollout_policy = deepcopy(policy) if cfg.training.do_online_rollout_async else policy + online_rollout_policy = deepcopy(policy) if cfg.training.online.do_rollout_async else policy # Create dataloader for online training. concat_dataset = torch.utils.data.ConcatDataset([offline_dataset, online_dataset]) sampler_weights = compute_sampler_weights( offline_dataset, - offline_drop_n_last_frames=cfg.training.get("drop_n_last_frames", 0), + offline_drop_n_last_frames=getattr(cfg.policy, "drop_n_last_frames", 0), online_dataset=online_dataset, # +1 because online rollouts return an extra frame for the "final observation". Note: we don't have # this final observation in the offline datasets, but we might add them in future. - online_drop_n_last_frames=cfg.training.get("drop_n_last_frames", 0) + 1, - online_sampling_ratio=cfg.training.online_sampling_ratio, + online_drop_n_last_frames=getattr(cfg.policy, "drop_n_last_frames", 0) + 1, + online_sampling_ratio=cfg.training.online.sampling_ratio, ) sampler = torch.utils.data.WeightedRandomSampler( sampler_weights, @@ -528,10 +414,10 @@ def evaluate_and_checkpoint_if_needed(step, is_online): # Time taken waiting for the online buffer to finish being updated. This is relevant when using the async # online rollout option. 
await_update_online_buffer_s = 0 - rollout_start_seed = cfg.training.online_env_seed + rollout_start_seed = cfg.training.online.env_seed while True: - if online_step == cfg.training.online_steps: + if online_step == cfg.training.online.steps: break if online_step == 0: @@ -547,8 +433,8 @@ def sample_trajectory_and_update_buffer(): eval_info = eval_policy( online_env, online_rollout_policy, - n_episodes=cfg.training.online_rollout_n_episodes, - max_episodes_rendered=min(10, cfg.training.online_rollout_n_episodes), + n_episodes=cfg.training.online.rollout_n_episodes, + max_episodes_rendered=min(10, cfg.training.online.rollout_n_episodes), videos_dir=logger.log_dir / "online_rollout_videos", return_episode_data=True, start_seed=( @@ -567,12 +453,12 @@ def sample_trajectory_and_update_buffer(): # Update the sampling weights. sampler.weights = compute_sampler_weights( offline_dataset, - offline_drop_n_last_frames=cfg.training.get("drop_n_last_frames", 0), + offline_drop_n_last_frames=getattr(cfg.policy, "drop_n_last_frames", 0), online_dataset=online_dataset, # +1 because online rollouts return an extra frame for the "final observation". Note: we don't have # this final observation in the offline datasets, but we might add them in future. - online_drop_n_last_frames=cfg.training.get("drop_n_last_frames", 0) + 1, - online_sampling_ratio=cfg.training.online_sampling_ratio, + online_drop_n_last_frames=getattr(cfg.policy, "drop_n_last_frames", 0) + 1, + online_sampling_ratio=cfg.training.online.sampling_ratio, ) sampler.num_frames = len(concat_dataset) @@ -584,32 +470,32 @@ def sample_trajectory_and_update_buffer(): # If we aren't doing async rollouts, or if we haven't yet gotten enough examples in our buffer, wait # here until the rollout and buffer update is done, before proceeding to the policy update steps. 
if ( - not cfg.training.do_online_rollout_async - or len(online_dataset) <= cfg.training.online_buffer_seed_size + not cfg.training.online.do_rollout_async + or len(online_dataset) <= cfg.training.online.buffer_seed_size ): online_rollout_s, update_online_buffer_s = future.result() - if len(online_dataset) <= cfg.training.online_buffer_seed_size: + if len(online_dataset) <= cfg.training.online.buffer_seed_size: logging.info( - f"Seeding online buffer: {len(online_dataset)}/{cfg.training.online_buffer_seed_size}" + f"Seeding online buffer: {len(online_dataset)}/{cfg.training.online.buffer_seed_size}" ) continue policy.train() - for _ in range(cfg.training.online_steps_between_rollouts): + for _ in range(cfg.training.online.steps_between_rollouts): with lock: start_time = time.perf_counter() batch = next(dl_iter) dataloading_s = time.perf_counter() - start_time for key in batch: - batch[key] = batch[key].to(cfg.device, non_blocking=True) + batch[key] = batch[key].to(device, non_blocking=True) train_info = update_policy( policy, batch, optimizer, - cfg.training.grad_clip_norm, + cfg.optimizer.grad_clip_norm, grad_scaler=grad_scaler, lr_scheduler=lr_scheduler, use_amp=cfg.use_amp, @@ -640,7 +526,7 @@ def sample_trajectory_and_update_buffer(): online_rollout_s, update_online_buffer_s = future.result() await_update_online_buffer_s = time.perf_counter() - start - if online_step >= cfg.training.online_steps: + if online_step >= cfg.training.online.steps: break if eval_env: @@ -648,24 +534,5 @@ def sample_trajectory_and_update_buffer(): logging.info("End of training") -@hydra.main(version_base="1.2", config_name="default", config_path="../configs") -def train_cli(cfg: dict): - train( - cfg, - out_dir=hydra.core.hydra_config.HydraConfig.get().run.dir, - job_name=hydra.core.hydra_config.HydraConfig.get().job.name, - ) - - -def train_notebook(out_dir=None, job_name=None, config_name="default", config_path="../configs"): - from hydra import compose, initialize - - hydra.core.global_hydra.GlobalHydra.instance().clear() - initialize(config_path=config_path) - cfg = compose(config_name=config_name) - train(cfg, out_dir=out_dir, job_name=job_name) - - if __name__ == "__main__": - # train_cli() train() From bed1ec33f303e7677035c5c0714ebdbd5ac8c7a0 Mon Sep 17 00:00:00 2001 From: Simon Alibert Date: Mon, 23 Dec 2024 12:27:24 +0100 Subject: [PATCH 03/94] Add policies training presets --- lerobot/common/optim/factory.py | 38 +---------- lerobot/common/optim/optimizers.py | 22 +++++-- lerobot/common/optim/schedulers.py | 65 +++++++++++++++++++ .../common/policies/act/configuration_act.py | 42 ++++++------ lerobot/common/policies/act/modeling_act.py | 4 +- lerobot/common/policies/factory.py | 12 ++++ lerobot/configs/default.py | 13 ++-- lerobot/configs/policies.py | 29 ++++----- 8 files changed, 143 insertions(+), 82 deletions(-) create mode 100644 lerobot/common/optim/schedulers.py diff --git a/lerobot/common/optim/factory.py b/lerobot/common/optim/factory.py index cfa10fb690..d5440f66dd 100644 --- a/lerobot/common/optim/factory.py +++ b/lerobot/common/optim/factory.py @@ -14,7 +14,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
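For reference, a minimal sketch of how the OptimizerConfig choices registered earlier are meant to be used, assuming the modules are importable as laid out in this PR (the stand-in model and the hyperparameter values below are illustrative, not part of the patch):

    import torch.nn as nn

    from lerobot.common.optim.optimizers import AdamWConfig

    model = nn.Linear(10, 2)  # stand-in for a policy's parameters
    opt_cfg = AdamWConfig(lr=1e-5, weight_decay=1e-4)
    # build() drops grad_clip_norm (applied separately when clipping gradients) and forwards the
    # remaining fields (lr, betas, eps, weight_decay) to torch.optim.AdamW.
    optimizer = opt_cfg.build(model.parameters())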
-import torch from torch.optim import Optimizer from torch.optim.lr_scheduler import LRScheduler @@ -23,40 +22,7 @@ def make_optimizer_and_scheduler(cfg: MainConfig, policy: Policy) -> tuple[Optimizer, LRScheduler | None]: - params = policy.get_optim_params() if cfg.use_policy_optimizer_preset else policy.parameters() + params = policy.get_optim_params() if cfg.use_policy_training_preset else policy.parameters() optimizer = cfg.optimizer.build(params) - - if not cfg.use_policy_optimizer_preset: - lr_scheduler = None - return optimizer, lr_scheduler - - if cfg.policy.type == "act": - lr_scheduler = None - elif cfg.policy.type == "diffusion": - from diffusers.optimization import get_scheduler - - lr_scheduler = get_scheduler( - cfg.training.lr_scheduler, - optimizer=optimizer, - num_warmup_steps=cfg.training.lr_warmup_steps, - num_training_steps=cfg.training.offline_steps, - ) - elif cfg.policy.type == "tdmpc": - optimizer = torch.optim.Adam(params, cfg.training.lr) - lr_scheduler = None - elif cfg.policy.type == "vqbet": - # from lerobot.common.policies.vqbet.modeling_vqbet import VQBeTOptimizer, VQBeTScheduler - # optimizer = VQBeTOptimizer(policy, cfg) - optimizer = torch.optim.Adam( - params, - cfg.optimizer.lr, - cfg.optimizer.adam_betas, - cfg.optimizer.adam_eps, - ) - from lerobot.common.policies.vqbet.modeling_vqbet import VQBeTScheduler - - lr_scheduler = VQBeTScheduler(optimizer, cfg) - else: - raise NotImplementedError() - + lr_scheduler = cfg.scheduler.build(optimizer, cfg.training.offline.steps) return optimizer, lr_scheduler diff --git a/lerobot/common/optim/optimizers.py b/lerobot/common/optim/optimizers.py index b085211227..8a19615b67 100644 --- a/lerobot/common/optim/optimizers.py +++ b/lerobot/common/optim/optimizers.py @@ -7,11 +7,11 @@ @dataclass class OptimizerConfig(draccus.ChoiceRegistry, abc.ABC): - lr: float = 1e-5 - betas: tuple[float, float] = (0.9, 0.999) - eps: float = 1e-8 - weight_decay: float = 1e-4 - grad_clip_norm: float = 10.0 + lr: float + betas: tuple[float, float] + eps: float + weight_decay: float + grad_clip_norm: float @property def type(self) -> str: @@ -29,6 +29,12 @@ def build(self) -> torch.optim.Optimizer: @OptimizerConfig.register_subclass("adam") @dataclass class AdamConfig(OptimizerConfig): + lr: float = 1e-3 + betas: tuple[float, float] = (0.9, 0.999) + eps: float = 1e-8 + weight_decay: float = 0.0 + grad_clip_norm: float = 10.0 + def build(self, params: dict) -> torch.optim.Optimizer: kwargs = asdict(self) kwargs.pop("grad_clip_norm") @@ -38,6 +44,12 @@ def build(self, params: dict) -> torch.optim.Optimizer: @OptimizerConfig.register_subclass("adamw") @dataclass class AdamWConfig(OptimizerConfig): + lr: float = 1e-3 + betas: tuple[float, float] = (0.9, 0.999) + eps: float = 1e-8 + weight_decay: float = 1e-2 + grad_clip_norm: float = 10.0 + def build(self, params: dict) -> torch.optim.Optimizer: kwargs = asdict(self) kwargs.pop("grad_clip_norm") diff --git a/lerobot/common/optim/schedulers.py b/lerobot/common/optim/schedulers.py new file mode 100644 index 0000000000..36e2112ec1 --- /dev/null +++ b/lerobot/common/optim/schedulers.py @@ -0,0 +1,65 @@ +import abc +import math +from dataclasses import asdict, dataclass + +import draccus +from torch.optim import Optimizer +from torch.optim.lr_scheduler import LambdaLR, LRScheduler + + +@dataclass +class LRSchedulerConfig(draccus.ChoiceRegistry, abc.ABC): + num_warmup_steps: int + + @property + def type(self) -> str: + return self.get_choice_name(self.__class__) + + @abc.abstractmethod + def 
build(self, optimizer: Optimizer, num_training_steps: int) -> LRScheduler | None: + raise NotImplementedError + + +@LRSchedulerConfig.register_subclass("none") +@dataclass +class NoneSchedulerConfig(LRSchedulerConfig): + num_warmup_steps: int = 0 + + def build(self, optimizer: Optimizer, num_training_steps: int) -> None: + return None + + +@LRSchedulerConfig.register_subclass("diffuser") +@dataclass +class DiffuserSchedulerConfig(LRSchedulerConfig): + name: str = "cosine" + num_warmup_steps: int | None = None + + def build(self, optimizer: Optimizer, num_training_steps: int) -> LambdaLR: + from diffusers.optimization import get_scheduler + + kwargs = {**asdict(self), "num_training_steps": num_training_steps, "optimizer": optimizer} + return get_scheduler(**kwargs) + + +@LRSchedulerConfig.register_subclass("vqbet") +@dataclass +class VQBeTSchedulerConfig(LRSchedulerConfig): + num_warmup_steps: int + num_vqvae_training_steps: int + num_cycles: float = 0.5 + + def build(self, optimizer: Optimizer, num_training_steps: int) -> LambdaLR: + def lr_lambda(current_step): + if current_step < self.num_vqvae_training_steps: + return float(1) + else: + adjusted_step = current_step - self.num_vqvae_training_steps + if adjusted_step < self.num_warmup_steps: + return float(adjusted_step) / float(max(1, self.num_warmup_steps)) + progress = float(adjusted_step - self.num_warmup_steps) / float( + max(1, num_training_steps - self.num_warmup_steps) + ) + return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(self.num_cycles) * 2.0 * progress))) + + return LambdaLR(optimizer, lr_lambda, -1) diff --git a/lerobot/common/policies/act/configuration_act.py b/lerobot/common/policies/act/configuration_act.py index b22989f3d5..2cc7112be8 100644 --- a/lerobot/common/policies/act/configuration_act.py +++ b/lerobot/common/policies/act/configuration_act.py @@ -16,7 +16,8 @@ from dataclasses import dataclass, field from lerobot.common.optim.optimizers import AdamWConfig -from lerobot.configs.policies import FeatureType, NormalizationMode, PretrainedConfig +from lerobot.common.optim.schedulers import NoneSchedulerConfig +from lerobot.configs.policies import NormalizationMode, PretrainedConfig @PretrainedConfig.register_subclass("act") @@ -94,11 +95,11 @@ class ACTConfig(PretrainedConfig): chunk_size: int = 100 n_action_steps: int = 100 - normalization_mapping: dict[FeatureType, NormalizationMode] = field( + normalization_mapping: dict[str, NormalizationMode] = field( default_factory=lambda: { - FeatureType.VISUAL: NormalizationMode.MEAN_STD, - FeatureType.STATE: NormalizationMode.MEAN_STD, - FeatureType.ACTION: NormalizationMode.MEAN_STD, + "VISUAL": NormalizationMode.MEAN_STD, + "STATE": NormalizationMode.MEAN_STD, + "ACTION": NormalizationMode.MEAN_STD, } ) @@ -131,14 +132,10 @@ class ACTConfig(PretrainedConfig): dropout: float = 0.1 kl_weight: float = 10.0 - # Optimizer presets - optimizer_preset: AdamWConfig = field( - default_factory=lambda: AdamWConfig( - lr=1e-5, - weight_decay=1e-4, - ) - ) - lr_backbone: float = 1e-5 + # Training preset + optimizer_lr = 1e-5 + optimizer_weight_decay = 1e-4 + optimizer_lr_backbone: float = 1e-5 def __post_init__(self): """Input validation (not exhaustive).""" @@ -160,12 +157,19 @@ def __post_init__(self): raise ValueError( f"Multiple observation steps not handled yet. 
Got `nobs_steps={self.n_obs_steps}`" ) - # TODO(aliberts): move this check into the policy init - # if ( - # not any(k.startswith("observation.image") for k in self.input_shapes) - # and "observation.environment_state" not in self.input_shapes - # ): - # raise ValueError("You must provide at least one image or the environment state among the inputs.") + + def get_optimizer_preset(self) -> AdamWConfig: + return AdamWConfig( + lr=self.optimizer_lr, + weight_decay=self.optimizer_weight_decay, + ) + + def get_scheduler_preset(self) -> NoneSchedulerConfig: + return NoneSchedulerConfig() + + def validate_features(self) -> None: + if not self.image_features and not self.env_state_feature: + raise ValueError("You must provide at least one image or the environment state among the inputs.") @property def observation_delta_indices(self) -> None: diff --git a/lerobot/common/policies/act/modeling_act.py b/lerobot/common/policies/act/modeling_act.py index c181b02d08..da66b5aaa1 100644 --- a/lerobot/common/policies/act/modeling_act.py +++ b/lerobot/common/policies/act/modeling_act.py @@ -65,6 +65,7 @@ def __init__( that they will be passed with a call to `load_state_dict` before the policy is used. """ super().__init__() + config.validate_features() self.config = config self.normalize_inputs = Normalize(config.input_features, dataset_stats) @@ -95,7 +96,7 @@ def get_optim_params(self) -> dict: for n, p in self.named_parameters() if n.startswith("model.backbone") and p.requires_grad ], - "lr": self.config.lr_backbone, + "lr": self.config.optimizer_lr_backbone, }, ] @@ -320,7 +321,6 @@ def __init__(self, config: ACTConfig): self.vae_encoder_action_input_proj = nn.Linear( self.config.action_feature.shape[0], config.dim_model, - # config.output_shapes["action"][0], config.dim_model ) # Projection layer from the VAE encoder's output to the latent distribution's parameter space. self.vae_encoder_latent_output_proj = nn.Linear(config.dim_model, config.latent_dim * 2) diff --git a/lerobot/common/policies/factory.py b/lerobot/common/policies/factory.py index 2d4a2c07d1..5e768c1e23 100644 --- a/lerobot/common/policies/factory.py +++ b/lerobot/common/policies/factory.py @@ -57,6 +57,18 @@ def make_policy( directory containing weights saved using `Policy.save_pretrained`. Note that providing this argument overrides everything in `hydra_cfg.policy` apart from `hydra_cfg.policy.type`. """ + # Note: Currently, if you try to run vqbet with mps backend, you'll get this error. + # NotImplementedError: The operator 'aten::unique_dim' is not currently implemented for the MPS device. If + # you want this op to be added in priority during the prototype phase of this feature, please comment on + # https://github.com/pytorch/pytorch/issues/77764. As a temporary fix, you can set the environment + # variable `PYTORCH_ENABLE_MPS_FALLBACK=1` to use the CPU as a fallback for this op. WARNING: this will be + # slower than running natively on MPS. + if cfg.policy.type == "vqbet" and cfg.device == "mps": + raise NotImplementedError( + "Current implementation of VQBeT does not support `mps` backend. " + "Please use `cpu` or `cuda` backend." 
+ ) + policy_cls = get_policy_class(cfg.policy.type) cfg.policy.parse_features_from_dataset(ds_meta) diff --git a/lerobot/configs/default.py b/lerobot/configs/default.py index 76912b1034..8efe2f96ad 100644 --- a/lerobot/configs/default.py +++ b/lerobot/configs/default.py @@ -29,6 +29,7 @@ ) from lerobot.common.datasets.transforms import ImageTransformsConfig from lerobot.common.optim import OptimizerConfig +from lerobot.common.optim.schedulers import LRSchedulerConfig from lerobot.configs.policies import PretrainedConfig @@ -161,8 +162,9 @@ class MainConfig: # AND for the evaluation environments. seed: int | None = 1000 training: TrainConfig = field(default_factory=TrainConfig) - use_policy_optimizer_preset: bool = True + use_policy_training_preset: bool = True optimizer: OptimizerConfig | None = None + scheduler: LRSchedulerConfig | None = None eval: EvalConfig = field(default_factory=EvalConfig) wandb: WandBConfig = field(default_factory=WandBConfig) @@ -178,10 +180,11 @@ def __post_init__(self): if self.training.online.steps > 0 and isinstance(self.dataset.repo_id, list): raise NotImplementedError("Online training with LeRobotMultiDataset is not implemented.") - if not self.use_policy_optimizer_preset and self.optimizer is None: - raise ValueError("Either the policy optimizer preset or the optimizer must be used.") - elif self.use_policy_optimizer_preset: - self.optimizer = self.policy.optimizer_preset + if not self.use_policy_training_preset and (self.optimizer is None or self.scheduler is None): + raise ValueError("Optimizer and Scheduler must be set when the policy presets are not used.") + elif self.use_policy_training_preset: + self.optimizer = self.policy.get_optimizer_preset() + self.scheduler = self.policy.get_scheduler_preset() # If we are resuming a run, we need to check that a checkpoint exists in the log directory, and we need # to check for any differences between the provided config and the checkpoint's config. 
diff --git a/lerobot/configs/policies.py b/lerobot/configs/policies.py index a0e8a3ec11..022c5cb791 100644 --- a/lerobot/configs/policies.py +++ b/lerobot/configs/policies.py @@ -7,6 +7,7 @@ from lerobot.common.datasets.lerobot_dataset import LeRobotDatasetMetadata from lerobot.common.optim.optimizers import OptimizerConfig +from lerobot.common.optim.schedulers import LRSchedulerConfig # Note: We subclass str so that serialization is straightforward @@ -49,21 +50,7 @@ class PretrainedConfig(draccus.ChoiceRegistry, abc.ABC): n_obs_steps: int = 1 - robot_state_feature: PolicyFeature | None = None - env_state_feature: PolicyFeature | None = None - action_feature: PolicyFeature | None = None - image_features: list[PolicyFeature] | None = None - - normalization_mapping: dict[FeatureType, NormalizationMode] = field( - default_factory=lambda: { - FeatureType.STATE: NormalizationMode.MEAN_STD, - FeatureType.VISUAL: NormalizationMode.MEAN_STD, - FeatureType.ENV: NormalizationMode.MEAN_STD, - FeatureType.ACTION: NormalizationMode.MEAN_STD, - } - ) - - optimizer_preset: OptimizerConfig | None = None + normalization_mapping: dict[str, NormalizationMode] = field(default_factory=dict) @property def type(self) -> str: @@ -81,6 +68,18 @@ def action_delta_indices(self) -> list | None: def reward_delta_indices(self) -> list | None: raise NotImplementedError + @abc.abstractmethod + def get_optimizer_preset(self) -> OptimizerConfig: + raise NotImplementedError + + @abc.abstractmethod + def get_scheduler_preset(self) -> LRSchedulerConfig: + raise NotImplementedError + + @abc.abstractmethod + def validate_features(self) -> None: + raise NotImplementedError + @property def input_features(self) -> list[PolicyFeature]: input_features = [] From 0ab28eba53093c139a4570ffbb78a11a8a5d7ab7 Mon Sep 17 00:00:00 2001 From: Simon Alibert Date: Mon, 23 Dec 2024 12:33:50 +0100 Subject: [PATCH 04/94] Update diffusion policy --- .../diffusion/configuration_diffusion.py | 97 +++++++++++-------- .../policies/diffusion/modeling_diffusion.py | 91 ++++++++--------- lerobot/common/policies/utils.py | 1 - 3 files changed, 96 insertions(+), 93 deletions(-) diff --git a/lerobot/common/policies/diffusion/configuration_diffusion.py b/lerobot/common/policies/diffusion/configuration_diffusion.py index 64a66dfd15..ed6aa7ad1b 100644 --- a/lerobot/common/policies/diffusion/configuration_diffusion.py +++ b/lerobot/common/policies/diffusion/configuration_diffusion.py @@ -16,7 +16,9 @@ # limitations under the License. 
from dataclasses import dataclass, field -from lerobot.configs.policies import PretrainedConfig +from lerobot.common.optim.optimizers import AdamConfig +from lerobot.common.optim.schedulers import DiffuserSchedulerConfig +from lerobot.configs.policies import NormalizationMode, PretrainedConfig @PretrainedConfig.register_subclass("diffusion") @@ -105,26 +107,17 @@ class DiffusionConfig(PretrainedConfig): horizon: int = 16 n_action_steps: int = 8 - input_shapes: dict[str, list[int]] = field( + normalization_mapping: dict[str, NormalizationMode] = field( default_factory=lambda: { - "observation.image": [3, 96, 96], - "observation.state": [2], - } - ) - output_shapes: dict[str, list[int]] = field( - default_factory=lambda: { - "action": [2], + "VISUAL": NormalizationMode.MEAN_STD, + "STATE": NormalizationMode.MIN_MAX, + "ACTION": NormalizationMode.MIN_MAX, } ) - # Normalization / Unnormalization - input_normalization_modes: dict[str, str] = field( - default_factory=lambda: { - "observation.image": "mean_std", - "observation.state": "min_max", - } - ) - output_normalization_modes: dict[str, str] = field(default_factory=lambda: {"action": "min_max"}) + # The original implementation doesn't sample frames for the last 7 steps, + # which avoids excessive padding and leads to improved training results. + drop_n_last_frames: int = 7 # horizon - n_action_steps - n_obs_steps + 1 # Architecture / modeling. # Vision backbone. @@ -157,6 +150,14 @@ class DiffusionConfig(PretrainedConfig): # Loss computation do_mask_loss_for_padding: bool = False + # Training presets + optimizer_lr: float = 1e-4 + optimizer_betas: tuple = (0.95, 0.999) + optimizer_eps: float = 1e-8 + optimizer_weight_decay: float = 1e-6 + scheduler_name: str = "cosine" + scheduler_warmup_steps: int = 500 + def __post_init__(self): """Input validation (not exhaustive).""" if not self.vision_backbone.startswith("resnet"): @@ -164,32 +165,6 @@ def __post_init__(self): f"`vision_backbone` must be one of the ResNet variants. Got {self.vision_backbone}." ) - image_keys = {k for k in self.input_shapes if k.startswith("observation.image")} - - if len(image_keys) == 0 and "observation.environment_state" not in self.input_shapes: - raise ValueError("You must provide at least one image or the environment state among the inputs.") - - if len(image_keys) > 0: - if self.crop_shape is not None: - for image_key in image_keys: - if ( - self.crop_shape[0] > self.input_shapes[image_key][1] - or self.crop_shape[1] > self.input_shapes[image_key][2] - ): - raise ValueError( - f"`crop_shape` should fit within `input_shapes[{image_key}]`. Got {self.crop_shape} " - f"for `crop_shape` and {self.input_shapes[image_key]} for " - "`input_shapes[{image_key}]`." - ) - # Check that all input images have the same shape. - first_image_key = next(iter(image_keys)) - for image_key in image_keys: - if self.input_shapes[image_key] != self.input_shapes[first_image_key]: - raise ValueError( - f"`input_shapes[{image_key}]` does not match `input_shapes[{first_image_key}]`, but we " - "expect all image shapes to match." - ) - supported_prediction_types = ["epsilon", "sample"] if self.prediction_type not in supported_prediction_types: raise ValueError( @@ -211,6 +186,42 @@ def __post_init__(self): f"by `len(down_dims)`). 
Got {self.horizon=} and {self.down_dims=}" ) + def get_optimizer_preset(self) -> AdamConfig: + return AdamConfig( + lr=self.optimizer_lr, + betas=self.optimizer_betas, + eps=self.optimizer_eps, + weight_decay=self.optimizer_weight_decay, + ) + + def get_scheduler_preset(self) -> DiffuserSchedulerConfig: + return DiffuserSchedulerConfig( + name=self.scheduler_name, + num_warmup_steps=self.scheduler_warmup_steps, + ) + + def validate_features(self) -> None: + if len(self.image_features) == 0 and self.env_state_feature is None: + raise ValueError("You must provide at least one image or the environment state among the inputs.") + + if self.crop_shape is not None: + for image_ft in self.image_features: + if self.crop_shape[0] > image_ft.shape[1] or self.crop_shape[1] > image_ft.shape[2]: + raise ValueError( + f"`crop_shape` should fit within the images shapes. Got {self.crop_shape} " + f"for `crop_shape` and {image_ft.shape} for " + f"`{image_ft.key}`." + ) + + # Check that all input images have the same shape. + first_image_ft = next(iter(self.image_features)) + for image_ft in self.image_features: + if image_ft.shape != first_image_ft.shape: + raise ValueError( + f"`{image_ft.key}` does not match `{first_image_ft.key}`, but we " + "expect all image shapes to match." + ) + @property def observation_delta_indices(self) -> list: return list(range(1 - self.n_obs_steps, 1)) diff --git a/lerobot/common/policies/diffusion/modeling_diffusion.py b/lerobot/common/policies/diffusion/modeling_diffusion.py index 9ba5626007..8299fee211 100644 --- a/lerobot/common/policies/diffusion/modeling_diffusion.py +++ b/lerobot/common/policies/diffusion/modeling_diffusion.py @@ -39,6 +39,7 @@ from lerobot.common.policies.utils import ( get_device_from_parameters, get_dtype_from_parameters, + get_output_shape, populate_queues, ) @@ -59,7 +60,7 @@ class DiffusionPolicy( def __init__( self, - config: DiffusionConfig | None = None, + config: DiffusionConfig, dataset_stats: dict[str, dict[str, Tensor]] | None = None, ): """ @@ -70,38 +71,32 @@ def __init__( that they will be passed with a call to `load_state_dict` before the policy is used. """ super().__init__() - if config is None: - config = DiffusionConfig() + config.validate_features() self.config = config - self.normalize_inputs = Normalize( - config.input_shapes, config.input_normalization_modes, dataset_stats - ) - self.normalize_targets = Normalize( - config.output_shapes, config.output_normalization_modes, dataset_stats - ) - self.unnormalize_outputs = Unnormalize( - config.output_shapes, config.output_normalization_modes, dataset_stats - ) + + self.normalize_inputs = Normalize(config.input_features, dataset_stats) + self.normalize_targets = Normalize(config.output_features, dataset_stats) + self.unnormalize_outputs = Unnormalize(config.output_features, dataset_stats) # queues are populated during rollout of the policy, they contain the n latest observations and actions self._queues = None self.diffusion = DiffusionModel(config) - self.expected_image_keys = [k for k in config.input_shapes if k.startswith("observation.image")] - self.use_env_state = "observation.environment_state" in config.input_shapes - self.reset() + def get_optim_params(self) -> dict: + return self.diffusion.parameters() + def reset(self): """Clear observation and action queues. 
Should be called on `env.reset()`""" self._queues = { "observation.state": deque(maxlen=self.config.n_obs_steps), "action": deque(maxlen=self.config.n_action_steps), } - if len(self.expected_image_keys) > 0: + if self.config.image_features: self._queues["observation.images"] = deque(maxlen=self.config.n_obs_steps) - if self.use_env_state: + if self.config.env_state_feature: self._queues["observation.environment_state"] = deque(maxlen=self.config.n_obs_steps) @torch.no_grad @@ -127,9 +122,11 @@ def select_action(self, batch: dict[str, Tensor]) -> Tensor: actually measured from the first observation which (if `n_obs_steps` > 1) happened in the past. """ batch = self.normalize_inputs(batch) - if len(self.expected_image_keys) > 0: + if self.config.image_features: batch = dict(batch) # shallow copy so that adding a key doesn't modify the original - batch["observation.images"] = torch.stack([batch[k] for k in self.expected_image_keys], dim=-4) + batch["observation.images"] = torch.stack( + [batch[ft.key] for ft in self.config.image_features], dim=-4 + ) # Note: It's important that this happens after stacking the images into a single key. self._queues = populate_queues(self._queues, batch) @@ -149,9 +146,11 @@ def select_action(self, batch: dict[str, Tensor]) -> Tensor: def forward(self, batch: dict[str, Tensor]) -> dict[str, Tensor]: """Run the batch through the model and compute the loss for training or validation.""" batch = self.normalize_inputs(batch) - if len(self.expected_image_keys) > 0: + if self.config.image_features: batch = dict(batch) # shallow copy so that adding a key doesn't modify the original - batch["observation.images"] = torch.stack([batch[k] for k in self.expected_image_keys], dim=-4) + batch["observation.images"] = torch.stack( + [batch[ft.key] for ft in self.config.image_features], dim=-4 + ) batch = self.normalize_targets(batch) loss = self.diffusion.compute_loss(batch) return {"loss": loss} @@ -176,12 +175,9 @@ def __init__(self, config: DiffusionConfig): self.config = config # Build observation encoders (depending on which observations are provided). - global_cond_dim = config.input_shapes["observation.state"][0] - num_images = len([k for k in config.input_shapes if k.startswith("observation.image")]) - self._use_images = False - self._use_env_state = False - if num_images > 0: - self._use_images = True + global_cond_dim = self.config.robot_state_feature.shape[0] + if self.config.image_features: + num_images = len(self.config.image_features) if self.config.use_separate_rgb_encoder_per_camera: encoders = [DiffusionRgbEncoder(config) for _ in range(num_images)] self.rgb_encoder = nn.ModuleList(encoders) @@ -189,9 +185,8 @@ def __init__(self, config: DiffusionConfig): else: self.rgb_encoder = DiffusionRgbEncoder(config) global_cond_dim += self.rgb_encoder.feature_dim * num_images - if "observation.environment_state" in config.input_shapes: - self._use_env_state = True - global_cond_dim += config.input_shapes["observation.environment_state"][0] + if self.config.env_state_feature: + global_cond_dim += self.config.env_state_feature.shape[0] self.unet = DiffusionConditionalUnet1d(config, global_cond_dim=global_cond_dim * config.n_obs_steps) @@ -220,7 +215,7 @@ def conditional_sample( # Sample prior. 
sample = torch.randn( - size=(batch_size, self.config.horizon, self.config.output_shapes["action"][0]), + size=(batch_size, self.config.horizon, self.action_feature.shape[0]), dtype=dtype, device=device, generator=generator, @@ -242,10 +237,10 @@ def conditional_sample( def _prepare_global_conditioning(self, batch: dict[str, Tensor]) -> Tensor: """Encode image features and concatenate them all together along with the state vector.""" - batch_size, n_obs_steps = batch["observation.state"].shape[:2] - global_cond_feats = [batch["observation.state"]] + batch_size, n_obs_steps = batch[self.config.robot_state_feature.key].shape[:2] + global_cond_feats = [batch[self.config.robot_state_feature.key]] # Extract image features. - if self._use_images: + if self.config.image_features: if self.config.use_separate_rgb_encoder_per_camera: # Combine batch and sequence dims while rearranging to make the camera index dimension first. images_per_camera = einops.rearrange(batch["observation.images"], "b s n ... -> n (b s) ...") @@ -272,8 +267,8 @@ def _prepare_global_conditioning(self, batch: dict[str, Tensor]) -> Tensor: ) global_cond_feats.append(img_features) - if self._use_env_state: - global_cond_feats.append(batch["observation.environment_state"]) + if self.config.env_state_feature: + global_cond_feats.append(batch[self.config.env_state_feature.key]) # Concatenate features then flatten to (B, global_cond_dim). return torch.cat(global_cond_feats, dim=-1).flatten(start_dim=1) @@ -443,7 +438,7 @@ def forward(self, features: Tensor) -> Tensor: class DiffusionRgbEncoder(nn.Module): - """Encoder an RGB image into a 1D feature vector. + """Encodes an RGB image into a 1D feature vector. Includes the ability to normalize and crop the image first. """ @@ -482,19 +477,17 @@ def __init__(self, config: DiffusionConfig): # Set up pooling and final layers. # Use a dry run to get the feature map shape. - # The dummy input should take the number of image channels from `config.input_shapes` and it should + # The dummy input should take the number of image channels from `config.image_features` and it should # use the height and width from `config.crop_shape` if it is provided, otherwise it should use the - # height and width from `config.input_shapes`. - image_keys = [k for k in config.input_shapes if k.startswith("observation.image")] + # height and width from `config.image_features`. + # Note: we have a check in the config class to make sure all images have the same shape. - image_key = image_keys[0] - dummy_input_h_w = ( - config.crop_shape if config.crop_shape is not None else config.input_shapes[image_key][1:] + dummy_shape_h_w = ( + config.crop_shape if config.crop_shape is not None else config.image_features[0].shape[1:] ) - dummy_input = torch.zeros(size=(1, config.input_shapes[image_key][0], *dummy_input_h_w)) - with torch.inference_mode(): - dummy_feature_map = self.backbone(dummy_input) - feature_map_shape = tuple(dummy_feature_map.shape[1:]) + dummy_shape = (1, config.image_features[0].shape[0], *dummy_shape_h_w) + feature_map_shape = get_output_shape(self.backbone, dummy_shape)[1:] + self.pool = SpatialSoftmax(feature_map_shape, num_kp=config.spatial_softmax_num_keypoints) self.feature_dim = config.spatial_softmax_num_keypoints * 2 self.out = nn.Linear(config.spatial_softmax_num_keypoints * 2, self.feature_dim) @@ -611,7 +604,7 @@ def __init__(self, config: DiffusionConfig, global_cond_dim: int): # In channels / out channels for each downsampling block in the Unet's encoder. 
For the decoder, we # just reverse these. - in_out = [(config.output_shapes["action"][0], config.down_dims[0])] + list( + in_out = [(config.action_feature.shape[0], config.down_dims[0])] + list( zip(config.down_dims[:-1], config.down_dims[1:], strict=True) ) @@ -666,7 +659,7 @@ def __init__(self, config: DiffusionConfig, global_cond_dim: int): self.final_conv = nn.Sequential( DiffusionConv1dBlock(config.down_dims[0], config.down_dims[0], kernel_size=config.kernel_size), - nn.Conv1d(config.down_dims[0], config.output_shapes["action"][0], 1), + nn.Conv1d(config.down_dims[0], config.action_feature.shape[0], 1), ) def forward(self, x: Tensor, timestep: Tensor | int, global_cond=None) -> Tensor: diff --git a/lerobot/common/policies/utils.py b/lerobot/common/policies/utils.py index 38da5820e8..baf2ab314b 100644 --- a/lerobot/common/policies/utils.py +++ b/lerobot/common/policies/utils.py @@ -60,7 +60,6 @@ def get_output_shape(module: nn.Module, input_shape: tuple) -> tuple: Returns: tuple: The output shape of the module. """ - # dummy_input = torch.randn(*input_shape) dummy_input = torch.zeros(size=input_shape) with torch.inference_mode(): output = module(dummy_input) From a82e004a66a31bb796dd2861630c24f8d5cda94e Mon Sep 17 00:00:00 2001 From: Simon Alibert Date: Mon, 23 Dec 2024 12:36:30 +0100 Subject: [PATCH 05/94] Add pusht and xarm env configs --- lerobot/common/envs/configs.py | 38 ++++++++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) diff --git a/lerobot/common/envs/configs.py b/lerobot/common/envs/configs.py index 9449e3deba..07846dd375 100644 --- a/lerobot/common/envs/configs.py +++ b/lerobot/common/envs/configs.py @@ -42,3 +42,41 @@ class AlohaEnv(EnvConfig): "render_mode": "rgb_array", } ) + + +@EnvConfig.register_subclass("pusht") +@dataclass +class PushtEnv(EnvConfig): + task: str = "PushT-v0" + state_dim: int = 2 + action_dim: int = 2 + image_size: int = 96 + fps: int = 10 + episode_length: int = 300 + gym: dict = field( + default_factory=lambda: { + "obs_type": "pixels_agent_pos", + "render_mode": "rgb_array", + "visualization_width": 384, + "visualization_height": 384, + } + ) + + +@EnvConfig.register_subclass("xarm") +@dataclass +class XarmEnv(EnvConfig): + task: str = "XarmLift-v0" + state_dim: int = 4 + action_dim: int = 4 + image_size: int = 84 + fps: int = 15 + episode_length: int = 200 + gym: dict = field( + default_factory=lambda: { + "obs_type": "pixels_agent_pos", + "render_mode": "rgb_array", + "visualization_width": 384, + "visualization_height": 384, + } + ) From d2ca27a32c6e46256dc70a30e37ed2ab17e99fef Mon Sep 17 00:00:00 2001 From: Simon Alibert Date: Mon, 23 Dec 2024 12:44:54 +0100 Subject: [PATCH 06/94] Update tdmpc --- .../policies/tdmpc/configuration_tdmpc.py | 63 +++++----- .../common/policies/tdmpc/modeling_tdmpc.py | 115 ++++++++---------- 2 files changed, 81 insertions(+), 97 deletions(-) diff --git a/lerobot/common/policies/tdmpc/configuration_tdmpc.py b/lerobot/common/policies/tdmpc/configuration_tdmpc.py index 7705a9c0cf..4623234303 100644 --- a/lerobot/common/policies/tdmpc/configuration_tdmpc.py +++ b/lerobot/common/policies/tdmpc/configuration_tdmpc.py @@ -16,7 +16,9 @@ # limitations under the License. 
from dataclasses import dataclass, field -from lerobot.configs.policies import PretrainedConfig +from lerobot.common.optim.optimizers import AdamConfig +from lerobot.common.optim.schedulers import NoneSchedulerConfig +from lerobot.configs.policies import NormalizationMode, PretrainedConfig @PretrainedConfig.register_subclass("tdmpc") @@ -110,23 +112,15 @@ class TDMPCConfig(PretrainedConfig): horizon: int = 5 n_action_steps: int = 1 - input_shapes: dict[str, list[int]] = field( + normalization_mapping: dict[str, NormalizationMode] = field( default_factory=lambda: { - "observation.image": [3, 84, 84], - "observation.state": [4], - } - ) - output_shapes: dict[str, list[int]] = field( - default_factory=lambda: { - "action": [4], + "VISUAL": NormalizationMode.MIN_MAX, + "STATE": NormalizationMode.MIN_MAX, + "ENV": NormalizationMode.MIN_MAX, + "ACTION": NormalizationMode.MIN_MAX, } ) - - # Normalization / Unnormalization - input_normalization_modes: dict[str, str] | None = None - output_normalization_modes: dict[str, str] = field( - default_factory=lambda: {"action": "min_max"}, - ) + normalize_inputs: bool = True # Architecture / modeling. # Neural networks. @@ -163,27 +157,16 @@ class TDMPCConfig(PretrainedConfig): # Target model. target_model_momentum: float = 0.995 + # Training presets + optimizer_lr: float = 3e-4 + def __post_init__(self): """Input validation (not exhaustive).""" - # There should only be one image key. - image_keys = {k for k in self.input_shapes if k.startswith("observation.image")} - if len(image_keys) > 1: - raise ValueError( - f"{self.__class__.__name__} handles at most one image for now. Got image keys {image_keys}." - ) - if len(image_keys) > 0: - image_key = next(iter(image_keys)) - if self.input_shapes[image_key][-2] != self.input_shapes[image_key][-1]: - # TODO(alexander-soare): This limitation is solely because of code in the random shift - # augmentation. It should be able to be removed. - raise ValueError( - f"Only square images are handled now. Got image shape {self.input_shapes[image_key]}." - ) if self.n_gaussian_samples <= 0: raise ValueError( f"The number of guassian samples for CEM should be non-zero. Got `{self.n_gaussian_samples=}`" ) - if self.output_normalization_modes != {"action": "min_max"}: + if self.normalization_mapping["ACTION"] is not NormalizationMode.MIN_MAX: raise ValueError( "TD-MPC assumes the action space dimensions to all be in [-1, 1]. Therefore it is strongly " f"advised that you stick with the default. See {self.__class__.__name__} docstring for more " @@ -203,6 +186,26 @@ def __post_init__(self): if self.n_action_steps > self.horizon: raise ValueError("`n_action_steps` must be less than or equal to `horizon`.") + def get_optimizer_preset(self) -> AdamConfig: + return AdamConfig(lr=self.optimizer_lr) + + def get_scheduler_preset(self) -> NoneSchedulerConfig: + return NoneSchedulerConfig() + + def validate_features(self) -> None: + # There should only be one image key. + if len(self.image_features) > 1: + raise ValueError( + f"{self.__class__.__name__} handles at most one image for now. Got image keys {self.image_features}." + ) + + if len(self.image_features) > 0: + image_ft = next(iter(self.image_features)) + if image_ft.shape[-2] != image_ft.shape[-1]: + # TODO(alexander-soare): This limitation is solely because of code in the random shift + # augmentation. It should be able to be removed. + raise ValueError(f"Only square images are handled now. 
Got image shape {image_ft.shape}.") + @property def observation_delta_indices(self) -> list: return list(range(self.horizon + 1)) diff --git a/lerobot/common/policies/tdmpc/modeling_tdmpc.py b/lerobot/common/policies/tdmpc/modeling_tdmpc.py index d97c4824c4..e124c25d88 100644 --- a/lerobot/common/policies/tdmpc/modeling_tdmpc.py +++ b/lerobot/common/policies/tdmpc/modeling_tdmpc.py @@ -38,7 +38,7 @@ from lerobot.common.policies.normalize import Normalize, Unnormalize from lerobot.common.policies.tdmpc.configuration_tdmpc import TDMPCConfig -from lerobot.common.policies.utils import get_device_from_parameters, populate_queues +from lerobot.common.policies.utils import get_device_from_parameters, get_output_shape, populate_queues class TDMPCPolicy( @@ -67,9 +67,7 @@ class TDMPCPolicy( name = "tdmpc" - def __init__( - self, config: TDMPCConfig | None = None, dataset_stats: dict[str, dict[str, Tensor]] | None = None - ): + def __init__(self, config: TDMPCConfig, dataset_stats: dict[str, dict[str, Tensor]] | None = None): """ Args: config: Policy configuration class instance or None, in which case the default instantiation of @@ -78,41 +76,25 @@ def __init__( that they will be passed with a call to `load_state_dict` before the policy is used. """ super().__init__() - - if config is None: - config = TDMPCConfig() + config.validate_features() self.config = config + + self.normalize_inputs = ( + Normalize(config.input_features, dataset_stats) if config.normalize_inputs else nn.Identity() + ) + self.normalize_targets = Normalize(config.output_features, dataset_stats) + self.unnormalize_outputs = Unnormalize(config.output_features, dataset_stats) + self.model = TDMPCTOLD(config) self.model_target = deepcopy(self.model) for param in self.model_target.parameters(): param.requires_grad = False - if config.input_normalization_modes is not None: - self.normalize_inputs = Normalize( - config.input_shapes, config.input_normalization_modes, dataset_stats - ) - else: - self.normalize_inputs = nn.Identity() - self.normalize_targets = Normalize( - config.output_shapes, config.output_normalization_modes, dataset_stats - ) - self.unnormalize_outputs = Unnormalize( - config.output_shapes, config.output_normalization_modes, dataset_stats - ) - - image_keys = [k for k in config.input_shapes if k.startswith("observation.image")] - # Note: This check is covered in the post-init of the config but have a sanity check just in case. - self._use_image = False - self._use_env_state = False - if len(image_keys) > 0: - assert len(image_keys) == 1 - self._use_image = True - self.input_image_key = image_keys[0] - if "observation.environment_state" in config.input_shapes: - self._use_env_state = True - self.reset() + def get_optim_params(self) -> dict: + return self.parameters() + def reset(self): """ Clear observation and action queues. Clear previous means for warm starting of MPPI/CEM. Should be @@ -122,9 +104,9 @@ def reset(self): "observation.state": deque(maxlen=1), "action": deque(maxlen=max(self.config.n_action_steps, self.config.n_action_repeats)), } - if self._use_image: + if self.config.image_features: self._queues["observation.image"] = deque(maxlen=1) - if self._use_env_state: + if self.config.env_state_feature: self._queues["observation.environment_state"] = deque(maxlen=1) # Previous mean obtained from the cross-entropy method (CEM) used during MPC. It is used to warm start # CEM for the next step. 
@@ -134,9 +116,9 @@ def reset(self): def select_action(self, batch: dict[str, Tensor]) -> Tensor: """Select a single action given environment observations.""" batch = self.normalize_inputs(batch) - if self._use_image: + if self.config.image_features: batch = dict(batch) # shallow copy so that adding a key doesn't modify the original - batch["observation.image"] = batch[self.input_image_key] + batch["observation.image"] = batch[self.config.image_features[0].key] self._queues = populate_queues(self._queues, batch) @@ -151,9 +133,9 @@ def select_action(self, batch: dict[str, Tensor]) -> Tensor: # NOTE: Order of observations matters here. encode_keys = [] - if self._use_image: + if self.config.image_features: encode_keys.append("observation.image") - if self._use_env_state: + if self.config.env_state_feature: encode_keys.append("observation.environment_state") encode_keys.append("observation.state") z = self.model.encode({k: batch[k] for k in encode_keys}) @@ -196,7 +178,7 @@ def plan(self, z: Tensor) -> Tensor: self.config.horizon, self.config.n_pi_samples, batch_size, - self.config.output_shapes["action"][0], + self.config.action_feature.shape[0], device=device, ) if self.config.n_pi_samples > 0: @@ -215,7 +197,7 @@ def plan(self, z: Tensor) -> Tensor: # algorithm. # The initial mean and standard deviation for the cross-entropy method (CEM). mean = torch.zeros( - self.config.horizon, batch_size, self.config.output_shapes["action"][0], device=device + self.config.horizon, batch_size, self.config.action_feature.shape[0], device=device ) # Maybe warm start CEM with the mean from the previous step. if self._prev_mean is not None: @@ -228,7 +210,7 @@ def plan(self, z: Tensor) -> Tensor: self.config.horizon, self.config.n_gaussian_samples, batch_size, - self.config.output_shapes["action"][0], + self.config.action_feature.shape[0], device=std.device, ) gaussian_actions = torch.clamp(mean.unsqueeze(1) + std.unsqueeze(1) * std_normal_noise, -1, 1) @@ -330,9 +312,9 @@ def forward(self, batch: dict[str, Tensor]) -> dict[str, Tensor | float]: device = get_device_from_parameters(self) batch = self.normalize_inputs(batch) - if self._use_image: + if self.config.image_features: batch = dict(batch) # shallow copy so that adding a key doesn't modify the original - batch["observation.image"] = batch[self.input_image_key] + batch["observation.image"] = batch[self.config.image_features[0].key] batch = self.normalize_targets(batch) info = {} @@ -347,7 +329,7 @@ def forward(self, batch: dict[str, Tensor]) -> dict[str, Tensor | float]: observations = {k: v for k, v in batch.items() if k.startswith("observation.")} # Apply random image augmentations. - if self._use_image and self.config.max_random_shift_ratio > 0: + if self.config.image_features and self.config.max_random_shift_ratio > 0: observations["observation.image"] = flatten_forward_unflatten( partial(random_shifts_aug, max_random_shift_ratio=self.config.max_random_shift_ratio), observations["observation.image"], @@ -360,7 +342,7 @@ def forward(self, batch: dict[str, Tensor]) -> dict[str, Tensor | float]: current_observation[k] = observations[k][0] next_observations[k] = observations[k][1:] horizon, batch_size = next_observations[ - "observation.image" if self._use_image else "observation.environment_state" + "observation.image" if self.config.image_features else "observation.environment_state" ].shape[:2] # Run latent rollout using the latent dynamics model and policy model. 
@@ -543,7 +525,7 @@ def __init__(self, config: TDMPCConfig): self.config = config self._encoder = TDMPCObservationEncoder(config) self._dynamics = nn.Sequential( - nn.Linear(config.latent_dim + config.output_shapes["action"][0], config.mlp_dim), + nn.Linear(config.latent_dim + config.action_feature.shape[0], config.mlp_dim), nn.LayerNorm(config.mlp_dim), nn.Mish(), nn.Linear(config.mlp_dim, config.mlp_dim), @@ -554,7 +536,7 @@ def __init__(self, config: TDMPCConfig): nn.Sigmoid(), ) self._reward = nn.Sequential( - nn.Linear(config.latent_dim + config.output_shapes["action"][0], config.mlp_dim), + nn.Linear(config.latent_dim + config.action_feature.shape[0], config.mlp_dim), nn.LayerNorm(config.mlp_dim), nn.Mish(), nn.Linear(config.mlp_dim, config.mlp_dim), @@ -569,12 +551,12 @@ def __init__(self, config: TDMPCConfig): nn.Linear(config.mlp_dim, config.mlp_dim), nn.LayerNorm(config.mlp_dim), nn.Mish(), - nn.Linear(config.mlp_dim, config.output_shapes["action"][0]), + nn.Linear(config.mlp_dim, config.action_feature.shape[0]), ) self._Qs = nn.ModuleList( [ nn.Sequential( - nn.Linear(config.latent_dim + config.output_shapes["action"][0], config.mlp_dim), + nn.Linear(config.latent_dim + config.action_feature.shape[0], config.mlp_dim), nn.LayerNorm(config.mlp_dim), nn.Tanh(), nn.Linear(config.mlp_dim, config.mlp_dim), @@ -714,11 +696,9 @@ def __init__(self, config: TDMPCConfig): super().__init__() self.config = config - if "observation.image" in config.input_shapes: + if config.image_features: self.image_enc_layers = nn.Sequential( - nn.Conv2d( - config.input_shapes["observation.image"][0], config.image_encoder_hidden_dim, 7, stride=2 - ), + nn.Conv2d(config.image_features[0].shape[0], config.image_encoder_hidden_dim, 7, stride=2), nn.ReLU(), nn.Conv2d(config.image_encoder_hidden_dim, config.image_encoder_hidden_dim, 5, stride=2), nn.ReLU(), @@ -727,9 +707,8 @@ def __init__(self, config: TDMPCConfig): nn.Conv2d(config.image_encoder_hidden_dim, config.image_encoder_hidden_dim, 3, stride=2), nn.ReLU(), ) - dummy_batch = torch.zeros(1, *config.input_shapes["observation.image"]) - with torch.inference_mode(): - out_shape = self.image_enc_layers(dummy_batch).shape[1:] + dummy_shape = (1, *config.image_features[0].shape) + out_shape = get_output_shape(self.image_enc_layers, dummy_shape)[1:] self.image_enc_layers.extend( nn.Sequential( nn.Flatten(), @@ -738,19 +717,19 @@ def __init__(self, config: TDMPCConfig): nn.Sigmoid(), ) ) - if "observation.state" in config.input_shapes: + + if config.robot_state_feature: self.state_enc_layers = nn.Sequential( - nn.Linear(config.input_shapes["observation.state"][0], config.state_encoder_hidden_dim), + nn.Linear(config.robot_state_feature.shape[0], config.state_encoder_hidden_dim), nn.ELU(), nn.Linear(config.state_encoder_hidden_dim, config.latent_dim), nn.LayerNorm(config.latent_dim), nn.Sigmoid(), ) - if "observation.environment_state" in config.input_shapes: + + if config.env_state_feature: self.env_state_enc_layers = nn.Sequential( - nn.Linear( - config.input_shapes["observation.environment_state"][0], config.state_encoder_hidden_dim - ), + nn.Linear(config.env_state_feature.shape[0], config.state_encoder_hidden_dim), nn.ELU(), nn.Linear(config.state_encoder_hidden_dim, config.latent_dim), nn.LayerNorm(config.latent_dim), @@ -765,12 +744,14 @@ def forward(self, obs_dict: dict[str, Tensor]) -> Tensor: """ feat = [] # NOTE: Order of observations matters here. 
- if "observation.image" in self.config.input_shapes: - feat.append(flatten_forward_unflatten(self.image_enc_layers, obs_dict["observation.image"])) - if "observation.environment_state" in self.config.input_shapes: - feat.append(self.env_state_enc_layers(obs_dict["observation.environment_state"])) - if "observation.state" in self.config.input_shapes: - feat.append(self.state_enc_layers(obs_dict["observation.state"])) + if self.config.image_features: + feat.append( + flatten_forward_unflatten(self.image_enc_layers, obs_dict[self.config.image_features[0].key]) + ) + if self.config.env_state_feature: + feat.append(self.env_state_enc_layers(obs_dict[self.config.env_state_feature.key])) + if self.config.robot_state_feature: + feat.append(self.state_enc_layers(obs_dict[self.config.robot_state_feature.key])) return torch.stack(feat, dim=0).mean(0) From 250e3808be7852337598c4bbe5ccb372a7a8d627 Mon Sep 17 00:00:00 2001 From: Simon Alibert Date: Mon, 23 Dec 2024 12:49:06 +0100 Subject: [PATCH 07/94] Update vqbet --- .../policies/vqbet/configuration_vqbet.py | 77 ++++++---- .../common/policies/vqbet/modeling_vqbet.py | 139 +++++++++--------- 2 files changed, 118 insertions(+), 98 deletions(-) diff --git a/lerobot/common/policies/vqbet/configuration_vqbet.py b/lerobot/common/policies/vqbet/configuration_vqbet.py index 4d8ce94cfe..181304f66f 100644 --- a/lerobot/common/policies/vqbet/configuration_vqbet.py +++ b/lerobot/common/policies/vqbet/configuration_vqbet.py @@ -18,7 +18,9 @@ from dataclasses import dataclass, field -from lerobot.configs.policies import PretrainedConfig +from lerobot.common.optim.optimizers import AdamConfig +from lerobot.common.optim.schedulers import VQBeTSchedulerConfig +from lerobot.configs.policies import NormalizationMode, PretrainedConfig @PretrainedConfig.register_subclass("vqbet") @@ -93,27 +95,14 @@ class VQBeTConfig(PretrainedConfig): n_action_pred_token: int = 3 action_chunk_size: int = 5 - input_shapes: dict[str, list[int]] = field( + normalization_mapping: dict[str, NormalizationMode] = field( default_factory=lambda: { - "observation.image": [3, 96, 96], - "observation.state": [2], - } - ) - output_shapes: dict[str, list[int]] = field( - default_factory=lambda: { - "action": [2], + "VISUAL": NormalizationMode.MEAN_STD, + "STATE": NormalizationMode.MIN_MAX, + "ACTION": NormalizationMode.MIN_MAX, } ) - # Normalization / Unnormalization - input_normalization_modes: dict[str, str] = field( - default_factory=lambda: { - "observation.image": "mean_std", - "observation.state": "min_max", - } - ) - output_normalization_modes: dict[str, str] = field(default_factory=lambda: {"action": "min_max"}) - # Architecture / modeling. # Vision backbone. vision_backbone: str = "resnet18" @@ -142,30 +131,56 @@ class VQBeTConfig(PretrainedConfig): bet_softmax_temperature: float = 0.1 sequentially_select: bool = False + # Training presets + optimizer_lr: float = 1e-4 + optimizer_betas: tuple = (0.95, 0.999) + optimizer_eps: float = 1e-8 + optimizer_weight_decay: float = 1e-6 + optimizer_vqvae_lr: float = 1e-3 + scheduler_warmup_steps: int = 500 + def __post_init__(self): """Input validation (not exhaustive).""" if not self.vision_backbone.startswith("resnet"): raise ValueError( f"`vision_backbone` must be one of the ResNet variants. Got {self.vision_backbone}." 
) - image_keys = {k for k in self.input_shapes if k.startswith("observation.image")} + + def get_optimizer_preset(self) -> AdamConfig: + return AdamConfig( + lr=self.optimizer_lr, + betas=self.optimizer_betas, + eps=self.optimizer_eps, + weight_decay=self.optimizer_weight_decay, + ) + + def get_scheduler_preset(self) -> VQBeTSchedulerConfig: + return VQBeTSchedulerConfig( + num_warmup_steps=self.scheduler_warmup_steps, + num_vqvae_training_steps=self.n_vqvae_training_steps, + ) + + def validate_features(self) -> None: + # Note: this check was previously performed inside VQBeTRgbEncoder in the form of + # assert len(image_keys) == 1 + if not len(self.image_features) == 1: + raise ValueError("You must provide at least one image among the inputs.") + if self.crop_shape is not None: - for image_key in image_keys: - if ( - self.crop_shape[0] > self.input_shapes[image_key][1] - or self.crop_shape[1] > self.input_shapes[image_key][2] - ): + for image_ft in self.image_features: + if self.crop_shape[0] > image_ft.shape[1] or self.crop_shape[1] > image_ft.shape[2]: raise ValueError( - f"`crop_shape` should fit within `input_shapes[{image_key}]`. Got {self.crop_shape} " - f"for `crop_shape` and {self.input_shapes[image_key]} for " - "`input_shapes[{image_key}]`." + f"`crop_shape` should fit within the images shapes. Got {self.crop_shape} " + f"for `crop_shape` and {image_ft.shape} for " + f"`{image_ft.key}`." ) + # Check that all input images have the same shape. - first_image_key = next(iter(image_keys)) - for image_key in image_keys: - if self.input_shapes[image_key] != self.input_shapes[first_image_key]: + first_image_ft = next(iter(self.image_features)) + for image_ft in self.image_features: + if image_ft.shape != first_image_ft.shape: raise ValueError( - f"`input_shapes[{image_key}]` does not match `input_shapes[{first_image_key}]`, but we " + f"`{image_ft.key}` does not match `{first_image_ft.key}`, but we " "expect all image shapes to match." ) diff --git a/lerobot/common/policies/vqbet/modeling_vqbet.py b/lerobot/common/policies/vqbet/modeling_vqbet.py index 98adce00b1..ee0c12feb2 100644 --- a/lerobot/common/policies/vqbet/modeling_vqbet.py +++ b/lerobot/common/policies/vqbet/modeling_vqbet.py @@ -16,7 +16,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -import math import warnings from collections import deque from typing import Callable, List @@ -28,10 +27,9 @@ import torchvision from huggingface_hub import PyTorchModelHubMixin from torch import Tensor, nn -from torch.optim.lr_scheduler import LambdaLR from lerobot.common.policies.normalize import Normalize, Unnormalize -from lerobot.common.policies.utils import get_device_from_parameters, populate_queues +from lerobot.common.policies.utils import get_device_from_parameters, get_output_shape, populate_queues from lerobot.common.policies.vqbet.configuration_vqbet import VQBeTConfig from lerobot.common.policies.vqbet.vqbet_utils import GPT, ResidualVQ @@ -64,25 +62,58 @@ def __init__( that they will be passed with a call to `load_state_dict` before the policy is used. 
""" super().__init__() - if config is None: - config = VQBeTConfig() + config.validate_features() self.config = config - self.normalize_inputs = Normalize( - config.input_shapes, config.input_normalization_modes, dataset_stats - ) - self.normalize_targets = Normalize( - config.output_shapes, config.output_normalization_modes, dataset_stats - ) - self.unnormalize_outputs = Unnormalize( - config.output_shapes, config.output_normalization_modes, dataset_stats - ) - self.vqbet = VQBeTModel(config) + self.normalize_inputs = Normalize(config.input_features, dataset_stats) + self.normalize_targets = Normalize(config.output_features, dataset_stats) + self.unnormalize_outputs = Unnormalize(config.output_features, dataset_stats) - self.expected_image_keys = [k for k in config.input_shapes if k.startswith("observation.image")] + self.vqbet = VQBeTModel(config) self.reset() + def get_optim_params(self) -> dict: + vqvae_params = ( + list(self.vqbet.action_head.vqvae_model.encoder.parameters()) + + list(self.vqbet.action_head.vqvae_model.decoder.parameters()) + + list(self.vqbet.action_head.vqvae_model.vq_layer.parameters()) + ) + decay_params, no_decay_params = self.vqbet.policy.configure_parameters() + decay_params = ( + decay_params + + list(self.vqbet.rgb_encoder.parameters()) + + list(self.vqbet.state_projector.parameters()) + + list(self.vqbet.rgb_feature_projector.parameters()) + + [self.vqbet.action_token] + + list(self.vqbet.action_head.map_to_cbet_preds_offset.parameters()) + ) + + if self.config.sequentially_select: + decay_params = ( + decay_params + + list(self.vqbet.action_head.map_to_cbet_preds_primary_bin.parameters()) + + list(self.vqbet.action_head.map_to_cbet_preds_secondary_bin.parameters()) + ) + else: + decay_params = decay_params + list(self.vqbet.action_head.map_to_cbet_preds_bin.parameters()) + + return [ + { + "params": decay_params, + # "weight_decay": cfg.training.adam_weight_decay, + }, + { + "params": vqvae_params, + "weight_decay": 1e-4, + "lr": self.config.optimizer_vqvae_lr, + }, + { + "params": no_decay_params, + "weight_decay": 0.0, + }, + ] + def reset(self): """ Clear observation and action queues. Should be called on `env.reset()` @@ -105,7 +136,9 @@ def select_action(self, batch: dict[str, Tensor]) -> Tensor: batch = self.normalize_inputs(batch) batch = dict(batch) # shallow copy so that adding a key doesn't modify the original - batch["observation.images"] = torch.stack([batch[k] for k in self.expected_image_keys], dim=-4) + batch["observation.images"] = torch.stack( + [batch[ft.key] for ft in self.config.image_features], dim=-4 + ) # Note: It's important that this happens after stacking the images into a single key. 
self._queues = populate_queues(self._queues, batch) @@ -131,7 +164,9 @@ def forward(self, batch: dict[str, Tensor]) -> dict[str, Tensor]: """Run the batch through the model and compute the loss for training or validation.""" batch = self.normalize_inputs(batch) batch = dict(batch) # shallow copy so that adding a key doesn't modify the original - batch["observation.images"] = torch.stack([batch[k] for k in self.expected_image_keys], dim=-4) + batch["observation.images"] = torch.stack( + [batch[ft.key] for ft in self.config.image_features], dim=-4 + ) batch = self.normalize_targets(batch) # VQ-BeT discretizes action using VQ-VAE before training BeT (please refer to section 3.2 in the VQ-BeT paper https://arxiv.org/pdf/2403.03181) if not self.vqbet.action_head.vqvae_model.discretized.item(): @@ -288,14 +323,14 @@ def __init__(self, config: VQBeTConfig): self.config = config self.rgb_encoder = VQBeTRgbEncoder(config) - self.num_images = len([k for k in config.input_shapes if k.startswith("observation.image")]) + self.num_images = len(self.config.image_features) # This action query token is used as a prompt for querying action chunks. Please refer to "A_Q" in the image above. # Note: During the forward pass, this token is repeated as many times as needed. The authors also experimented with initializing the necessary number of tokens independently and observed inferior results. self.action_token = nn.Parameter(torch.randn(1, 1, self.config.gpt_input_dim)) # To input state and observation features into GPT layers, we first project the features to fit the shape of input size of GPT. self.state_projector = MLP( - config.input_shapes["observation.state"][0], hidden_channels=[self.config.gpt_input_dim] + config.robot_state_feature.shape[0], hidden_channels=[self.config.gpt_input_dim] ) self.rgb_feature_projector = MLP( self.rgb_encoder.feature_dim, hidden_channels=[self.config.gpt_input_dim] @@ -350,10 +385,10 @@ def forward(self, batch: dict[str, Tensor], rollout: bool) -> Tensor: # get action features (pass through GPT) features = self.policy(input_tokens) - # len(self.config.input_shapes) is the number of different observation modes. + # len(self.config.input_features) is the number of different observation modes. # this line gets the index of action prompt tokens. - historical_act_pred_index = np.arange(0, n_obs_steps) * (len(self.config.input_shapes) + 1) + len( - self.config.input_shapes + historical_act_pred_index = np.arange(0, n_obs_steps) * (len(self.config.input_features) + 1) + len( + self.config.input_features ) # only extract the output tokens at the position of action query: @@ -392,7 +427,7 @@ def __init__(self, config: VQBeTConfig): self.map_to_cbet_preds_offset: output the predicted offsets for all the codes in all the layers. The input dimension of ` self.map_to_cbet_preds_offset` is same with the output of GPT, - and the output dimension of ` self.map_to_cbet_preds_offset` is `self.vqvae_model.vqvae_num_layers (=fixed as 2) * self.config.vqvae_n_embed * config.action_chunk_size * config.output_shapes["action"][0]`. + and the output dimension of ` self.map_to_cbet_preds_offset` is `self.vqvae_model.vqvae_num_layers (=fixed as 2) * self.config.vqvae_n_embed * config.action_chunk_size * config.action_feature.shape[0]`. 
""" super().__init__() @@ -419,7 +454,7 @@ def __init__(self, config: VQBeTConfig): self.vqvae_model.vqvae_num_layers * self.config.vqvae_n_embed * config.action_chunk_size - * config.output_shapes["action"][0], + * config.action_feature.shape[0], ], ) # loss @@ -674,33 +709,6 @@ def __init__(self, policy, cfg): ) -class VQBeTScheduler(nn.Module): - def __init__(self, optimizer, cfg): - super().__init__() - n_vqvae_training_steps = cfg.training.n_vqvae_training_steps - - num_warmup_steps = cfg.training.lr_warmup_steps - num_training_steps = cfg.training.offline_steps - num_cycles = 0.5 - - def lr_lambda(current_step): - if current_step < n_vqvae_training_steps: - return float(1) - else: - current_step = current_step - n_vqvae_training_steps - if current_step < num_warmup_steps: - return float(current_step) / float(max(1, num_warmup_steps)) - progress = float(current_step - num_warmup_steps) / float( - max(1, num_training_steps - num_warmup_steps) - ) - return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress))) - - self.lr_scheduler = LambdaLR(optimizer, lr_lambda, -1) - - def step(self): - self.lr_scheduler.step() - - class VQBeTRgbEncoder(nn.Module): """Encode an RGB image into a 1D feature vector. @@ -743,19 +751,16 @@ def __init__(self, config: VQBeTConfig): # Set up pooling and final layers. # Use a dry run to get the feature map shape. - # The dummy input should take the number of image channels from `config.input_shapes` and it should + # The dummy input should take the number of image channels from `config.image_features` and it should # use the height and width from `config.crop_shape` if it is provided, otherwise it should use the - # height and width from `config.input_shapes`. - image_keys = [k for k in config.input_shapes if k.startswith("observation.image")] - assert len(image_keys) == 1 - image_key = image_keys[0] - dummy_input_h_w = ( - config.crop_shape if config.crop_shape is not None else config.input_shapes[image_key][1:] + # height and width from `config.image_features`. + + dummy_shape_h_w = ( + config.crop_shape if config.crop_shape is not None else config.image_features[0].shape[1:] ) - dummy_input = torch.zeros(size=(1, config.input_shapes[image_key][0], *dummy_input_h_w)) - with torch.inference_mode(): - dummy_feature_map = self.backbone(dummy_input) - feature_map_shape = tuple(dummy_feature_map.shape[1:]) + dummy_shape = (1, config.image_features[0].shape[0], *dummy_shape_h_w) + feature_map_shape = get_output_shape(self.backbone, dummy_shape)[1:] + self.pool = SpatialSoftmax(feature_map_shape, num_kp=config.spatial_softmax_num_keypoints) self.feature_dim = config.spatial_softmax_num_keypoints * 2 self.out = nn.Linear(config.spatial_softmax_num_keypoints * 2, self.feature_dim) @@ -844,7 +849,7 @@ def __init__( ) self.encoder = MLP( - in_channels=self.config.output_shapes["action"][0] * self.config.action_chunk_size, + in_channels=self.config.action_feature.shape[0] * self.config.action_chunk_size, hidden_channels=[ config.vqvae_enc_hidden_dim, config.vqvae_enc_hidden_dim, @@ -856,7 +861,7 @@ def __init__( hidden_channels=[ config.vqvae_enc_hidden_dim, config.vqvae_enc_hidden_dim, - self.config.output_shapes["action"][0] * self.config.action_chunk_size, + self.config.action_feature.shape[0] * self.config.action_chunk_size, ], ) @@ -872,9 +877,9 @@ def get_action_from_latent(self, latent): # given latent vector, this function outputs the decoded action. 
output = self.decoder(latent) if self.config.action_chunk_size == 1: - return einops.rearrange(output, "N (T A) -> N T A", A=self.config.output_shapes["action"][0]) + return einops.rearrange(output, "N (T A) -> N T A", A=self.config.action_feature.shape[0]) else: - return einops.rearrange(output, "N (T A) -> N T A", A=self.config.output_shapes["action"][0]) + return einops.rearrange(output, "N (T A) -> N T A", A=self.config.action_feature.shape[0]) def get_code(self, state): # in phase 2 of VQ-BeT training, we need a `ground truth labels of action data` to calculate the Focal loss for code prediction head. (please refer to section 3.3 in the paper https://arxiv.org/pdf/2403.03181) From d8ad763c4ebaadefd58e9120a7c6beea3d8243ec Mon Sep 17 00:00:00 2001 From: Simon Alibert Date: Mon, 23 Dec 2024 12:53:16 +0100 Subject: [PATCH 08/94] Fix poetry relax --- poetry.lock | 2 +- pyproject.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/poetry.lock b/poetry.lock index f8d1cf2028..df7a7b9aa4 100644 --- a/poetry.lock +++ b/poetry.lock @@ -7655,4 +7655,4 @@ xarm = ["gym-xarm"] [metadata] lock-version = "2.0" python-versions = ">=3.10,<3.13" -content-hash = "0ef550e648ac7eae32b18584d4facb7a83cf1e0ee9a6705daf89783fb56db8fb" +content-hash = "5350e7ec386ad3a610c0cb546a1681762cc8c6d37eef47fd833ab958b815b260" diff --git a/pyproject.toml b/pyproject.toml index 2e7e1328f2..5fd6084e88 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -71,7 +71,7 @@ pyrender = {git = "https://github.com/mmatl/pyrender.git", markers = "sys_platfo hello-robot-stretch-body = {version = ">=0.7.27", markers = "sys_platform == 'linux'", optional = true} pyserial = {version = ">=3.5", optional = true} jsonlines = ">=4.0.0" -draccus = "^0.9.3" +draccus = ">=0.9.3" [tool.poetry.extras] From 928a417041580042e5b35d96010e8ffcd2832889 Mon Sep 17 00:00:00 2001 From: Simon Alibert Date: Fri, 27 Dec 2024 17:21:57 +0100 Subject: [PATCH 09/94] Add feature types to envs --- lerobot/common/envs/configs.py | 40 +++++++++++++++++++++------------- 1 file changed, 25 insertions(+), 15 deletions(-) diff --git a/lerobot/common/envs/configs.py b/lerobot/common/envs/configs.py index 07846dd375..1c4042e049 100644 --- a/lerobot/common/envs/configs.py +++ b/lerobot/common/envs/configs.py @@ -2,20 +2,15 @@ import draccus - -@dataclass -class GymConfig: - obs_type: str = "pixels_agent_pos" - render_mode: str = "rgb_array" +from lerobot.configs.types import FeatureType @dataclass class EnvConfig(draccus.ChoiceRegistry): n_envs: int | None = None task: str | None = None - state_dim: int = 18 - action_dim: int = 18 fps: int = 30 + feature_types: dict = field(default_factory=dict) @property def type(self) -> str: @@ -32,10 +27,17 @@ class RealEnv(EnvConfig): @dataclass class AlohaEnv(EnvConfig): task: str = "AlohaInsertion-v0" - state_dim: int = 14 - action_dim: int = 14 fps: int = 50 episode_length: int = 400 + feature_types: dict = field( + default_factory=lambda: { + "agent_pos": FeatureType.STATE, + "pixels": { + "top": FeatureType.VISUAL, + }, + "action": FeatureType.ACTION, + } + ) gym: dict = field( default_factory=lambda: { "obs_type": "pixels_agent_pos", @@ -48,11 +50,15 @@ class AlohaEnv(EnvConfig): @dataclass class PushtEnv(EnvConfig): task: str = "PushT-v0" - state_dim: int = 2 - action_dim: int = 2 - image_size: int = 96 fps: int = 10 episode_length: int = 300 + feature_types: dict = field( + default_factory=lambda: { + "agent_pos": FeatureType.STATE, + "pixels": FeatureType.VISUAL, + "action": FeatureType.ACTION, + } + ) 
gym: dict = field( default_factory=lambda: { "obs_type": "pixels_agent_pos", @@ -67,11 +73,15 @@ class PushtEnv(EnvConfig): @dataclass class XarmEnv(EnvConfig): task: str = "XarmLift-v0" - state_dim: int = 4 - action_dim: int = 4 - image_size: int = 84 fps: int = 15 episode_length: int = 200 + feature_types: dict = field( + default_factory=lambda: { + "agent_pos": FeatureType.STATE, + "pixels": FeatureType.VISUAL, + "action": FeatureType.ACTION, + } + ) gym: dict = field( default_factory=lambda: { "obs_type": "pixels_agent_pos", From b5f328701225437463d1bbfbfc3d932a95f20205 Mon Sep 17 00:00:00 2001 From: Simon Alibert Date: Fri, 27 Dec 2024 17:25:51 +0100 Subject: [PATCH 10/94] Add EvalPipelineConfig, parse features from envs --- lerobot/common/datasets/factory.py | 4 +- lerobot/common/datasets/utils.py | 13 ++ lerobot/common/envs/factory.py | 25 ++- lerobot/common/envs/utils.py | 1 + lerobot/common/logger.py | 6 +- lerobot/common/optim/factory.py | 8 +- .../common/policies/act/configuration_act.py | 5 +- .../diffusion/configuration_diffusion.py | 5 +- .../policies/diffusion/modeling_diffusion.py | 2 +- lerobot/common/policies/factory.py | 38 +++- lerobot/common/policies/normalize.py | 3 +- .../policies/tdmpc/configuration_tdmpc.py | 5 +- lerobot/common/policies/utils.py | 31 +++ .../policies/vqbet/configuration_vqbet.py | 6 +- .../common/policies/vqbet/modeling_vqbet.py | 54 +---- lerobot/common/robot_devices/control_utils.py | 4 +- lerobot/configs/default.py | 184 ------------------ lerobot/configs/eval.py | 75 +++++++ lerobot/configs/policies.py | 87 +++++++-- lerobot/configs/training.py | 163 ++++++++++++++++ lerobot/configs/types.py | 20 ++ lerobot/scripts/control_sim_robot.py | 2 +- lerobot/scripts/eval.py | 142 ++------------ lerobot/scripts/train.py | 83 ++++---- 24 files changed, 506 insertions(+), 460 deletions(-) create mode 100644 lerobot/configs/eval.py create mode 100644 lerobot/configs/training.py create mode 100644 lerobot/configs/types.py diff --git a/lerobot/common/datasets/factory.py b/lerobot/common/datasets/factory.py index 9e2730eee2..a11c4f8604 100644 --- a/lerobot/common/datasets/factory.py +++ b/lerobot/common/datasets/factory.py @@ -24,8 +24,8 @@ MultiLeRobotDataset, ) from lerobot.common.datasets.transforms import ImageTransforms -from lerobot.configs.default import MainConfig from lerobot.configs.policies import PretrainedConfig +from lerobot.configs.training import TrainPipelineConfig IMAGENET_STATS = { "mean": [[[0.485]], [[0.456]], [[0.406]]], # (c,1,1) @@ -56,7 +56,7 @@ def resolve_delta_timestamps( return delta_timestamps -def make_dataset(cfg: MainConfig, split: str = "train") -> LeRobotDataset | MultiLeRobotDataset: +def make_dataset(cfg: TrainPipelineConfig) -> LeRobotDataset | MultiLeRobotDataset: """ Args: cfg: A Hydra config as per the LeRobot config scheme. 
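As a brief editorial sketch (not part of the patch series itself): the per-environment `feature_types` introduced in the previous commit pair observation and action keys with the `FeatureType` enum that this commit creates in `lerobot/configs/types.py`. The values below simply mirror the `AlohaEnv` defaults shown above, with import paths taken from the diff headers and therefore assumed; the parsing helper added later in this patch maps these entries onto policy keys such as `observation.state` and `observation.images.top`.

```python
# Sketch only: reading the feature_types declared on an env config.
from lerobot.common.envs.configs import AlohaEnv
from lerobot.configs.types import FeatureType

env_cfg = AlohaEnv()  # task="AlohaInsertion-v0", fps=50 by default
assert env_cfg.feature_types["agent_pos"] is FeatureType.STATE
assert env_cfg.feature_types["pixels"]["top"] is FeatureType.VISUAL
assert env_cfg.feature_types["action"] is FeatureType.ACTION
```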
diff --git a/lerobot/common/datasets/utils.py b/lerobot/common/datasets/utils.py index 123c5960f5..d1cd4269de 100644 --- a/lerobot/common/datasets/utils.py +++ b/lerobot/common/datasets/utils.py @@ -35,6 +35,7 @@ from torchvision import transforms from lerobot.common.robot_devices.robots.utils import Robot +from lerobot.configs.types import DictLike DEFAULT_CHUNK_SIZE = 1000 # Max number of episodes per chunk @@ -98,6 +99,18 @@ def unflatten_dict(d: dict, sep: str = "/") -> dict: return outdict +def get_nested_item(obj: DictLike, flattened_key: str, sep: str = "/") -> Any: + split_keys = flattened_key.split(sep) + getter = obj[split_keys[0]] + if len(split_keys) == 1: + return getter + + for key in split_keys[1:]: + getter = getter[key] + + return getter + + def serialize_dict(stats: dict[str, torch.Tensor | np.ndarray | dict]) -> dict: serialized_dict = {key: value.tolist() for key, value in flatten_dict(stats).items()} return unflatten_dict(serialized_dict) diff --git a/lerobot/common/envs/factory.py b/lerobot/common/envs/factory.py index 2ad60409ef..4cf96f8ced 100644 --- a/lerobot/common/envs/factory.py +++ b/lerobot/common/envs/factory.py @@ -17,39 +17,38 @@ import gymnasium as gym -from lerobot.configs.default import MainConfig +from lerobot.common.envs.configs import EnvConfig -def make_env(cfg: MainConfig) -> gym.vector.VectorEnv | None: +def make_env( + cfg: EnvConfig, n_envs: int | None = None, use_async_envs: bool = False +) -> gym.vector.VectorEnv | None: """Makes a gym vector environment according to the evaluation config. n_envs can be used to override eval.batch_size in the configuration. Must be at least 1. """ - n_envs = cfg.training.online.rollout_batch_size if n_envs is not None and n_envs < 1: raise ValueError("`n_envs must be at least 1") - if cfg.env.type == "real_world": + if cfg.type == "real_world": return - package_name = f"gym_{cfg.env.type}" + package_name = f"gym_{cfg.type}" try: importlib.import_module(package_name) except ModuleNotFoundError as e: - print( - f"{package_name} is not installed. Please install it with `pip install 'lerobot[{cfg.env.type}]'`" - ) + print(f"{package_name} is not installed. Please install it with `pip install 'lerobot[{cfg.type}]'`") raise e - gym_handle = f"{package_name}/{cfg.env.task}" - gym_kwgs = getattr(cfg.env, "gym", {}) + gym_handle = f"{package_name}/{cfg.task}" + gym_kwgs = getattr(cfg, "gym", {}) - if getattr(cfg.env, "episode_length", None): - gym_kwgs["max_episode_steps"] = cfg.env.episode_length + if getattr(cfg, "episode_length", None): + gym_kwgs["max_episode_steps"] = cfg.episode_length # batched version of the env that returns an observation of shape (b, c) - env_cls = gym.vector.AsyncVectorEnv if cfg.eval.use_async_envs else gym.vector.SyncVectorEnv + env_cls = gym.vector.AsyncVectorEnv if use_async_envs else gym.vector.SyncVectorEnv env = env_cls( [ lambda: gym.make(gym_handle, disable_env_checker=True, **gym_kwgs) diff --git a/lerobot/common/envs/utils.py b/lerobot/common/envs/utils.py index 001973bc1b..9f4f7e17e9 100644 --- a/lerobot/common/envs/utils.py +++ b/lerobot/common/envs/utils.py @@ -20,6 +20,7 @@ def preprocess_observation(observations: dict[str, np.ndarray]) -> dict[str, Tensor]: + # TODO(aliberts, rcadene): refactor this to use features from the environment (no hardcoding) """Convert environment observation to LeRobot format observation. Args: observation: Dictionary of observation batches from a Gym vector environment. 
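A quick usage sketch for the `get_nested_item` helper added to `lerobot/common/datasets/utils.py` above. This is not code from the patch, and it assumes `flatten_dict` uses the same "/" separator as the `unflatten_dict` shown in the surrounding context:

```python
# Sketch only: resolving "/"-flattened keys against a nested mapping,
# the same pattern parse_features_from_env applies to an env's feature_types.
from lerobot.common.datasets.utils import flatten_dict, get_nested_item

nested = {"agent_pos": "STATE", "pixels": {"top": "VISUAL"}}
flat = flatten_dict(nested)  # expected: {"agent_pos": "STATE", "pixels/top": "VISUAL"}
assert get_nested_item(nested, "pixels/top") == "VISUAL"
assert get_nested_item(nested, "agent_pos") == "STATE"
```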
diff --git a/lerobot/common/logger.py b/lerobot/common/logger.py index 7a1622f3ea..8709047e6c 100644 --- a/lerobot/common/logger.py +++ b/lerobot/common/logger.py @@ -34,8 +34,8 @@ from lerobot.common.policies.policy_protocol import Policy from lerobot.common.utils.utils import get_global_random_state, set_global_random_state -from lerobot.configs.default import MainConfig -from lerobot.configs.policies import FeatureType, NormalizationMode +from lerobot.configs.training import TrainPipelineConfig +from lerobot.configs.types import FeatureType, NormalizationMode def log_output_dir(out_dir): @@ -86,7 +86,7 @@ class Logger: pretrained_model_dir_name = "pretrained_model" training_state_file_name = "training_state.pth" - def __init__(self, cfg: MainConfig): + def __init__(self, cfg: TrainPipelineConfig): self._cfg = cfg self.log_dir = cfg.dir self.log_dir.mkdir(parents=True, exist_ok=True) diff --git a/lerobot/common/optim/factory.py b/lerobot/common/optim/factory.py index d5440f66dd..81f3352682 100644 --- a/lerobot/common/optim/factory.py +++ b/lerobot/common/optim/factory.py @@ -18,11 +18,13 @@ from torch.optim.lr_scheduler import LRScheduler from lerobot.common.policies import Policy -from lerobot.configs.default import MainConfig +from lerobot.configs.training import TrainPipelineConfig -def make_optimizer_and_scheduler(cfg: MainConfig, policy: Policy) -> tuple[Optimizer, LRScheduler | None]: +def make_optimizer_and_scheduler( + cfg: TrainPipelineConfig, policy: Policy +) -> tuple[Optimizer, LRScheduler | None]: params = policy.get_optim_params() if cfg.use_policy_training_preset else policy.parameters() optimizer = cfg.optimizer.build(params) - lr_scheduler = cfg.scheduler.build(optimizer, cfg.training.offline.steps) + lr_scheduler = cfg.scheduler.build(optimizer, cfg.offline.steps) return optimizer, lr_scheduler diff --git a/lerobot/common/policies/act/configuration_act.py b/lerobot/common/policies/act/configuration_act.py index 2cc7112be8..7f177f6a5e 100644 --- a/lerobot/common/policies/act/configuration_act.py +++ b/lerobot/common/policies/act/configuration_act.py @@ -17,7 +17,8 @@ from lerobot.common.optim.optimizers import AdamWConfig from lerobot.common.optim.schedulers import NoneSchedulerConfig -from lerobot.configs.policies import NormalizationMode, PretrainedConfig +from lerobot.configs.policies import PretrainedConfig +from lerobot.configs.types import NormalizationMode @PretrainedConfig.register_subclass("act") @@ -138,6 +139,8 @@ class ACTConfig(PretrainedConfig): optimizer_lr_backbone: float = 1e-5 def __post_init__(self): + super().__post_init__() + """Input validation (not exhaustive).""" if not self.vision_backbone.startswith("resnet"): raise ValueError( diff --git a/lerobot/common/policies/diffusion/configuration_diffusion.py b/lerobot/common/policies/diffusion/configuration_diffusion.py index ed6aa7ad1b..fb58b3efdb 100644 --- a/lerobot/common/policies/diffusion/configuration_diffusion.py +++ b/lerobot/common/policies/diffusion/configuration_diffusion.py @@ -18,7 +18,8 @@ from lerobot.common.optim.optimizers import AdamConfig from lerobot.common.optim.schedulers import DiffuserSchedulerConfig -from lerobot.configs.policies import NormalizationMode, PretrainedConfig +from lerobot.configs.policies import PretrainedConfig +from lerobot.configs.types import NormalizationMode @PretrainedConfig.register_subclass("diffusion") @@ -159,6 +160,8 @@ class DiffusionConfig(PretrainedConfig): scheduler_warmup_steps: int = 500 def __post_init__(self): + super().__post_init__() + 
"""Input validation (not exhaustive).""" if not self.vision_backbone.startswith("resnet"): raise ValueError( diff --git a/lerobot/common/policies/diffusion/modeling_diffusion.py b/lerobot/common/policies/diffusion/modeling_diffusion.py index 8299fee211..ec58c49ce9 100644 --- a/lerobot/common/policies/diffusion/modeling_diffusion.py +++ b/lerobot/common/policies/diffusion/modeling_diffusion.py @@ -215,7 +215,7 @@ def conditional_sample( # Sample prior. sample = torch.randn( - size=(batch_size, self.config.horizon, self.action_feature.shape[0]), + size=(batch_size, self.config.horizon, self.config.action_feature.shape[0]), dtype=dtype, device=device, generator=generator, diff --git a/lerobot/common/policies/factory.py b/lerobot/common/policies/factory.py index 5e768c1e23..9f215c7467 100644 --- a/lerobot/common/policies/factory.py +++ b/lerobot/common/policies/factory.py @@ -14,11 +14,13 @@ # See the License for the specific language governing permissions and # limitations under the License. +import gymnasium as gym from torch import nn from lerobot.common.datasets.lerobot_dataset import LeRobotDatasetMetadata +from lerobot.common.envs.configs import EnvConfig from lerobot.common.policies.policy_protocol import Policy -from lerobot.configs.default import MainConfig +from lerobot.configs.policies import PretrainedConfig def get_policy_class(name: str) -> Policy: @@ -44,7 +46,12 @@ def get_policy_class(name: str) -> Policy: def make_policy( - cfg: MainConfig, ds_meta: LeRobotDatasetMetadata, pretrained_policy_name_or_path: str | None = None + cfg: PretrainedConfig, + device: str, + ds_meta: LeRobotDatasetMetadata | None = None, + env: gym.Env | None = None, + env_cfg: EnvConfig | None = None, + pretrained_policy_name_or_path: str | None = None, ) -> Policy: """Make an instance of a policy class. @@ -57,35 +64,48 @@ def make_policy( directory containing weights saved using `Policy.save_pretrained`. Note that providing this argument overrides everything in `hydra_cfg.policy` apart from `hydra_cfg.policy.type`. """ + if not (ds_meta is None) ^ (env is None and env_cfg is None): + raise ValueError("Either one of a dataset metadata or a sim env must be provided.") + # Note: Currently, if you try to run vqbet with mps backend, you'll get this error. # NotImplementedError: The operator 'aten::unique_dim' is not currently implemented for the MPS device. If # you want this op to be added in priority during the prototype phase of this feature, please comment on # https://github.com/pytorch/pytorch/issues/77764. As a temporary fix, you can set the environment # variable `PYTORCH_ENABLE_MPS_FALLBACK=1` to use the CPU as a fallback for this op. WARNING: this will be # slower than running natively on MPS. - if cfg.policy.type == "vqbet" and cfg.device == "mps": + if cfg.type == "vqbet" and device == "mps": raise NotImplementedError( "Current implementation of VQBeT does not support `mps` backend. " "Please use `cpu` or `cuda` backend." ) - policy_cls = get_policy_class(cfg.policy.type) - cfg.policy.parse_features_from_dataset(ds_meta) + policy_cls = get_policy_class(cfg.type) + + kwargs = {} + if ds_meta is not None: + cfg.parse_features_from_dataset(ds_meta) + kwargs["dataset_stats"] = ds_meta.stats + else: + cfg.parse_features_from_env(env, env_cfg) + + kwargs["config"] = cfg if pretrained_policy_name_or_path is None: # Make a fresh policy. 
- policy = policy_cls(cfg.policy, ds_meta.stats) + policy = policy_cls(**kwargs) else: + kwargs["pretrained_model_name_or_path"] = pretrained_policy_name_or_path + policy = policy_cls.from_pretrained(**kwargs) # Load a pretrained policy and override the config if needed (for example, if there are inference-time # hyperparameters that we want to vary). # TODO(alexander-soare): This hack makes use of huggingface_hub's tooling to load the policy with, # pretrained weights which are then loaded into a fresh policy with the desired config. This PR in # huggingface_hub should make it possible to avoid the hack: # https://github.com/huggingface/huggingface_hub/pull/2274. - policy = policy_cls(cfg.policy) - policy.load_state_dict(policy_cls.from_pretrained(pretrained_policy_name_or_path).state_dict()) + # policy = policy_cls(cfg) + # policy.load_state_dict(policy_cls.from_pretrained(pretrained_policy_name_or_path).state_dict()) - policy.to(cfg.device) + policy.to(device) assert isinstance(policy, nn.Module) return policy diff --git a/lerobot/common/policies/normalize.py b/lerobot/common/policies/normalize.py index 2d0d9ca425..59938ef9e7 100644 --- a/lerobot/common/policies/normalize.py +++ b/lerobot/common/policies/normalize.py @@ -16,7 +16,8 @@ import torch from torch import Tensor, nn -from lerobot.configs.policies import FeatureType, NormalizationMode, PolicyFeature +from lerobot.configs.policies import PolicyFeature +from lerobot.configs.types import FeatureType, NormalizationMode def create_stats_buffers( diff --git a/lerobot/common/policies/tdmpc/configuration_tdmpc.py b/lerobot/common/policies/tdmpc/configuration_tdmpc.py index 4623234303..705a0e5e2d 100644 --- a/lerobot/common/policies/tdmpc/configuration_tdmpc.py +++ b/lerobot/common/policies/tdmpc/configuration_tdmpc.py @@ -18,7 +18,8 @@ from lerobot.common.optim.optimizers import AdamConfig from lerobot.common.optim.schedulers import NoneSchedulerConfig -from lerobot.configs.policies import NormalizationMode, PretrainedConfig +from lerobot.configs.policies import PretrainedConfig +from lerobot.configs.types import NormalizationMode @PretrainedConfig.register_subclass("tdmpc") @@ -161,6 +162,8 @@ class TDMPCConfig(PretrainedConfig): optimizer_lr: float = 3e-4 def __post_init__(self): + super().__post_init__() + """Input validation (not exhaustive).""" if self.n_gaussian_samples <= 0: raise ValueError( diff --git a/lerobot/common/policies/utils.py b/lerobot/common/policies/utils.py index baf2ab314b..799443a48a 100644 --- a/lerobot/common/policies/utils.py +++ b/lerobot/common/policies/utils.py @@ -13,7 +13,13 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+import logging +from pathlib import Path + import torch +from huggingface_hub import snapshot_download +from huggingface_hub.errors import RepositoryNotFoundError +from huggingface_hub.utils._validators import HFValidationError from torch import nn @@ -64,3 +70,28 @@ def get_output_shape(module: nn.Module, input_shape: tuple) -> tuple: with torch.inference_mode(): output = module(dummy_input) return tuple(output.shape) + + +def get_pretrained_policy_path(pretrained_policy_name_or_path, revision=None): + try: + pretrained_policy_path = Path( + snapshot_download(str(pretrained_policy_name_or_path), revision=revision) + ) + except (HFValidationError, RepositoryNotFoundError) as e: + if isinstance(e, HFValidationError): + error_message = ( + "The provided pretrained_policy_name_or_path is not a valid Hugging Face Hub repo ID." + ) + else: + error_message = ( + "The provided pretrained_policy_name_or_path was not found on the Hugging Face Hub." + ) + + logging.warning(f"{error_message} Treating it as a local directory.") + pretrained_policy_path = Path(pretrained_policy_name_or_path) + if not pretrained_policy_path.is_dir() or not pretrained_policy_path.exists(): + raise ValueError( + "The provided pretrained_policy_name_or_path is not a valid/existing Hugging Face Hub " + "repo ID, nor is it an existing local directory." + ) + return pretrained_policy_path diff --git a/lerobot/common/policies/vqbet/configuration_vqbet.py b/lerobot/common/policies/vqbet/configuration_vqbet.py index 181304f66f..c6f5d542fb 100644 --- a/lerobot/common/policies/vqbet/configuration_vqbet.py +++ b/lerobot/common/policies/vqbet/configuration_vqbet.py @@ -20,7 +20,8 @@ from lerobot.common.optim.optimizers import AdamConfig from lerobot.common.optim.schedulers import VQBeTSchedulerConfig -from lerobot.configs.policies import NormalizationMode, PretrainedConfig +from lerobot.configs.policies import PretrainedConfig +from lerobot.configs.types import NormalizationMode @PretrainedConfig.register_subclass("vqbet") @@ -137,9 +138,12 @@ class VQBeTConfig(PretrainedConfig): optimizer_eps: float = 1e-8 optimizer_weight_decay: float = 1e-6 optimizer_vqvae_lr: float = 1e-3 + optimizer_vqvae_weight_decay: float = 1e-4 scheduler_warmup_steps: int = 500 def __post_init__(self): + super().__post_init__() + """Input validation (not exhaustive).""" if not self.vision_backbone.startswith("resnet"): raise ValueError( diff --git a/lerobot/common/policies/vqbet/modeling_vqbet.py b/lerobot/common/policies/vqbet/modeling_vqbet.py index ee0c12feb2..aadbd29603 100644 --- a/lerobot/common/policies/vqbet/modeling_vqbet.py +++ b/lerobot/common/policies/vqbet/modeling_vqbet.py @@ -101,11 +101,10 @@ def get_optim_params(self) -> dict: return [ { "params": decay_params, - # "weight_decay": cfg.training.adam_weight_decay, }, { "params": vqvae_params, - "weight_decay": 1e-4, + "weight_decay": self.config.optimizer_vqvae_weight_decay, "lr": self.config.optimizer_vqvae_lr, }, { @@ -658,57 +657,6 @@ def loss_fn(self, pred, target, **kwargs): return loss_dict -class VQBeTOptimizer(torch.optim.Adam): - def __init__(self, policy, cfg): - vqvae_params = ( - list(policy.vqbet.action_head.vqvae_model.encoder.parameters()) - + list(policy.vqbet.action_head.vqvae_model.decoder.parameters()) - + list(policy.vqbet.action_head.vqvae_model.vq_layer.parameters()) - ) - decay_params, no_decay_params = policy.vqbet.policy.configure_parameters() - decay_params = ( - decay_params - + list(policy.vqbet.rgb_encoder.parameters()) - + 
list(policy.vqbet.state_projector.parameters()) - + list(policy.vqbet.rgb_feature_projector.parameters()) - + [policy.vqbet.action_token] - + list(policy.vqbet.action_head.map_to_cbet_preds_offset.parameters()) - ) - - if cfg.policy.sequentially_select: - decay_params = ( - decay_params - + list(policy.vqbet.action_head.map_to_cbet_preds_primary_bin.parameters()) - + list(policy.vqbet.action_head.map_to_cbet_preds_secondary_bin.parameters()) - ) - else: - decay_params = decay_params + list(policy.vqbet.action_head.map_to_cbet_preds_bin.parameters()) - - optim_groups = [ - { - "params": decay_params, - "weight_decay": cfg.training.adam_weight_decay, - "lr": cfg.training.lr, - }, - { - "params": vqvae_params, - "weight_decay": 0.0001, - "lr": cfg.training.vqvae_lr, - }, - { - "params": no_decay_params, - "weight_decay": 0.0, - "lr": cfg.training.lr, - }, - ] - super().__init__( - optim_groups, - cfg.training.lr, - cfg.training.adam_betas, - cfg.training.adam_eps, - ) - - class VQBeTRgbEncoder(nn.Module): """Encode an RGB image into a 1D feature vector. diff --git a/lerobot/common/robot_devices/control_utils.py b/lerobot/common/robot_devices/control_utils.py index 8cc0f32602..0e609ed5c3 100644 --- a/lerobot/common/robot_devices/control_utils.py +++ b/lerobot/common/robot_devices/control_utils.py @@ -20,10 +20,10 @@ from lerobot.common.datasets.lerobot_dataset import LeRobotDataset from lerobot.common.datasets.utils import get_features_from_robot from lerobot.common.policies.factory import make_policy +from lerobot.common.policies.utils import get_pretrained_policy_path from lerobot.common.robot_devices.robots.utils import Robot from lerobot.common.robot_devices.utils import busy_wait from lerobot.common.utils.utils import get_safe_torch_device, init_hydra_config, set_global_seed -from lerobot.scripts.eval import get_pretrained_policy_path def log_control_info(robot: Robot, dt_s, episode_index=None, frame_index=None, fps=None): @@ -165,7 +165,7 @@ def init_policy(pretrained_policy_name_or_path, policy_overrides): """Instantiate the policy and load fps, device and use_amp from config yaml""" pretrained_policy_path = get_pretrained_policy_path(pretrained_policy_name_or_path) hydra_cfg = init_hydra_config(pretrained_policy_path / "config.yaml", policy_overrides) - policy = make_policy(hydra_cfg=hydra_cfg, pretrained_policy_name_or_path=pretrained_policy_path) + policy = make_policy(cfg=hydra_cfg, pretrained_policy_name_or_path=pretrained_policy_path) # Check device is available device = get_safe_torch_device(hydra_cfg.device, log=True) diff --git a/lerobot/configs/default.py b/lerobot/configs/default.py index 8efe2f96ad..7f4d1ce12a 100644 --- a/lerobot/configs/default.py +++ b/lerobot/configs/default.py @@ -14,86 +14,12 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import datetime as dt -import logging from dataclasses import dataclass, field -from pathlib import Path -from pprint import pformat - -import draccus -from deepdiff import DeepDiff from lerobot.common import ( - envs, # noqa: F401 policies, # noqa: F401 ) from lerobot.common.datasets.transforms import ImageTransformsConfig -from lerobot.common.optim import OptimizerConfig -from lerobot.common.optim.schedulers import LRSchedulerConfig -from lerobot.configs.policies import PretrainedConfig - - -@dataclass -class OfflineConfig: - steps: int = 100_000 - - -@dataclass -class OnlineConfig: - """ - The online training loop looks something like: - - ```python - for i in range(steps): - do_online_rollout_and_update_online_buffer() - for j in range(steps_between_rollouts): - batch = next(dataloader_with_offline_and_online_data) - loss = policy(batch) - loss.backward() - optimizer.step() - ``` - - Note that the online training loop adopts most of the options from the offline loop unless specified - otherwise. - """ - - steps: int = 0 - # How many episodes to collect at once when we reach the online rollout part of the training loop. - rollout_n_episodes: int = 1 - # The number of environments to use in the gym.vector.VectorEnv. This ends up also being the batch size for - # the policy. Ideally you should set this to by an even divisor of rollout_n_episodes. - rollout_batch_size: int = 1 - # How many optimization steps (forward, backward, optimizer step) to do between running rollouts. - steps_between_rollouts: int | None = None - # The proportion of online samples (vs offline samples) to include in the online training batches. - sampling_ratio: float = 0.5 - # First seed to use for the online rollout environment. Seeds for subsequent rollouts are incremented by 1. - env_seed: int | None = None - # Sets the maximum number of frames that are stored in the online buffer for online training. The buffer is - # FIFO. - buffer_capacity: int | None = None - # The minimum number of frames to have in the online buffer before commencing online training. - # If buffer_seed_size > rollout_n_episodes, the rollout will be run multiple times until the - # seed size condition is satisfied. - buffer_seed_size: int = 0 - # Whether to run the online rollouts asynchronously. This means we can run the online training steps in - # parallel with the rollouts. This might be advised if your GPU has the bandwidth to handle training - # + eval + environment rendering simultaneously. - do_rollout_async: bool = False - - -@dataclass -class TrainConfig: - # Number of workers for the dataloader. - num_workers: int = 4 - batch_size: int = 8 - eval_freq: int = 20_000 - log_freq: int = 200 - save_checkpoint: bool = True - # Checkpoint is saved every `save_freq` training iterations and after the last training step. - save_freq: int = 20_000 - offline: OfflineConfig = field(default_factory=OfflineConfig) - online: OnlineConfig = field(default_factory=OnlineConfig) @dataclass @@ -110,26 +36,6 @@ class DatasetConfig: video_backend: str = "pyav" -@dataclass -class EvalConfig: - n_episodes: int = 50 - # `batch_size` specifies the number of environments to use in a gym.vector.VectorEnv. - batch_size: int = 50 - # `use_async_envs` specifies whether to use asynchronous environments (multiprocessing). - use_async_envs: bool = False - - def __post_init__(self): - if self.batch_size > self.n_episodes: - raise ValueError( - "The eval batch size is greater than the number of eval episodes " - f"({self.batch_size} > {self.n_episodes}). 
As a result, {self.batch_size} " - f"eval environments will be instantiated, but only {self.n_episodes} will be used. " - "This might significantly slow down evaluation. To fix this, you should update your command " - f"to increase the number of episodes to match the batch size (e.g. `eval.n_episodes={self.batch_size}`), " - f"or lower the batch size (e.g. `eval.batch_size={self.n_episodes}`)." - ) - - @dataclass class WandBConfig: enable: bool = False @@ -138,93 +44,3 @@ class WandBConfig: project: str = "lerobot" entity: str | None = None notes: str | None = None - - -@dataclass -class MainConfig: - policy: PretrainedConfig - dataset: DatasetConfig - env: envs.EnvConfig = field(default_factory=envs.RealEnv) - # Set `dir` to where you would like to save all of the run outputs. If you run another training session - # with the same value for `dir` its contents will be overwritten unless you set `resume` to true. - dir: Path | None = None - job_name: str | None = None - # Set `resume` to true to resume a previous run. In order for this to work, you will need to make sure - # `dir` is the directory of an existing run with at least one checkpoint in it. - # Note that when resuming a run, the default behavior is to use the configuration from the checkpoint, - # regardless of what's provided with the training command at the time of resumption. - resume: bool = False - device: str = "cuda" # | cpu | mp - # `use_amp` determines whether to use Automatic Mixed Precision (AMP) for training and evaluation. With AMP, - # automatic gradient scaling is used. - use_amp: bool = False - # `seed` is used for training (eg: model initialization, dataset shuffling) - # AND for the evaluation environments. - seed: int | None = 1000 - training: TrainConfig = field(default_factory=TrainConfig) - use_policy_training_preset: bool = True - optimizer: OptimizerConfig | None = None - scheduler: LRSchedulerConfig | None = None - eval: EvalConfig = field(default_factory=EvalConfig) - wandb: WandBConfig = field(default_factory=WandBConfig) - - def __post_init__(self): - if not self.job_name: - self.job_name = f"{self.env.type}_{self.policy.type}" - - if not self.dir: - now = dt.datetime.now() - train_dir = f"{now:%Y-%m-%d}/{now:%H-%M-%S}_{self.job_name}" - self.dir = Path("outputs/train") / train_dir - - if self.training.online.steps > 0 and isinstance(self.dataset.repo_id, list): - raise NotImplementedError("Online training with LeRobotMultiDataset is not implemented.") - - if not self.use_policy_training_preset and (self.optimizer is None or self.scheduler is None): - raise ValueError("Optimizer and Scheduler must be set when the policy presets are not used.") - elif self.use_policy_training_preset: - self.optimizer = self.policy.get_optimizer_preset() - self.scheduler = self.policy.get_scheduler_preset() - - # If we are resuming a run, we need to check that a checkpoint exists in the log directory, and we need - # to check for any differences between the provided config and the checkpoint's config. - checkpoint_cfg_path = self.dir / "checkpoints/last/config.yaml" - if self.resume: - if not checkpoint_cfg_path.exists(): - raise RuntimeError( - f"You have set resume=True, but there is no model checkpoint in {self.dir}" - ) - - # Get the configuration file from the last checkpoint. - checkpoint_cfg = self.from_checkpoint(checkpoint_cfg_path) - - # # Check for differences between the checkpoint configuration and provided configuration. - # # Hack to resolve the delta_timestamps ahead of time in order to properly diff. 
- # resolve_delta_timestamps(cfg) - diff = DeepDiff(checkpoint_cfg, self) - # Ignore the `resume` and parameters. - if "values_changed" in diff and "root['resume']" in diff["values_changed"]: - del diff["values_changed"]["root['resume']"] - # Log a warning about differences between the checkpoint configuration and the provided - # configuration. - if len(diff) > 0: - logging.warning( - "At least one difference was detected between the checkpoint configuration and " - f"the provided configuration: \n{pformat(diff)}\nNote that the checkpoint configuration " - "takes precedence.", - ) - # Use the checkpoint config instead of the provided config (but keep `resume` parameter). - self = checkpoint_cfg - self.resume = True - - elif checkpoint_cfg_path.exists(): - raise RuntimeError( - f"The configured output directory {checkpoint_cfg_path} already exists. If " - "you meant to resume training, please use `resume=true` in your command or yaml configuration." - ) - - @classmethod - def from_checkpoint(cls, config_path: Path): - with open(config_path) as f: - cfg = draccus.load(cls, f) - return cfg diff --git a/lerobot/configs/eval.py b/lerobot/configs/eval.py new file mode 100644 index 0000000000..74665175a5 --- /dev/null +++ b/lerobot/configs/eval.py @@ -0,0 +1,75 @@ +import datetime as dt +from dataclasses import dataclass, replace +from pathlib import Path + +import draccus + +from lerobot.common import envs +from lerobot.common.policies.utils import get_pretrained_policy_path +from lerobot.configs.policies import PretrainedConfig + + +@dataclass +class EvalConfig: + n_episodes: int = 50 + episode_length: int | None = None + # `batch_size` specifies the number of environments to use in a gym.vector.VectorEnv. + batch_size: int = 50 + # `use_async_envs` specifies whether to use asynchronous environments (multiprocessing). + use_async_envs: bool = False + + def __post_init__(self): + if self.batch_size > self.n_episodes: + raise ValueError( + "The eval batch size is greater than the number of eval episodes " + f"({self.batch_size} > {self.n_episodes}). As a result, {self.batch_size} " + f"eval environments will be instantiated, but only {self.n_episodes} will be used. " + "This might significantly slow down evaluation. To fix this, you should update your command " + f"to increase the number of episodes to match the batch size (e.g. `eval.n_episodes={self.batch_size}`), " + f"or lower the batch size (e.g. `eval.batch_size={self.n_episodes}`)." + ) + + +@dataclass +class EvalPipelineConfig: + # Either the repo ID of a model hosted on the Hub or a path to a directory containing weights + # saved using `Policy.save_pretrained`. If not provided, the policy is initialized from scratch + # (useful for debugging). This argument is mutually exclusive with `--config`. + pretrained_policy_path: Path + eval: EvalConfig + env: envs.EnvConfig + policy: PretrainedConfig | None = None + dir: Path | None = None + job_name: str | None = None + device: str = "cuda" # | cpu | mp + # `use_amp` determines whether to use Automatic Mixed Precision (AMP) for training and evaluation. With AMP, + # automatic gradient scaling is used. 
+ use_amp: bool = False + seed: int | None = 1000 + + def __post_init__(self): + # TODO(aliberts, rcadene): move this logic out of the config + from time import sleep + + sleep(1) + self.resolve_policy_name_or_path() + cfg_path = self.pretrained_policy_path / "config.json" + with open(cfg_path) as f: + policy_cfg = draccus.load(PretrainedConfig, f) + + if self.policy is not None: + # override policy config + replace(policy_cfg, self.policy) + + self.policy = policy_cfg + + if not self.job_name: + self.job_name = f"{self.env.type}_{self.policy.type}" + + if not self.dir: + now = dt.datetime.now() + eval_dir = f"{now:%Y-%m-%d}/{now:%H-%M-%S}_{self.job_name}" + self.dir = Path("outputs/eval") / eval_dir + + def resolve_policy_name_or_path(self): + self.pretrained_policy_path = get_pretrained_policy_path(self.pretrained_policy_path) diff --git a/lerobot/configs/policies.py b/lerobot/configs/policies.py index 022c5cb791..ad860468cb 100644 --- a/lerobot/configs/policies.py +++ b/lerobot/configs/policies.py @@ -1,27 +1,16 @@ import abc from dataclasses import dataclass, field -from enum import Enum from pprint import pformat import draccus +import gymnasium as gym from lerobot.common.datasets.lerobot_dataset import LeRobotDatasetMetadata +from lerobot.common.datasets.utils import flatten_dict, get_nested_item +from lerobot.common.envs.configs import EnvConfig from lerobot.common.optim.optimizers import OptimizerConfig from lerobot.common.optim.schedulers import LRSchedulerConfig - - -# Note: We subclass str so that serialization is straightforward -# https://stackoverflow.com/questions/24481852/serialising-an-enum-member-to-json -class FeatureType(str, Enum): - STATE = "STATE" - VISUAL = "VISUAL" - ENV = "ENV" - ACTION = "ACTION" - - -class NormalizationMode(str, Enum): - MIN_MAX = "MIN_MAX" - MEAN_STD = "MEAN_STD" +from lerobot.configs.types import FeatureType, NormalizationMode @dataclass @@ -48,13 +37,18 @@ class PretrainedConfig(draccus.ChoiceRegistry, abc.ABC): the original scale. 
""" + type: str = "" + n_obs_steps: int = 1 normalization_mapping: dict[str, NormalizationMode] = field(default_factory=dict) - @property - def type(self) -> str: - return self.get_choice_name(self.__class__) + def __post_init__(self): + self.type = self.get_choice_name(self.__class__) + + # @property + # def type(self) -> str: + # return self.get_choice_name(self.__class__) @abc.abstractproperty def observation_delta_indices(self) -> list | None: @@ -166,3 +160,60 @@ def parse_features_from_dataset(self, ds_meta: LeRobotDatasetMetadata): self.env_state_feature = env_state_features[0] if len(env_state_features) == 1 else None self.action_feature = action_features[0] if len(action_features) == 1 else None self.image_features = image_features + + def parse_features_from_env(self, env: gym.Env, env_cfg: EnvConfig): + robot_state_features = [] + env_state_features = [] + action_features = [] + image_features = [] + + flat_dict = flatten_dict(env_cfg.feature_types) + + for key, _type in flat_dict.items(): + env_ft = ( + env.action_space + if _type is FeatureType.ACTION + else get_nested_item(env.observation_space, key) + ) + shape = env_ft.shape[1:] + if _type is FeatureType.VISUAL: + h, w, c = shape + if not c < h and c < w: + raise ValueError( + f"Expect channel last images for visual feature {key} of {env_cfg.type} env, but instead got {shape=}" + ) + shape = (c, h, w) + + feature = PolicyFeature( + key=key, + type=_type, + shape=shape, + normalization_mode=self.normalization_mapping[_type], + ) + if _type is FeatureType.VISUAL: + image_features.append(feature) + elif _type is FeatureType.STATE: + robot_state_features.append(feature) + elif _type is FeatureType.ENV: + env_state_features.append(feature) + elif _type is FeatureType.ACTION: + action_features.append(feature) + + # TODO(aliberts, rcadene): remove this hardcoding of keys and just use the nested keys as is + # (need to also refactor preprocess_observation and externalize normalization from policies) + for ft in image_features: + if len(ft.key.split("/")) > 1: + ft.key = f"observation.images.{ft.key.split('/')[-1]}" + elif len(ft.key.split("/")) == 1: + image_features[0].key = "observation.image" + + if len(robot_state_features) == 1: + robot_state_features[0].key = "observation.state" + + if len(env_state_features) == 1: + env_state_features[0].key = "observation.environment_state" + + self.robot_state_feature = robot_state_features[0] if len(robot_state_features) == 1 else None + self.env_state_feature = env_state_features[0] if len(env_state_features) == 1 else None + self.action_feature = action_features[0] if len(action_features) == 1 else None + self.image_features = image_features diff --git a/lerobot/configs/training.py b/lerobot/configs/training.py new file mode 100644 index 0000000000..fa4d145c39 --- /dev/null +++ b/lerobot/configs/training.py @@ -0,0 +1,163 @@ +import datetime as dt +import logging +from dataclasses import dataclass, field +from pathlib import Path +from pprint import pformat + +import draccus +from deepdiff import DeepDiff + +from lerobot.common import envs +from lerobot.common.optim import OptimizerConfig +from lerobot.common.optim.schedulers import LRSchedulerConfig +from lerobot.configs.default import DatasetConfig, WandBConfig +from lerobot.configs.eval import EvalConfig +from lerobot.configs.policies import PretrainedConfig + + +@dataclass +class OfflineConfig: + steps: int = 100_000 + + +@dataclass +class OnlineConfig: + """ + The online training loop looks something like: + + ```python + for i in 
range(steps): + do_online_rollout_and_update_online_buffer() + for j in range(steps_between_rollouts): + batch = next(dataloader_with_offline_and_online_data) + loss = policy(batch) + loss.backward() + optimizer.step() + ``` + + Note that the online training loop adopts most of the options from the offline loop unless specified + otherwise. + """ + + steps: int = 0 + # How many episodes to collect at once when we reach the online rollout part of the training loop. + rollout_n_episodes: int = 1 + # The number of environments to use in the gym.vector.VectorEnv. This ends up also being the batch size for + # the policy. Ideally you should set this to by an even divisor of rollout_n_episodes. + rollout_batch_size: int = 1 + # How many optimization steps (forward, backward, optimizer step) to do between running rollouts. + steps_between_rollouts: int | None = None + # The proportion of online samples (vs offline samples) to include in the online training batches. + sampling_ratio: float = 0.5 + # First seed to use for the online rollout environment. Seeds for subsequent rollouts are incremented by 1. + env_seed: int | None = None + # Sets the maximum number of frames that are stored in the online buffer for online training. The buffer is + # FIFO. + buffer_capacity: int | None = None + # The minimum number of frames to have in the online buffer before commencing online training. + # If buffer_seed_size > rollout_n_episodes, the rollout will be run multiple times until the + # seed size condition is satisfied. + buffer_seed_size: int = 0 + # Whether to run the online rollouts asynchronously. This means we can run the online training steps in + # parallel with the rollouts. This might be advised if your GPU has the bandwidth to handle training + # + eval + environment rendering simultaneously. + do_rollout_async: bool = False + + +@dataclass +class TrainPipelineConfig: + policy: PretrainedConfig + dataset: DatasetConfig + env: envs.EnvConfig = field(default_factory=envs.RealEnv) + # Set `dir` to where you would like to save all of the run outputs. If you run another training session + # with the same value for `dir` its contents will be overwritten unless you set `resume` to true. + dir: Path | None = None + job_name: str | None = None + # Set `resume` to true to resume a previous run. In order for this to work, you will need to make sure + # `dir` is the directory of an existing run with at least one checkpoint in it. + # Note that when resuming a run, the default behavior is to use the configuration from the checkpoint, + # regardless of what's provided with the training command at the time of resumption. + resume: bool = False + device: str = "cuda" # | cpu | mp + # `use_amp` determines whether to use Automatic Mixed Precision (AMP) for training and evaluation. With AMP, + # automatic gradient scaling is used. + use_amp: bool = False + # `seed` is used for training (eg: model initialization, dataset shuffling) + # AND for the evaluation environments. + seed: int | None = 1000 + # Number of workers for the dataloader. + num_workers: int = 4 + batch_size: int = 8 + eval_freq: int = 20_000 + log_freq: int = 200 + save_checkpoint: bool = True + # Checkpoint is saved every `save_freq` training iterations and after the last training step. 
+ save_freq: int = 20_000 + offline: OfflineConfig = field(default_factory=OfflineConfig) + online: OnlineConfig = field(default_factory=OnlineConfig) + use_policy_training_preset: bool = True + optimizer: OptimizerConfig | None = None + scheduler: LRSchedulerConfig | None = None + eval: EvalConfig = field(default_factory=EvalConfig) + wandb: WandBConfig = field(default_factory=WandBConfig) + + def __post_init__(self): + if not self.job_name: + self.job_name = f"{self.env.type}_{self.policy.type}" + + if not self.dir: + now = dt.datetime.now() + train_dir = f"{now:%Y-%m-%d}/{now:%H-%M-%S}_{self.job_name}" + self.dir = Path("outputs/train") / train_dir + + if self.online.steps > 0 and isinstance(self.dataset.repo_id, list): + raise NotImplementedError("Online training with LeRobotMultiDataset is not implemented.") + + if not self.use_policy_training_preset and (self.optimizer is None or self.scheduler is None): + raise ValueError("Optimizer and Scheduler must be set when the policy presets are not used.") + elif self.use_policy_training_preset: + self.optimizer = self.policy.get_optimizer_preset() + self.scheduler = self.policy.get_scheduler_preset() + + # If we are resuming a run, we need to check that a checkpoint exists in the log directory, and we need + # to check for any differences between the provided config and the checkpoint's config. + checkpoint_cfg_path = self.dir / "checkpoints/last/config.yaml" + if self.resume: + if not checkpoint_cfg_path.exists(): + raise RuntimeError( + f"You have set resume=True, but there is no model checkpoint in {self.dir}" + ) + + # Get the configuration file from the last checkpoint. + checkpoint_cfg = self.from_checkpoint(checkpoint_cfg_path) + + # # Check for differences between the checkpoint configuration and provided configuration. + # # Hack to resolve the delta_timestamps ahead of time in order to properly diff. + # resolve_delta_timestamps(cfg) + diff = DeepDiff(checkpoint_cfg, self) + # Ignore the `resume` and parameters. + if "values_changed" in diff and "root['resume']" in diff["values_changed"]: + del diff["values_changed"]["root['resume']"] + # Log a warning about differences between the checkpoint configuration and the provided + # configuration. + if len(diff) > 0: + logging.warning( + "At least one difference was detected between the checkpoint configuration and " + f"the provided configuration: \n{pformat(diff)}\nNote that the checkpoint configuration " + "takes precedence.", + ) + # Use the checkpoint config instead of the provided config (but keep `resume` parameter). + self = checkpoint_cfg + self.resume = True + + elif checkpoint_cfg_path.exists(): + raise RuntimeError( + f"The configured output directory {checkpoint_cfg_path} already exists. If " + "you meant to resume training, please use `resume=true` in your command or yaml configuration." 
+ ) + + @classmethod + def from_checkpoint(cls, config_path: Path): + with open(config_path) as f: + cfg = draccus.load(cls, f) + return cfg diff --git a/lerobot/configs/types.py b/lerobot/configs/types.py new file mode 100644 index 0000000000..a5f6ac4fba --- /dev/null +++ b/lerobot/configs/types.py @@ -0,0 +1,20 @@ +# Note: We subclass str so that serialization is straightforward +# https://stackoverflow.com/questions/24481852/serialising-an-enum-member-to-json +from enum import Enum +from typing import Any, Protocol + + +class FeatureType(str, Enum): + STATE = "STATE" + VISUAL = "VISUAL" + ENV = "ENV" + ACTION = "ACTION" + + +class NormalizationMode(str, Enum): + MIN_MAX = "MIN_MAX" + MEAN_STD = "MEAN_STD" + + +class DictLike(Protocol): + def __getitem__(self, key: Any) -> Any: ... diff --git a/lerobot/scripts/control_sim_robot.py b/lerobot/scripts/control_sim_robot.py index 4fffa8c754..3a7f2288d5 100644 --- a/lerobot/scripts/control_sim_robot.py +++ b/lerobot/scripts/control_sim_robot.py @@ -504,7 +504,7 @@ def replay( # make gym env env_cfg = init_hydra_config(env_config_path) - importlib.import_module(f"gym_{env_cfg.env.name}") + importlib.import_module(f"gym_{env_cfg.env.type}") def env_constructor(): return gym.make(env_cfg.env.handle, disable_env_checker=True, **env_cfg.env.gym) diff --git a/lerobot/scripts/eval.py b/lerobot/scripts/eval.py index 040f92d964..868a27c3c5 100644 --- a/lerobot/scripts/eval.py +++ b/lerobot/scripts/eval.py @@ -41,28 +41,23 @@ https://huggingface.co/lerobot/diffusion_pusht/tree/main. """ -import argparse import json import logging import threading import time from contextlib import nullcontext from copy import deepcopy -from datetime import datetime as dt from pathlib import Path from typing import Callable +import draccus import einops import gymnasium as gym import numpy as np import torch -from huggingface_hub import snapshot_download -from huggingface_hub.errors import RepositoryNotFoundError -from huggingface_hub.utils._validators import HFValidationError from torch import Tensor, nn from tqdm import trange -from lerobot.common.datasets.factory import make_dataset from lerobot.common.envs.factory import make_env from lerobot.common.envs.utils import preprocess_observation from lerobot.common.logger import log_output_dir @@ -72,11 +67,10 @@ from lerobot.common.utils.io_utils import write_video from lerobot.common.utils.utils import ( get_safe_torch_device, - init_hydra_config, - init_logging, inside_slurm, set_global_seed, ) +from lerobot.configs.eval import EvalPipelineConfig def rollout( @@ -442,66 +436,43 @@ def _compile_episode_data( return data_dict -def main( - pretrained_policy_path: Path | None = None, - hydra_cfg_path: str | None = None, - out_dir: str | None = None, - config_overrides: list[str] | None = None, -): - assert (pretrained_policy_path is None) ^ (hydra_cfg_path is None) - if pretrained_policy_path is not None: - hydra_cfg = init_hydra_config(str(pretrained_policy_path / "config.yaml"), config_overrides) - else: - hydra_cfg = init_hydra_config(hydra_cfg_path, config_overrides) - - if hydra_cfg.eval.batch_size > hydra_cfg.eval.n_episodes: - raise ValueError( - "The eval batch size is greater than the number of eval episodes " - f"({hydra_cfg.eval.batch_size} > {hydra_cfg.eval.n_episodes}). As a result, {hydra_cfg.eval.batch_size} " - f"eval environments will be instantiated, but only {hydra_cfg.eval.n_episodes} will be used. " - "This might significantly slow down evaluation. 
To fix this, you should update your command " - f"to increase the number of episodes to match the batch size (e.g. `eval.n_episodes={hydra_cfg.eval.batch_size}`), " - f"or lower the batch size (e.g. `eval.batch_size={hydra_cfg.eval.n_episodes}`)." - ) - - if out_dir is None: - out_dir = f"outputs/eval/{dt.now().strftime('%Y-%m-%d/%H-%M-%S')}_{hydra_cfg.env.name}_{hydra_cfg.policy.name}" - +@draccus.wrap() +def eval(cfg: EvalPipelineConfig): # Check device is available - device = get_safe_torch_device(hydra_cfg.device, log=True) + device = get_safe_torch_device(cfg.device, log=True) torch.backends.cudnn.benchmark = True torch.backends.cuda.matmul.allow_tf32 = True - set_global_seed(hydra_cfg.seed) + set_global_seed(cfg.seed) - log_output_dir(out_dir) + log_output_dir(cfg.dir) logging.info("Making environment.") - env = make_env(hydra_cfg) + env = make_env(cfg.env, n_envs=cfg.eval.batch_size, use_async_envs=cfg.eval.use_async_envs) logging.info("Making policy.") - if hydra_cfg_path is None: - policy = make_policy(hydra_cfg=hydra_cfg, pretrained_policy_name_or_path=str(pretrained_policy_path)) - else: - # Note: We need the dataset stats to pass to the policy's normalization modules. - policy = make_policy(hydra_cfg=hydra_cfg, dataset_stats=make_dataset(hydra_cfg).meta.stats) - - assert isinstance(policy, nn.Module) + policy = make_policy( + cfg=cfg.policy, + device=device, + env=env, + env_cfg=cfg.env, + pretrained_policy_name_or_path=str(cfg.pretrained_policy_path), + ) policy.eval() - with torch.no_grad(), torch.autocast(device_type=device.type) if hydra_cfg.use_amp else nullcontext(): + with torch.no_grad(), torch.autocast(device_type=device.type) if cfg.use_amp else nullcontext(): info = eval_policy( env, policy, - hydra_cfg.eval.n_episodes, + cfg.eval.n_episodes, max_episodes_rendered=10, - videos_dir=Path(out_dir) / "videos", - start_seed=hydra_cfg.seed, + videos_dir=Path(cfg.dir) / "videos", + start_seed=cfg.seed, ) print(info["aggregated"]) # Save info - with open(Path(out_dir) / "eval_info.json", "w") as f: + with open(Path(cfg.dir) / "eval_info.json", "w") as f: json.dump(info, f, indent=2) env.close() @@ -509,76 +480,5 @@ def main( logging.info("End of eval") -def get_pretrained_policy_path(pretrained_policy_name_or_path, revision=None): - try: - pretrained_policy_path = Path(snapshot_download(pretrained_policy_name_or_path, revision=revision)) - except (HFValidationError, RepositoryNotFoundError) as e: - if isinstance(e, HFValidationError): - error_message = ( - "The provided pretrained_policy_name_or_path is not a valid Hugging Face Hub repo ID." - ) - else: - error_message = ( - "The provided pretrained_policy_name_or_path was not found on the Hugging Face Hub." - ) - - logging.warning(f"{error_message} Treating it as a local directory.") - pretrained_policy_path = Path(pretrained_policy_name_or_path) - if not pretrained_policy_path.is_dir() or not pretrained_policy_path.exists(): - raise ValueError( - "The provided pretrained_policy_name_or_path is not a valid/existing Hugging Face Hub " - "repo ID, nor is it an existing local directory." 
- ) - return pretrained_policy_path - - if __name__ == "__main__": - init_logging() - - parser = argparse.ArgumentParser( - description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter - ) - group = parser.add_mutually_exclusive_group(required=True) - group.add_argument( - "-p", - "--pretrained-policy-name-or-path", - help=( - "Either the repo ID of a model hosted on the Hub or a path to a directory containing weights " - "saved using `Policy.save_pretrained`. If not provided, the policy is initialized from scratch " - "(useful for debugging). This argument is mutually exclusive with `--config`." - ), - ) - group.add_argument( - "--config", - help=( - "Path to a yaml config you want to use for initializing a policy from scratch (useful for " - "debugging). This argument is mutually exclusive with `--pretrained-policy-name-or-path` (`-p`)." - ), - ) - parser.add_argument("--revision", help="Optionally provide the Hugging Face Hub revision ID.") - parser.add_argument( - "--out-dir", - help=( - "Where to save the evaluation outputs. If not provided, outputs are saved in " - "outputs/eval/{timestamp}_{env_name}_{policy_name}" - ), - ) - parser.add_argument( - "overrides", - nargs="*", - help="Any key=value arguments to override config values (use dots for.nested=overrides)", - ) - args = parser.parse_args() - - if args.pretrained_policy_name_or_path is None: - main(hydra_cfg_path=args.config, out_dir=args.out_dir, config_overrides=args.overrides) - else: - pretrained_policy_path = get_pretrained_policy_path( - args.pretrained_policy_name_or_path, revision=args.revision - ) - - main( - pretrained_policy_path=pretrained_policy_path, - out_dir=args.out_dir, - config_overrides=args.overrides, - ) + eval() diff --git a/lerobot/scripts/train.py b/lerobot/scripts/train.py index 80ef41eaa6..8769ed4a78 100644 --- a/lerobot/scripts/train.py +++ b/lerobot/scripts/train.py @@ -44,7 +44,7 @@ init_logging, set_global_seed, ) -from lerobot.configs.default import MainConfig +from lerobot.configs.training import TrainPipelineConfig from lerobot.scripts.eval import eval_policy @@ -106,7 +106,7 @@ def update_policy( def log_train_info( - logger: Logger, info: dict, step: int, cfg: MainConfig, dataset: LeRobotDataset, is_online: bool + logger: Logger, info: dict, step: int, cfg: TrainPipelineConfig, dataset: LeRobotDataset, is_online: bool ): loss = info["loss"] grad_norm = info["grad_norm"] @@ -116,7 +116,7 @@ def log_train_info( # A sample is an (observation,action) pair, where observation and action # can be on multiple timestamps. In a batch, we have `batch_size`` number of samples. - num_samples = (step + 1) * cfg.training.batch_size + num_samples = (step + 1) * cfg.batch_size avg_samples_per_ep = dataset.num_frames / dataset.num_episodes num_episodes = num_samples / avg_samples_per_ep num_epochs = num_samples / dataset.num_frames @@ -153,7 +153,7 @@ def log_eval_info(logger, info, step, cfg, dataset, is_online): # A sample is an (observation,action) pair, where observation and action # can be on multiple timestamps. In a batch, we have `batch_size`` number of samples. 
- num_samples = (step + 1) * cfg.training.batch_size + num_samples = (step + 1) * cfg.batch_size avg_samples_per_ep = dataset.num_frames / dataset.num_episodes num_episodes = num_samples / avg_samples_per_ep num_epochs = num_samples / dataset.num_frames @@ -181,7 +181,7 @@ def log_eval_info(logger, info, step, cfg, dataset, is_online): @draccus.wrap() -def train(cfg: MainConfig): +def train(cfg: TrainPipelineConfig): init_logging() logging.info(pformat(asdict(cfg))) @@ -204,13 +204,14 @@ def train(cfg: MainConfig): # On real-world data, no need to create an environment as evaluations are done outside train.py, # using the eval.py instead, with gym_dora environment and dora-rs. eval_env = None - if cfg.training.eval_freq > 0 and cfg.env.type != "real_world": + if cfg.eval_freq > 0 and cfg.env.type != "real_world": logging.info("Creating env") - eval_env = make_env(cfg) + eval_env = make_env(cfg.env, n_envs=cfg.eval.batch_size) logging.info("Creating policy") policy = make_policy( - cfg=cfg, + cfg=cfg.policy, + device=device, ds_meta=offline_dataset.meta, pretrained_policy_name_or_path=str(logger.last_pretrained_model_dir) if cfg.resume else None, ) @@ -228,8 +229,8 @@ def train(cfg: MainConfig): log_output_dir(cfg.dir) logging.info(f"{cfg.env.task=}") - logging.info(f"{cfg.training.offline.steps=} ({format_big_number(cfg.training.offline.steps)})") - logging.info(f"{cfg.training.online.steps=}") + logging.info(f"{cfg.offline.steps=} ({format_big_number(cfg.offline.steps)})") + logging.info(f"{cfg.online.steps=}") logging.info(f"{offline_dataset.num_frames=} ({format_big_number(offline_dataset.num_frames)})") logging.info(f"{offline_dataset.num_episodes=}") logging.info(f"{num_learnable_params=} ({format_big_number(num_learnable_params)})") @@ -237,10 +238,10 @@ def train(cfg: MainConfig): # Note: this helper will be used in offline and online training loops. 
def evaluate_and_checkpoint_if_needed(step: int, is_online: bool): - _num_digits = max(6, len(str(cfg.training.offline.steps + cfg.training.online.steps))) + _num_digits = max(6, len(str(cfg.offline.steps + cfg.online.steps))) step_identifier = f"{step:0{_num_digits}d}" - if cfg.training.eval_freq > 0 and step % cfg.training.eval_freq == 0: + if cfg.eval_freq > 0 and step % cfg.eval_freq == 0: logging.info(f"Eval policy at step {step}") with torch.no_grad(), torch.autocast(device_type=device.type) if cfg.use_amp else nullcontext(): assert eval_env is not None @@ -257,9 +258,8 @@ def evaluate_and_checkpoint_if_needed(step: int, is_online: bool): logger.log_video(eval_info["video_paths"][0], step, mode="eval") logging.info("Resume training") - if cfg.training.save_checkpoint and ( - step % cfg.training.save_freq == 0 - or step == cfg.training.offline.steps + cfg.training.online.steps + if cfg.save_checkpoint and ( + step % cfg.save_freq == 0 or step == cfg.offline.steps + cfg.online.steps ): logging.info(f"Checkpoint policy after step {step}") # Note: Save with step as the identifier, and format it to have at least 6 digits but more if @@ -286,8 +286,8 @@ def evaluate_and_checkpoint_if_needed(step: int, is_online: bool): sampler = None dataloader = torch.utils.data.DataLoader( offline_dataset, - num_workers=cfg.training.num_workers, - batch_size=cfg.training.batch_size, + num_workers=cfg.num_workers, + batch_size=cfg.batch_size, shuffle=shuffle, sampler=sampler, pin_memory=device.type != "cpu", @@ -297,7 +297,7 @@ def evaluate_and_checkpoint_if_needed(step: int, is_online: bool): policy.train() offline_step = 0 - for _ in range(step, cfg.training.offline.steps): + for _ in range(step, cfg.offline.steps): if offline_step == 0: logging.info("Start offline training on a fixed dataset") @@ -320,7 +320,7 @@ def evaluate_and_checkpoint_if_needed(step: int, is_online: bool): train_info["dataloading_s"] = dataloading_s - if step % cfg.training.log_freq == 0: + if step % cfg.log_freq == 0: log_train_info(logger, train_info, step, cfg, offline_dataset, is_online=False) # Note: evaluate_and_checkpoint_if_needed happens **after** the `step`th training update has completed, @@ -330,7 +330,7 @@ def evaluate_and_checkpoint_if_needed(step: int, is_online: bool): step += 1 offline_step += 1 # noqa: SIM113 - if cfg.training.online.steps == 0: + if cfg.online.steps == 0: if eval_env: eval_env.close() logging.info("End of training") @@ -339,7 +339,7 @@ def evaluate_and_checkpoint_if_needed(step: int, is_online: bool): # Online training. # Create an env dedicated to online episodes collection from policy rollout. - online_env = make_env(cfg) + online_env = make_env(cfg.env, n_envs=cfg.online.rollout_batch_size) detla_timestamps = resolve_delta_timestamps(cfg.policy, offline_dataset.meta) online_buffer_path = logger.log_dir / "online_buffer" if cfg.resume and not online_buffer_path.exists(): @@ -366,14 +366,14 @@ def evaluate_and_checkpoint_if_needed(step: int, is_online: bool): "next.done": {"shape": (), "dtype": np.dtype("?")}, "next.success": {"shape": (), "dtype": np.dtype("?")}, }, - buffer_capacity=cfg.training.online.buffer_capacity, + buffer_capacity=cfg.online.buffer_capacity, fps=online_env.unwrapped.metadata["render_fps"], delta_timestamps=detla_timestamps, ) # If we are doing online rollouts asynchronously, deepcopy the policy to use for online rollouts (this # makes it possible to do online rollouts in parallel with training updates). 
- online_rollout_policy = deepcopy(policy) if cfg.training.online.do_rollout_async else policy + online_rollout_policy = deepcopy(policy) if cfg.online.do_rollout_async else policy # Create dataloader for online training. concat_dataset = torch.utils.data.ConcatDataset([offline_dataset, online_dataset]) @@ -384,7 +384,7 @@ def evaluate_and_checkpoint_if_needed(step: int, is_online: bool): # +1 because online rollouts return an extra frame for the "final observation". Note: we don't have # this final observation in the offline datasets, but we might add them in future. online_drop_n_last_frames=getattr(cfg.policy, "drop_n_last_frames", 0) + 1, - online_sampling_ratio=cfg.training.online.sampling_ratio, + online_sampling_ratio=cfg.online.sampling_ratio, ) sampler = torch.utils.data.WeightedRandomSampler( sampler_weights, @@ -393,8 +393,8 @@ def evaluate_and_checkpoint_if_needed(step: int, is_online: bool): ) dataloader = torch.utils.data.DataLoader( concat_dataset, - batch_size=cfg.training.batch_size, - num_workers=cfg.training.num_workers, + batch_size=cfg.batch_size, + num_workers=cfg.num_workers, sampler=sampler, pin_memory=device.type != "cpu", drop_last=True, @@ -414,10 +414,10 @@ def evaluate_and_checkpoint_if_needed(step: int, is_online: bool): # Time taken waiting for the online buffer to finish being updated. This is relevant when using the async # online rollout option. await_update_online_buffer_s = 0 - rollout_start_seed = cfg.training.online.env_seed + rollout_start_seed = cfg.online.env_seed while True: - if online_step == cfg.training.online.steps: + if online_step == cfg.online.steps: break if online_step == 0: @@ -433,13 +433,11 @@ def sample_trajectory_and_update_buffer(): eval_info = eval_policy( online_env, online_rollout_policy, - n_episodes=cfg.training.online.rollout_n_episodes, - max_episodes_rendered=min(10, cfg.training.online.rollout_n_episodes), + n_episodes=cfg.online.rollout_n_episodes, + max_episodes_rendered=min(10, cfg.online.rollout_n_episodes), videos_dir=logger.log_dir / "online_rollout_videos", return_episode_data=True, - start_seed=( - rollout_start_seed := (rollout_start_seed + cfg.training.batch_size) % 1000000 - ), + start_seed=(rollout_start_seed := (rollout_start_seed + cfg.batch_size) % 1000000), ) online_rollout_s = time.perf_counter() - start_rollout_time @@ -458,7 +456,7 @@ def sample_trajectory_and_update_buffer(): # +1 because online rollouts return an extra frame for the "final observation". Note: we don't have # this final observation in the offline datasets, but we might add them in future. online_drop_n_last_frames=getattr(cfg.policy, "drop_n_last_frames", 0) + 1, - online_sampling_ratio=cfg.training.online.sampling_ratio, + online_sampling_ratio=cfg.online.sampling_ratio, ) sampler.num_frames = len(concat_dataset) @@ -469,20 +467,15 @@ def sample_trajectory_and_update_buffer(): future = executor.submit(sample_trajectory_and_update_buffer) # If we aren't doing async rollouts, or if we haven't yet gotten enough examples in our buffer, wait # here until the rollout and buffer update is done, before proceeding to the policy update steps. 
- if ( - not cfg.training.online.do_rollout_async - or len(online_dataset) <= cfg.training.online.buffer_seed_size - ): + if not cfg.online.do_rollout_async or len(online_dataset) <= cfg.online.buffer_seed_size: online_rollout_s, update_online_buffer_s = future.result() - if len(online_dataset) <= cfg.training.online.buffer_seed_size: - logging.info( - f"Seeding online buffer: {len(online_dataset)}/{cfg.training.online.buffer_seed_size}" - ) + if len(online_dataset) <= cfg.online.buffer_seed_size: + logging.info(f"Seeding online buffer: {len(online_dataset)}/{cfg.online.buffer_seed_size}") continue policy.train() - for _ in range(cfg.training.online.steps_between_rollouts): + for _ in range(cfg.online.steps_between_rollouts): with lock: start_time = time.perf_counter() batch = next(dl_iter) @@ -509,7 +502,7 @@ def sample_trajectory_and_update_buffer(): with lock: train_info["online_buffer_size"] = len(online_dataset) - if step % cfg.training.log_freq == 0: + if step % cfg.log_freq == 0: log_train_info(logger, train_info, step, cfg, online_dataset, is_online=True) # Note: evaluate_and_checkpoint_if_needed happens **after** the `step`th training update has completed, @@ -526,7 +519,7 @@ def sample_trajectory_and_update_buffer(): online_rollout_s, update_online_buffer_s = future.result() await_update_online_buffer_s = time.perf_counter() - start - if online_step >= cfg.training.online.steps: + if online_step >= cfg.online.steps: break if eval_env: From 72e84f27b096271265f643deb212cce2922f49e4 Mon Sep 17 00:00:00 2001 From: Simon Alibert Date: Mon, 6 Jan 2025 18:07:23 +0100 Subject: [PATCH 11/94] Add custom parser --- lerobot/configs/parser.py | 98 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 98 insertions(+) create mode 100644 lerobot/configs/parser.py diff --git a/lerobot/configs/parser.py b/lerobot/configs/parser.py new file mode 100644 index 0000000000..c2f6c1a0ea --- /dev/null +++ b/lerobot/configs/parser.py @@ -0,0 +1,98 @@ +import inspect +import sys +from argparse import ArgumentError +from dataclasses import Field +from functools import wraps +from pathlib import Path +from typing import Sequence + +import draccus + +PATH_KEY = "path" +draccus.set_config_type("json") + + +def get_cli_overrides(field_name: str, args: Sequence[str] | None = None) -> list[str] | None: + """Parses arguments from cli at a given nested attribute level. + + For example, supposing the main script was called with: + python myscript.py --arg1=1 --arg2.subarg1=abc --arg2.subarg2=some/path + + If called during execution of myscript.py, get_cli_overrides("arg2") will return: + ["--subarg1=abc" "--subarg2=some/path"] + """ + if args is None: + args = sys.argv[1:] + attr_level_args = [] + detect_string = f"--{field_name}." 
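+    # e.g. with field_name="policy", detect_string is "--policy.", so an arg such as
+    # "--policy.n_obs_steps=2" is denested below into "--n_obs_steps=2" (illustrative values).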
+ exclude_strings = (f"--{field_name}.{draccus.CHOICE_TYPE_KEY}=", f"--{field_name}.{PATH_KEY}=") + for arg in args: + if arg.startswith(detect_string) and not arg.startswith(exclude_strings): + denested_arg = f"--{arg.removeprefix(detect_string)}" + attr_level_args.append(denested_arg) + + return attr_level_args + + +def parse_arg(arg_name: str, args: Sequence[str] | None = None) -> str | None: + if args is None: + args = sys.argv[1:] + prefix = f"--{arg_name}=" + for arg in args: + if arg.startswith(prefix): + return arg[len(prefix) :] + return None + + +def get_path_arg(field_name: str, args: Sequence[str] | None = None) -> str | None: + return parse_arg(f"{field_name}.{PATH_KEY}", args) + + +def get_type_arg(field_name: str, args: Sequence[str] | None = None) -> str | None: + return parse_arg(f"{field_name}.{draccus.CHOICE_TYPE_KEY}", args) + + +def preprocess_path_args(fields: Field | list[Field], args: Sequence[str] | None = None) -> list[str]: + if isinstance(fields, Field): + fields = [fields] + + filtered_args = args + for field in fields: + if get_path_arg(field.name, args): + if get_type_arg(field.name, args): + raise ArgumentError( + argument=None, + message=f"Cannot specify both --{field.name}.{PATH_KEY} and --{field.name}.{draccus.CHOICE_TYPE_KEY}", + ) + filtered_args = [arg for arg in filtered_args if not arg.startswith(f"--{field.name}.")] + + return filtered_args + + +def wrap(config_path: Path | None = None, pathable_args: str | list[str] | None = None): + """ + HACK: Similar to draccus.wrap but adds a preprocessing of CLI args in order to overload specific parts of + the config from a config file at that particular nesting level. + """ + + def wrapper_outer(fn): + @wraps(fn) + def wrapper_inner(*args, **kwargs): + argspec = inspect.getfullargspec(fn) + argtype = argspec.annotations[argspec.args[0]] + if len(args) > 0 and type(args[0]) is argtype: + cfg = args[0] + args = args[1:] + else: + cli_args = sys.argv[1:] + get_path_fields = getattr(argtype, "__get_path_fields__", None) + if get_path_fields: + path_fields = get_path_fields() + cli_args = preprocess_path_args(path_fields, cli_args) + cfg = draccus.parse(config_class=argtype, config_path=config_path, args=cli_args) + response = fn(cfg, *args, **kwargs) + return response + + return wrapper_inner + + return wrapper_outer From f6443d9a1d11efd65bfca33797995dad77228a58 Mon Sep 17 00:00:00 2001 From: Simon Alibert Date: Mon, 6 Jan 2025 18:10:26 +0100 Subject: [PATCH 12/94] Update pretrained loading mechanisms --- lerobot/common/logger.py | 34 ++++------- lerobot/common/optim/factory.py | 21 +++++++ lerobot/common/policies/factory.py | 22 +++---- lerobot/configs/eval.py | 42 ++++++------- lerobot/configs/policies.py | 72 ++++++++++++++++++++-- lerobot/configs/training.py | 95 ++++++++++++++---------------- lerobot/scripts/eval.py | 12 ++-- lerobot/scripts/train.py | 13 ++-- 8 files changed, 178 insertions(+), 133 deletions(-) diff --git a/lerobot/common/logger.py b/lerobot/common/logger.py index 8709047e6c..31f90a85be 100644 --- a/lerobot/common/logger.py +++ b/lerobot/common/logger.py @@ -33,10 +33,13 @@ from torch.optim.lr_scheduler import LRScheduler from lerobot.common.policies.policy_protocol import Policy -from lerobot.common.utils.utils import get_global_random_state, set_global_random_state -from lerobot.configs.training import TrainPipelineConfig +from lerobot.common.utils.utils import get_global_random_state +from lerobot.configs.training import TRAIN_CONFIG_FILE, TrainPipelineConfig from lerobot.configs.types 
import FeatureType, NormalizationMode +PRETRAINED_MODEL = "pretrained_model" +TRAINING_STATE = "training_state.pth" + def log_output_dir(out_dir): logging.info(colored("Output dir:", "yellow", attrs=["bold"]) + f" {out_dir}") @@ -83,12 +86,12 @@ class Logger: │ └── last # a softlink to the last logged checkpoint """ - pretrained_model_dir_name = "pretrained_model" - training_state_file_name = "training_state.pth" + pretrained_model_dir_name = PRETRAINED_MODEL + training_state_file_name = TRAINING_STATE def __init__(self, cfg: TrainPipelineConfig): self._cfg = cfg - self.log_dir = cfg.dir + self.log_dir = cfg.output_dir self.log_dir.mkdir(parents=True, exist_ok=True) self.job_name = cfg.job_name self.checkpoints_dir = self.get_checkpoints_dir(self.log_dir) @@ -158,8 +161,8 @@ def save_model(self, save_dir: Path, policy: Policy, wandb_artifact_name: str | # register_features_types() policy.save_pretrained(save_dir) # Also save the full config for the env configuration. - with open(save_dir / "config.yaml", "w") as f: - draccus.dump(self._cfg, f) + with open(save_dir / TRAIN_CONFIG_FILE, "w") as f: + draccus.dump(self._cfg, f, indent=4) if self._wandb and not self._cfg.wandb.disable_artifact: # note wandb artifact does not accept ":" or "/" in its name artifact = self._wandb.Artifact(wandb_artifact_name, type="model") @@ -209,23 +212,6 @@ def save_checkpoint( self.save_training_state(checkpoint_dir, train_step, optimizer, scheduler) os.symlink(checkpoint_dir.absolute(), self.last_checkpoint_dir) - def load_last_training_state(self, optimizer: Optimizer, scheduler: LRScheduler | None) -> int: - """ - Given the last checkpoint in the logging directory, load the optimizer state, scheduler state, and - random state, and return the global training step. - """ - training_state = torch.load(self.last_checkpoint_dir / self.training_state_file_name) - optimizer.load_state_dict(training_state["optimizer"]) - if scheduler is not None: - scheduler.load_state_dict(training_state["scheduler"]) - elif "scheduler" in training_state: - raise ValueError( - "The checkpoint contains a scheduler state_dict, but no LRScheduler was provided." - ) - # Small hack to get the expected keys: use `get_global_random_state`. - set_global_random_state({k: training_state[k] for k in get_global_random_state()}) - return training_state["step"] - def log_dict(self, d: dict, step: int, mode: str = "train"): assert mode in {"train", "eval"} # TODO(alexander-soare): Add local text log. diff --git a/lerobot/common/optim/factory.py b/lerobot/common/optim/factory.py index 81f3352682..cc27313965 100644 --- a/lerobot/common/optim/factory.py +++ b/lerobot/common/optim/factory.py @@ -14,10 +14,15 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from pathlib import Path + +import torch from torch.optim import Optimizer from torch.optim.lr_scheduler import LRScheduler +from lerobot.common.logger import TRAINING_STATE from lerobot.common.policies import Policy +from lerobot.common.utils.utils import get_global_random_state, set_global_random_state from lerobot.configs.training import TrainPipelineConfig @@ -28,3 +33,19 @@ def make_optimizer_and_scheduler( optimizer = cfg.optimizer.build(params) lr_scheduler = cfg.scheduler.build(optimizer, cfg.offline.steps) return optimizer, lr_scheduler + + +def load_training_state(checkpoint_dir: Path, optimizer: Optimizer, scheduler: LRScheduler | None) -> int: + """ + Given the checkpoint directory, load the optimizer state, scheduler state, and random state, and + return the global training step. + """ + training_state = torch.load(checkpoint_dir / TRAINING_STATE) + optimizer.load_state_dict(training_state["optimizer"]) + if scheduler is not None: + scheduler.load_state_dict(training_state["scheduler"]) + elif "scheduler" in training_state: + raise ValueError("The checkpoint contains a scheduler state_dict, but no LRScheduler was provided.") + # Small HACK to get the expected keys: use `get_global_random_state`. + set_global_random_state({k: training_state[k] for k in get_global_random_state()}) + return training_state["step"], optimizer, scheduler diff --git a/lerobot/common/policies/factory.py b/lerobot/common/policies/factory.py index 9f215c7467..5036269111 100644 --- a/lerobot/common/policies/factory.py +++ b/lerobot/common/policies/factory.py @@ -51,7 +51,6 @@ def make_policy( ds_meta: LeRobotDatasetMetadata | None = None, env: gym.Env | None = None, env_cfg: EnvConfig | None = None, - pretrained_policy_name_or_path: str | None = None, ) -> Policy: """Make an instance of a policy class. @@ -67,7 +66,8 @@ def make_policy( if not (ds_meta is None) ^ (env is None and env_cfg is None): raise ValueError("Either one of a dataset metadata or a sim env must be provided.") - # Note: Currently, if you try to run vqbet with mps backend, you'll get this error. + # NOTE: Currently, if you try to run vqbet with mps backend, you'll get this error. + # TODO(aliberts, rcadene): Implement a check_backend_compatibility in policies? # NotImplementedError: The operator 'aten::unique_dim' is not currently implemented for the MPS device. If # you want this op to be added in priority during the prototype phase of this feature, please comment on # https://github.com/pytorch/pytorch/issues/77764. As a temporary fix, you can set the environment @@ -90,20 +90,14 @@ def make_policy( kwargs["config"] = cfg - if pretrained_policy_name_or_path is None: - # Make a fresh policy. - policy = policy_cls(**kwargs) - else: - kwargs["pretrained_model_name_or_path"] = pretrained_policy_name_or_path - policy = policy_cls.from_pretrained(**kwargs) + if cfg.pretrained_path: # Load a pretrained policy and override the config if needed (for example, if there are inference-time # hyperparameters that we want to vary). - # TODO(alexander-soare): This hack makes use of huggingface_hub's tooling to load the policy with, - # pretrained weights which are then loaded into a fresh policy with the desired config. This PR in - # huggingface_hub should make it possible to avoid the hack: - # https://github.com/huggingface/huggingface_hub/pull/2274. 
- # policy = policy_cls(cfg) - # policy.load_state_dict(policy_cls.from_pretrained(pretrained_policy_name_or_path).state_dict()) + kwargs["pretrained_model_name_or_path"] = cfg.pretrained_path + policy = policy_cls.from_pretrained(**kwargs) + else: + # Make a fresh policy. + policy = policy_cls(**kwargs) policy.to(device) assert isinstance(policy, nn.Module) diff --git a/lerobot/configs/eval.py b/lerobot/configs/eval.py index 74665175a5..97156f1ca0 100644 --- a/lerobot/configs/eval.py +++ b/lerobot/configs/eval.py @@ -1,11 +1,9 @@ import datetime as dt -from dataclasses import dataclass, replace +from dataclasses import Field, dataclass, fields from pathlib import Path -import draccus - -from lerobot.common import envs -from lerobot.common.policies.utils import get_pretrained_policy_path +from lerobot.common import envs # F401: noqa +from lerobot.configs import parser from lerobot.configs.policies import PretrainedConfig @@ -35,11 +33,10 @@ class EvalPipelineConfig: # Either the repo ID of a model hosted on the Hub or a path to a directory containing weights # saved using `Policy.save_pretrained`. If not provided, the policy is initialized from scratch # (useful for debugging). This argument is mutually exclusive with `--config`. - pretrained_policy_path: Path eval: EvalConfig env: envs.EnvConfig policy: PretrainedConfig | None = None - dir: Path | None = None + output_dir: Path | None = None job_name: str | None = None device: str = "cuda" # | cpu | mp # `use_amp` determines whether to use Automatic Mixed Precision (AMP) for training and evaluation. With AMP, @@ -48,28 +45,23 @@ class EvalPipelineConfig: seed: int | None = 1000 def __post_init__(self): - # TODO(aliberts, rcadene): move this logic out of the config - from time import sleep - - sleep(1) - self.resolve_policy_name_or_path() - cfg_path = self.pretrained_policy_path / "config.json" - with open(cfg_path) as f: - policy_cfg = draccus.load(PretrainedConfig, f) - - if self.policy is not None: - # override policy config - replace(policy_cfg, self.policy) - - self.policy = policy_cfg + # HACK: We parse again the cli args here to get the pretrained path if there was one. 
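+        # e.g. `--policy.path=outputs/train/<run>/checkpoints/last/pretrained_model` or a Hub repo id
+        # (illustrative path); any other `--policy.*` args are then applied on top as overrides.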
+ policy_path = parser.get_path_arg("policy") + if policy_path: + cli_overrides = parser.get_cli_overrides("policy") + self.policy = PretrainedConfig.from_pretrained(policy_path, cli_overrides=cli_overrides) + self.policy.set_pretrained_path(policy_path) if not self.job_name: self.job_name = f"{self.env.type}_{self.policy.type}" - if not self.dir: + if not self.output_dir: now = dt.datetime.now() eval_dir = f"{now:%Y-%m-%d}/{now:%H-%M-%S}_{self.job_name}" - self.dir = Path("outputs/eval") / eval_dir + self.output_dir = Path("outputs/eval") / eval_dir - def resolve_policy_name_or_path(self): - self.pretrained_policy_path = get_pretrained_policy_path(self.pretrained_policy_path) + @classmethod + def __get_path_fields__(cls) -> list[Field]: + """This enables the parser to load config from the policy using `--policy.path=local/dir`""" + path_fields = ["policy"] + return [f for f in fields(cls) if f.name in path_fields] diff --git a/lerobot/configs/policies.py b/lerobot/configs/policies.py index ad860468cb..d85287cd25 100644 --- a/lerobot/configs/policies.py +++ b/lerobot/configs/policies.py @@ -1,9 +1,15 @@ import abc +import os +from copy import copy from dataclasses import dataclass, field +from pathlib import Path from pprint import pformat +from typing import Type, TypeVar import draccus import gymnasium as gym +from huggingface_hub import ModelHubMixin, hf_hub_download +from huggingface_hub.errors import HfHubHTTPError from lerobot.common.datasets.lerobot_dataset import LeRobotDatasetMetadata from lerobot.common.datasets.utils import flatten_dict, get_nested_item @@ -12,6 +18,9 @@ from lerobot.common.optim.schedulers import LRSchedulerConfig from lerobot.configs.types import FeatureType, NormalizationMode +# Generic variable that is either ModelHubMixin or a subclass thereof +T = TypeVar("T", bound="PretrainedConfig") + @dataclass class PolicyFeature: @@ -22,7 +31,7 @@ class PolicyFeature: @dataclass -class PretrainedConfig(draccus.ChoiceRegistry, abc.ABC): +class PretrainedConfig(draccus.ChoiceRegistry, ModelHubMixin, abc.ABC): """ Base configuration class for policy models. @@ -37,18 +46,22 @@ class PretrainedConfig(draccus.ChoiceRegistry, abc.ABC): the original scale. 
""" - type: str = "" + type: str | None = "" n_obs_steps: int = 1 - normalization_mapping: dict[str, NormalizationMode] = field(default_factory=dict) def __post_init__(self): self.type = self.get_choice_name(self.__class__) + self.pretrained_path = None # @property - # def type(self) -> str: - # return self.get_choice_name(self.__class__) + # def pretrained_path(self) -> str | Path | None: + # return self._pretrained_path + + # @pretrained_path.setter + # def pretrained_path(self, path: str | Path): + # self._pretrained_path = path @abc.abstractproperty def observation_delta_indices(self) -> list | None: @@ -87,6 +100,55 @@ def input_features(self) -> list[PolicyFeature]: def output_features(self) -> list[PolicyFeature]: return [self.action_feature] + def _save_pretrained(self, save_directory: Path) -> None: + to_save = copy(self) + del to_save.path + with open(save_directory / "config.json", "w") as f, draccus.config_type("json"): + draccus.dump(self, f, indent=4) + + @classmethod + def from_pretrained( + cls: Type[T], + pretrained_model_name_or_path: str | Path, + *, + force_download: bool = False, + resume_download: bool = None, + proxies: dict | None = None, + token: str | bool | None = None, + cache_dir: str | Path | None = None, + local_files_only: bool = False, + revision: str | None = None, + **model_kwargs, + ) -> T: + model_id = str(pretrained_model_name_or_path) + config_file: str | None = None + if Path(model_id).is_dir(): + if "config.json" in os.listdir(model_id): + config_file = os.path.join(model_id, "config.json") + else: + print(f"config.json not found in {Path(model_id).resolve()}") + else: + try: + config_file = hf_hub_download( + repo_id=model_id, + filename="config.json", + revision=revision, + cache_dir=cache_dir, + force_download=force_download, + proxies=proxies, + resume_download=resume_download, + token=token, + local_files_only=local_files_only, + ) + except HfHubHTTPError as e: + print(f"config.json not found on the HuggingFace Hub: {str(e)}") + + # HACK: this is very ugly, ideally we'd like to be able to do that natively with draccus + # something like --policy.path (in addition to --policy.type) + cli_overrides = model_kwargs.pop("cli_overrides", []) + instance = draccus.parse(cls, config_file, args=[]) + return draccus.parse(instance.__class__, config_file, args=cli_overrides) + def parse_features_from_dataset(self, ds_meta: LeRobotDatasetMetadata): # TODO(aliberts): Implement PolicyFeature in LeRobotDataset and remove the need for this robot_state_features = [] diff --git a/lerobot/configs/training.py b/lerobot/configs/training.py index fa4d145c39..ca0b6656e7 100644 --- a/lerobot/configs/training.py +++ b/lerobot/configs/training.py @@ -1,19 +1,17 @@ import datetime as dt -import logging -from dataclasses import dataclass, field +from dataclasses import Field, dataclass, field, fields from pathlib import Path -from pprint import pformat - -import draccus -from deepdiff import DeepDiff from lerobot.common import envs from lerobot.common.optim import OptimizerConfig from lerobot.common.optim.schedulers import LRSchedulerConfig +from lerobot.configs import parser from lerobot.configs.default import DatasetConfig, WandBConfig from lerobot.configs.eval import EvalConfig from lerobot.configs.policies import PretrainedConfig +TRAIN_CONFIG_FILE = "train_config.json" + @dataclass class OfflineConfig: @@ -66,12 +64,12 @@ class OnlineConfig: @dataclass class TrainPipelineConfig: - policy: PretrainedConfig dataset: DatasetConfig env: envs.EnvConfig = 
field(default_factory=envs.RealEnv)
+    policy: PretrainedConfig | None = None
     # Set `dir` to where you would like to save all of the run outputs. If you run another training session
     # with the same value for `dir` its contents will be overwritten unless you set `resume` to true.
-    dir: Path | None = None
+    output_dir: Path | None = None
     job_name: str | None = None
     # Set `resume` to true to resume a previous run. In order for this to work, you will need to make sure
     # `dir` is the directory of an existing run with at least one checkpoint in it.
@@ -102,62 +100,55 @@ class TrainPipelineConfig:
     wandb: WandBConfig = field(default_factory=WandBConfig)

     def __post_init__(self):
+        self.checkpoint_path = None
+
+        # HACK: We parse again the cli args here to get the pretrained paths if there was some.
+        if self.resume:
+            # The entire train config is already loaded, we just need to get the checkpoint dir
+            config_path = parser.parse_arg("config_path")
+            policy_path = Path(config_path).parent
+            self.policy.pretrained_path = policy_path
+            self.checkpoint_path = policy_path.parent
+        else:
+            # Only load the policy config
+            policy_path = parser.get_path_arg("policy")
+            if policy_path:
+                cli_overrides = parser.get_cli_overrides("policy")
+                self.policy = PretrainedConfig.from_pretrained(policy_path, cli_overrides=cli_overrides)
+                self.policy.set_pretrained_path(policy_path)
+
         if not self.job_name:
             self.job_name = f"{self.env.type}_{self.policy.type}"

-        if not self.dir:
+        if not self.resume and isinstance(self.output_dir, Path) and self.output_dir.is_dir():
+            raise FileExistsError(
+                f"Output directory {self.output_dir} already exists and resume is {self.resume}. "
+                f"Please change your output directory so that {self.output_dir} is not overwritten."
+            )
+        elif not self.output_dir:
             now = dt.datetime.now()
             train_dir = f"{now:%Y-%m-%d}/{now:%H-%M-%S}_{self.job_name}"
-            self.dir = Path("outputs/train") / train_dir
+            self.output_dir = Path("outputs/train") / train_dir

         if self.online.steps > 0 and isinstance(self.dataset.repo_id, list):
             raise NotImplementedError("Online training with LeRobotMultiDataset is not implemented.")

         if not self.use_policy_training_preset and (self.optimizer is None or self.scheduler is None):
             raise ValueError("Optimizer and Scheduler must be set when the policy presets are not used.")
-        elif self.use_policy_training_preset:
+        elif self.use_policy_training_preset and not self.resume:
             self.optimizer = self.policy.get_optimizer_preset()
             self.scheduler = self.policy.get_scheduler_preset()

-        # If we are resuming a run, we need to check that a checkpoint exists in the log directory, and we need
-        # to check for any differences between the provided config and the checkpoint's config.
-        checkpoint_cfg_path = self.dir / "checkpoints/last/config.yaml"
-        if self.resume:
-            if not checkpoint_cfg_path.exists():
-                raise RuntimeError(
-                    f"You have set resume=True, but there is no model checkpoint in {self.dir}"
-                )
-
-            # Get the configuration file from the last checkpoint.
-            checkpoint_cfg = self.from_checkpoint(checkpoint_cfg_path)
-
-            # # Check for differences between the checkpoint configuration and provided configuration.
-            # # Hack to resolve the delta_timestamps ahead of time in order to properly diff.
-            # resolve_delta_timestamps(cfg)
-            diff = DeepDiff(checkpoint_cfg, self)
-            # Ignore the `resume` and parameters.
- if "values_changed" in diff and "root['resume']" in diff["values_changed"]: - del diff["values_changed"]["root['resume']"] - # Log a warning about differences between the checkpoint configuration and the provided - # configuration. - if len(diff) > 0: - logging.warning( - "At least one difference was detected between the checkpoint configuration and " - f"the provided configuration: \n{pformat(diff)}\nNote that the checkpoint configuration " - "takes precedence.", - ) - # Use the checkpoint config instead of the provided config (but keep `resume` parameter). - self = checkpoint_cfg - self.resume = True - - elif checkpoint_cfg_path.exists(): - raise RuntimeError( - f"The configured output directory {checkpoint_cfg_path} already exists. If " - "you meant to resume training, please use `resume=true` in your command or yaml configuration." - ) - @classmethod - def from_checkpoint(cls, config_path: Path): - with open(config_path) as f: - cfg = draccus.load(cls, f) - return cfg + def __get_path_fields__(cls) -> list[Field]: + """This enables the parser to load config from the policy using `--policy.path=local/dir`""" + path_fields = ["policy"] + return [f for f in fields(cls) if f.name in path_fields] + + # @property + # def checkpoint_path(self) -> str | Path | None: + # return self._checkpoint_path + + # @checkpoint_path.setter + # def set_checkpoint_path(self, path: str | Path): + # self._checkpoint_path = path diff --git a/lerobot/scripts/eval.py b/lerobot/scripts/eval.py index 868a27c3c5..5d408a10a8 100644 --- a/lerobot/scripts/eval.py +++ b/lerobot/scripts/eval.py @@ -50,7 +50,6 @@ from pathlib import Path from typing import Callable -import draccus import einops import gymnasium as gym import numpy as np @@ -70,6 +69,7 @@ inside_slurm, set_global_seed, ) +from lerobot.configs import parser from lerobot.configs.eval import EvalPipelineConfig @@ -436,7 +436,7 @@ def _compile_episode_data( return data_dict -@draccus.wrap() +@parser.wrap(pathable_args=["policy"]) def eval(cfg: EvalPipelineConfig): # Check device is available device = get_safe_torch_device(cfg.device, log=True) @@ -445,7 +445,7 @@ def eval(cfg: EvalPipelineConfig): torch.backends.cuda.matmul.allow_tf32 = True set_global_seed(cfg.seed) - log_output_dir(cfg.dir) + log_output_dir(cfg.output_dir) logging.info("Making environment.") env = make_env(cfg.env, n_envs=cfg.eval.batch_size, use_async_envs=cfg.eval.use_async_envs) @@ -456,7 +456,7 @@ def eval(cfg: EvalPipelineConfig): device=device, env=env, env_cfg=cfg.env, - pretrained_policy_name_or_path=str(cfg.pretrained_policy_path), + # pretrained_policy_name_or_path=str(cfg.policy_path), ) policy.eval() @@ -466,13 +466,13 @@ def eval(cfg: EvalPipelineConfig): policy, cfg.eval.n_episodes, max_episodes_rendered=10, - videos_dir=Path(cfg.dir) / "videos", + videos_dir=Path(cfg.output_dir) / "videos", start_seed=cfg.seed, ) print(info["aggregated"]) # Save info - with open(Path(cfg.dir) / "eval_info.json", "w") as f: + with open(Path(cfg.output_dir) / "eval_info.json", "w") as f: json.dump(info, f, indent=2) env.close() diff --git a/lerobot/scripts/train.py b/lerobot/scripts/train.py index 8769ed4a78..0dbb098ca4 100644 --- a/lerobot/scripts/train.py +++ b/lerobot/scripts/train.py @@ -22,7 +22,6 @@ from pprint import pformat from threading import Lock -import draccus import numpy as np import torch from torch.amp import GradScaler @@ -34,7 +33,7 @@ from lerobot.common.datasets.utils import cycle from lerobot.common.envs.factory import make_env from lerobot.common.logger import Logger, 
log_output_dir -from lerobot.common.optim.factory import make_optimizer_and_scheduler +from lerobot.common.optim.factory import load_training_state, make_optimizer_and_scheduler from lerobot.common.policies.factory import make_policy from lerobot.common.policies.policy_protocol import PolicyWithUpdate from lerobot.common.policies.utils import get_device_from_parameters @@ -44,6 +43,7 @@ init_logging, set_global_seed, ) +from lerobot.configs import parser from lerobot.configs.training import TrainPipelineConfig from lerobot.scripts.eval import eval_policy @@ -180,7 +180,7 @@ def log_eval_info(logger, info, step, cfg, dataset, is_online): logger.log_dict(info, step, mode="eval") -@draccus.wrap() +@parser.wrap(pathable_args=["policy"]) def train(cfg: TrainPipelineConfig): init_logging() logging.info(pformat(asdict(cfg))) @@ -213,7 +213,6 @@ def train(cfg: TrainPipelineConfig): cfg=cfg.policy, device=device, ds_meta=offline_dataset.meta, - pretrained_policy_name_or_path=str(logger.last_pretrained_model_dir) if cfg.resume else None, ) logging.info("Creating optimizer and scheduler") optimizer, lr_scheduler = make_optimizer_and_scheduler(cfg, policy) @@ -222,12 +221,12 @@ def train(cfg: TrainPipelineConfig): step = 0 # number of policy updates (forward + backward + optim) if cfg.resume: - step = logger.load_last_training_state(optimizer, lr_scheduler) + step, optimizer, lr_scheduler = load_training_state(cfg.checkpoint_path, optimizer, lr_scheduler) num_learnable_params = sum(p.numel() for p in policy.parameters() if p.requires_grad) num_total_params = sum(p.numel() for p in policy.parameters()) - log_output_dir(cfg.dir) + log_output_dir(cfg.output_dir) logging.info(f"{cfg.env.task=}") logging.info(f"{cfg.offline.steps=} ({format_big_number(cfg.offline.steps)})") logging.info(f"{cfg.online.steps=}") @@ -249,7 +248,7 @@ def evaluate_and_checkpoint_if_needed(step: int, is_online: bool): eval_env, policy, cfg.eval.n_episodes, - videos_dir=cfg.dir / "eval" / f"videos_step_{step_identifier}", + videos_dir=cfg.output_dir / "eval" / f"videos_step_{step_identifier}", max_episodes_rendered=4, start_seed=cfg.seed, ) From 06b604bbe2472195736dda3397098ead0cc8418d Mon Sep 17 00:00:00 2001 From: Simon Alibert Date: Mon, 6 Jan 2025 18:14:12 +0100 Subject: [PATCH 13/94] Add dependency fixes & lock update --- poetry.lock | 4440 ++++++++++++++++++++++++------------------------ pyproject.toml | 6 +- 2 files changed, 2241 insertions(+), 2205 deletions(-) diff --git a/poetry.lock b/poetry.lock index df7a7b9aa4..8d86d3ed02 100644 --- a/poetry.lock +++ b/poetry.lock @@ -13,123 +13,109 @@ files = [ [[package]] name = "aiohappyeyeballs" -version = "2.4.3" +version = "2.4.4" description = "Happy Eyeballs for asyncio" optional = false python-versions = ">=3.8" files = [ - {file = "aiohappyeyeballs-2.4.3-py3-none-any.whl", hash = "sha256:8a7a83727b2756f394ab2895ea0765a0a8c475e3c71e98d43d76f22b4b435572"}, - {file = "aiohappyeyeballs-2.4.3.tar.gz", hash = "sha256:75cf88a15106a5002a8eb1dab212525c00d1f4c0fa96e551c9fbe6f09a621586"}, + {file = "aiohappyeyeballs-2.4.4-py3-none-any.whl", hash = "sha256:a980909d50efcd44795c4afeca523296716d50cd756ddca6af8c65b996e27de8"}, + {file = "aiohappyeyeballs-2.4.4.tar.gz", hash = "sha256:5fdd7d87889c63183afc18ce9271f9b0a7d32c2303e394468dd45d514a757745"}, ] [[package]] name = "aiohttp" -version = "3.10.10" +version = "3.11.11" description = "Async http client/server framework (asyncio)" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = 
"aiohttp-3.10.10-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:be7443669ae9c016b71f402e43208e13ddf00912f47f623ee5994e12fc7d4b3f"}, - {file = "aiohttp-3.10.10-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:7b06b7843929e41a94ea09eb1ce3927865387e3e23ebe108e0d0d09b08d25be9"}, - {file = "aiohttp-3.10.10-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:333cf6cf8e65f6a1e06e9eb3e643a0c515bb850d470902274239fea02033e9a8"}, - {file = "aiohttp-3.10.10-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:274cfa632350225ce3fdeb318c23b4a10ec25c0e2c880eff951a3842cf358ac1"}, - {file = "aiohttp-3.10.10-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d9e5e4a85bdb56d224f412d9c98ae4cbd032cc4f3161818f692cd81766eee65a"}, - {file = "aiohttp-3.10.10-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2b606353da03edcc71130b52388d25f9a30a126e04caef1fd637e31683033abd"}, - {file = "aiohttp-3.10.10-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ab5a5a0c7a7991d90446a198689c0535be89bbd6b410a1f9a66688f0880ec026"}, - {file = "aiohttp-3.10.10-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:578a4b875af3e0daaf1ac6fa983d93e0bbfec3ead753b6d6f33d467100cdc67b"}, - {file = "aiohttp-3.10.10-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:8105fd8a890df77b76dd3054cddf01a879fc13e8af576805d667e0fa0224c35d"}, - {file = "aiohttp-3.10.10-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:3bcd391d083f636c06a68715e69467963d1f9600f85ef556ea82e9ef25f043f7"}, - {file = "aiohttp-3.10.10-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:fbc6264158392bad9df19537e872d476f7c57adf718944cc1e4495cbabf38e2a"}, - {file = "aiohttp-3.10.10-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:e48d5021a84d341bcaf95c8460b152cfbad770d28e5fe14a768988c461b821bc"}, - {file = "aiohttp-3.10.10-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:2609e9ab08474702cc67b7702dbb8a80e392c54613ebe80db7e8dbdb79837c68"}, - {file = "aiohttp-3.10.10-cp310-cp310-win32.whl", hash = "sha256:84afcdea18eda514c25bc68b9af2a2b1adea7c08899175a51fe7c4fb6d551257"}, - {file = "aiohttp-3.10.10-cp310-cp310-win_amd64.whl", hash = "sha256:9c72109213eb9d3874f7ac8c0c5fa90e072d678e117d9061c06e30c85b4cf0e6"}, - {file = "aiohttp-3.10.10-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:c30a0eafc89d28e7f959281b58198a9fa5e99405f716c0289b7892ca345fe45f"}, - {file = "aiohttp-3.10.10-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:258c5dd01afc10015866114e210fb7365f0d02d9d059c3c3415382ab633fcbcb"}, - {file = "aiohttp-3.10.10-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:15ecd889a709b0080f02721255b3f80bb261c2293d3c748151274dfea93ac871"}, - {file = "aiohttp-3.10.10-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f3935f82f6f4a3820270842e90456ebad3af15810cf65932bd24da4463bc0a4c"}, - {file = "aiohttp-3.10.10-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:413251f6fcf552a33c981c4709a6bba37b12710982fec8e558ae944bfb2abd38"}, - {file = "aiohttp-3.10.10-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d1720b4f14c78a3089562b8875b53e36b51c97c51adc53325a69b79b4b48ebcb"}, - {file = "aiohttp-3.10.10-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:679abe5d3858b33c2cf74faec299fda60ea9de62916e8b67e625d65bf069a3b7"}, - {file = 
"aiohttp-3.10.10-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:79019094f87c9fb44f8d769e41dbb664d6e8fcfd62f665ccce36762deaa0e911"}, - {file = "aiohttp-3.10.10-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:fe2fb38c2ed905a2582948e2de560675e9dfbee94c6d5ccdb1301c6d0a5bf092"}, - {file = "aiohttp-3.10.10-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:a3f00003de6eba42d6e94fabb4125600d6e484846dbf90ea8e48a800430cc142"}, - {file = "aiohttp-3.10.10-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:1bbb122c557a16fafc10354b9d99ebf2f2808a660d78202f10ba9d50786384b9"}, - {file = "aiohttp-3.10.10-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:30ca7c3b94708a9d7ae76ff281b2f47d8eaf2579cd05971b5dc681db8caac6e1"}, - {file = "aiohttp-3.10.10-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:df9270660711670e68803107d55c2b5949c2e0f2e4896da176e1ecfc068b974a"}, - {file = "aiohttp-3.10.10-cp311-cp311-win32.whl", hash = "sha256:aafc8ee9b742ce75044ae9a4d3e60e3d918d15a4c2e08a6c3c3e38fa59b92d94"}, - {file = "aiohttp-3.10.10-cp311-cp311-win_amd64.whl", hash = "sha256:362f641f9071e5f3ee6f8e7d37d5ed0d95aae656adf4ef578313ee585b585959"}, - {file = "aiohttp-3.10.10-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:9294bbb581f92770e6ed5c19559e1e99255e4ca604a22c5c6397b2f9dd3ee42c"}, - {file = "aiohttp-3.10.10-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:a8fa23fe62c436ccf23ff930149c047f060c7126eae3ccea005f0483f27b2e28"}, - {file = "aiohttp-3.10.10-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5c6a5b8c7926ba5d8545c7dd22961a107526562da31a7a32fa2456baf040939f"}, - {file = "aiohttp-3.10.10-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:007ec22fbc573e5eb2fb7dec4198ef8f6bf2fe4ce20020798b2eb5d0abda6138"}, - {file = "aiohttp-3.10.10-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9627cc1a10c8c409b5822a92d57a77f383b554463d1884008e051c32ab1b3742"}, - {file = "aiohttp-3.10.10-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:50edbcad60d8f0e3eccc68da67f37268b5144ecc34d59f27a02f9611c1d4eec7"}, - {file = "aiohttp-3.10.10-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a45d85cf20b5e0d0aa5a8dca27cce8eddef3292bc29d72dcad1641f4ed50aa16"}, - {file = "aiohttp-3.10.10-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0b00807e2605f16e1e198f33a53ce3c4523114059b0c09c337209ae55e3823a8"}, - {file = "aiohttp-3.10.10-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:f2d4324a98062be0525d16f768a03e0bbb3b9fe301ceee99611dc9a7953124e6"}, - {file = "aiohttp-3.10.10-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:438cd072f75bb6612f2aca29f8bd7cdf6e35e8f160bc312e49fbecab77c99e3a"}, - {file = "aiohttp-3.10.10-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:baa42524a82f75303f714108fea528ccacf0386af429b69fff141ffef1c534f9"}, - {file = "aiohttp-3.10.10-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:a7d8d14fe962153fc681f6366bdec33d4356f98a3e3567782aac1b6e0e40109a"}, - {file = "aiohttp-3.10.10-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c1277cd707c465cd09572a774559a3cc7c7a28802eb3a2a9472588f062097205"}, - {file = "aiohttp-3.10.10-cp312-cp312-win32.whl", hash = "sha256:59bb3c54aa420521dc4ce3cc2c3fe2ad82adf7b09403fa1f48ae45c0cbde6628"}, - {file = "aiohttp-3.10.10-cp312-cp312-win_amd64.whl", hash = 
"sha256:0e1b370d8007c4ae31ee6db7f9a2fe801a42b146cec80a86766e7ad5c4a259cf"}, - {file = "aiohttp-3.10.10-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ad7593bb24b2ab09e65e8a1d385606f0f47c65b5a2ae6c551db67d6653e78c28"}, - {file = "aiohttp-3.10.10-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:1eb89d3d29adaf533588f209768a9c02e44e4baf832b08118749c5fad191781d"}, - {file = "aiohttp-3.10.10-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:3fe407bf93533a6fa82dece0e74dbcaaf5d684e5a51862887f9eaebe6372cd79"}, - {file = "aiohttp-3.10.10-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50aed5155f819873d23520919e16703fc8925e509abbb1a1491b0087d1cd969e"}, - {file = "aiohttp-3.10.10-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4f05e9727ce409358baa615dbeb9b969db94324a79b5a5cea45d39bdb01d82e6"}, - {file = "aiohttp-3.10.10-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3dffb610a30d643983aeb185ce134f97f290f8935f0abccdd32c77bed9388b42"}, - {file = "aiohttp-3.10.10-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa6658732517ddabe22c9036479eabce6036655ba87a0224c612e1ae6af2087e"}, - {file = "aiohttp-3.10.10-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:741a46d58677d8c733175d7e5aa618d277cd9d880301a380fd296975a9cdd7bc"}, - {file = "aiohttp-3.10.10-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:e00e3505cd80440f6c98c6d69269dcc2a119f86ad0a9fd70bccc59504bebd68a"}, - {file = "aiohttp-3.10.10-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:ffe595f10566f8276b76dc3a11ae4bb7eba1aac8ddd75811736a15b0d5311414"}, - {file = "aiohttp-3.10.10-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:bdfcf6443637c148c4e1a20c48c566aa694fa5e288d34b20fcdc58507882fed3"}, - {file = "aiohttp-3.10.10-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:d183cf9c797a5291e8301790ed6d053480ed94070637bfaad914dd38b0981f67"}, - {file = "aiohttp-3.10.10-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:77abf6665ae54000b98b3c742bc6ea1d1fb31c394bcabf8b5d2c1ac3ebfe7f3b"}, - {file = "aiohttp-3.10.10-cp313-cp313-win32.whl", hash = "sha256:4470c73c12cd9109db8277287d11f9dd98f77fc54155fc71a7738a83ffcc8ea8"}, - {file = "aiohttp-3.10.10-cp313-cp313-win_amd64.whl", hash = "sha256:486f7aabfa292719a2753c016cc3a8f8172965cabb3ea2e7f7436c7f5a22a151"}, - {file = "aiohttp-3.10.10-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:1b66ccafef7336a1e1f0e389901f60c1d920102315a56df85e49552308fc0486"}, - {file = "aiohttp-3.10.10-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:acd48d5b80ee80f9432a165c0ac8cbf9253eaddb6113269a5e18699b33958dbb"}, - {file = "aiohttp-3.10.10-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:3455522392fb15ff549d92fbf4b73b559d5e43dc522588f7eb3e54c3f38beee7"}, - {file = "aiohttp-3.10.10-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45c3b868724137f713a38376fef8120c166d1eadd50da1855c112fe97954aed8"}, - {file = "aiohttp-3.10.10-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:da1dee8948d2137bb51fbb8a53cce6b1bcc86003c6b42565f008438b806cccd8"}, - {file = "aiohttp-3.10.10-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c5ce2ce7c997e1971b7184ee37deb6ea9922ef5163c6ee5aa3c274b05f9e12fa"}, - {file = "aiohttp-3.10.10-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:28529e08fde6f12eba8677f5a8608500ed33c086f974de68cc65ab218713a59d"}, - {file = 
"aiohttp-3.10.10-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f7db54c7914cc99d901d93a34704833568d86c20925b2762f9fa779f9cd2e70f"}, - {file = "aiohttp-3.10.10-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:03a42ac7895406220124c88911ebee31ba8b2d24c98507f4a8bf826b2937c7f2"}, - {file = "aiohttp-3.10.10-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:7e338c0523d024fad378b376a79faff37fafb3c001872a618cde1d322400a572"}, - {file = "aiohttp-3.10.10-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:038f514fe39e235e9fef6717fbf944057bfa24f9b3db9ee551a7ecf584b5b480"}, - {file = "aiohttp-3.10.10-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:64f6c17757251e2b8d885d728b6433d9d970573586a78b78ba8929b0f41d045a"}, - {file = "aiohttp-3.10.10-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:93429602396f3383a797a2a70e5f1de5df8e35535d7806c9f91df06f297e109b"}, - {file = "aiohttp-3.10.10-cp38-cp38-win32.whl", hash = "sha256:c823bc3971c44ab93e611ab1a46b1eafeae474c0c844aff4b7474287b75fe49c"}, - {file = "aiohttp-3.10.10-cp38-cp38-win_amd64.whl", hash = "sha256:54ca74df1be3c7ca1cf7f4c971c79c2daf48d9aa65dea1a662ae18926f5bc8ce"}, - {file = "aiohttp-3.10.10-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:01948b1d570f83ee7bbf5a60ea2375a89dfb09fd419170e7f5af029510033d24"}, - {file = "aiohttp-3.10.10-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9fc1500fd2a952c5c8e3b29aaf7e3cc6e27e9cfc0a8819b3bce48cc1b849e4cc"}, - {file = "aiohttp-3.10.10-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f614ab0c76397661b90b6851a030004dac502e48260ea10f2441abd2207fbcc7"}, - {file = "aiohttp-3.10.10-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:00819de9e45d42584bed046314c40ea7e9aea95411b38971082cad449392b08c"}, - {file = "aiohttp-3.10.10-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:05646ebe6b94cc93407b3bf34b9eb26c20722384d068eb7339de802154d61bc5"}, - {file = "aiohttp-3.10.10-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:998f3bd3cfc95e9424a6acd7840cbdd39e45bc09ef87533c006f94ac47296090"}, - {file = "aiohttp-3.10.10-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d9010c31cd6fa59438da4e58a7f19e4753f7f264300cd152e7f90d4602449762"}, - {file = "aiohttp-3.10.10-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7ea7ffc6d6d6f8a11e6f40091a1040995cdff02cfc9ba4c2f30a516cb2633554"}, - {file = "aiohttp-3.10.10-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:ef9c33cc5cbca35808f6c74be11eb7f5f6b14d2311be84a15b594bd3e58b5527"}, - {file = "aiohttp-3.10.10-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:ce0cdc074d540265bfeb31336e678b4e37316849d13b308607efa527e981f5c2"}, - {file = "aiohttp-3.10.10-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:597a079284b7ee65ee102bc3a6ea226a37d2b96d0418cc9047490f231dc09fe8"}, - {file = "aiohttp-3.10.10-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:7789050d9e5d0c309c706953e5e8876e38662d57d45f936902e176d19f1c58ab"}, - {file = "aiohttp-3.10.10-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:e7f8b04d83483577fd9200461b057c9f14ced334dcb053090cea1da9c8321a91"}, - {file = "aiohttp-3.10.10-cp39-cp39-win32.whl", hash = "sha256:c02a30b904282777d872266b87b20ed8cc0d1501855e27f831320f471d54d983"}, - {file = "aiohttp-3.10.10-cp39-cp39-win_amd64.whl", hash = "sha256:edfe3341033a6b53a5c522c802deb2079eee5cbfbb0af032a55064bd65c73a23"}, - {file = "aiohttp-3.10.10.tar.gz", hash = 
"sha256:0631dd7c9f0822cc61c88586ca76d5b5ada26538097d0f1df510b082bad3411a"}, + {file = "aiohttp-3.11.11-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a60804bff28662cbcf340a4d61598891f12eea3a66af48ecfdc975ceec21e3c8"}, + {file = "aiohttp-3.11.11-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:4b4fa1cb5f270fb3eab079536b764ad740bb749ce69a94d4ec30ceee1b5940d5"}, + {file = "aiohttp-3.11.11-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:731468f555656767cda219ab42e033355fe48c85fbe3ba83a349631541715ba2"}, + {file = "aiohttp-3.11.11-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb23d8bb86282b342481cad4370ea0853a39e4a32a0042bb52ca6bdde132df43"}, + {file = "aiohttp-3.11.11-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f047569d655f81cb70ea5be942ee5d4421b6219c3f05d131f64088c73bb0917f"}, + {file = "aiohttp-3.11.11-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dd7659baae9ccf94ae5fe8bfaa2c7bc2e94d24611528395ce88d009107e00c6d"}, + {file = "aiohttp-3.11.11-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:af01e42ad87ae24932138f154105e88da13ce7d202a6de93fafdafb2883a00ef"}, + {file = "aiohttp-3.11.11-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5854be2f3e5a729800bac57a8d76af464e160f19676ab6aea74bde18ad19d438"}, + {file = "aiohttp-3.11.11-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:6526e5fb4e14f4bbf30411216780c9967c20c5a55f2f51d3abd6de68320cc2f3"}, + {file = "aiohttp-3.11.11-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:85992ee30a31835fc482468637b3e5bd085fa8fe9392ba0bdcbdc1ef5e9e3c55"}, + {file = "aiohttp-3.11.11-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:88a12ad8ccf325a8a5ed80e6d7c3bdc247d66175afedbe104ee2aaca72960d8e"}, + {file = "aiohttp-3.11.11-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:0a6d3fbf2232e3a08c41eca81ae4f1dff3d8f1a30bae415ebe0af2d2458b8a33"}, + {file = "aiohttp-3.11.11-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:84a585799c58b795573c7fa9b84c455adf3e1d72f19a2bf498b54a95ae0d194c"}, + {file = "aiohttp-3.11.11-cp310-cp310-win32.whl", hash = "sha256:bfde76a8f430cf5c5584553adf9926534352251d379dcb266ad2b93c54a29745"}, + {file = "aiohttp-3.11.11-cp310-cp310-win_amd64.whl", hash = "sha256:0fd82b8e9c383af11d2b26f27a478640b6b83d669440c0a71481f7c865a51da9"}, + {file = "aiohttp-3.11.11-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:ba74ec819177af1ef7f59063c6d35a214a8fde6f987f7661f4f0eecc468a8f76"}, + {file = "aiohttp-3.11.11-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4af57160800b7a815f3fe0eba9b46bf28aafc195555f1824555fa2cfab6c1538"}, + {file = "aiohttp-3.11.11-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ffa336210cf9cd8ed117011085817d00abe4c08f99968deef0013ea283547204"}, + {file = "aiohttp-3.11.11-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:81b8fe282183e4a3c7a1b72f5ade1094ed1c6345a8f153506d114af5bf8accd9"}, + {file = "aiohttp-3.11.11-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3af41686ccec6a0f2bdc66686dc0f403c41ac2089f80e2214a0f82d001052c03"}, + {file = "aiohttp-3.11.11-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:70d1f9dde0e5dd9e292a6d4d00058737052b01f3532f69c0c65818dac26dc287"}, + {file = "aiohttp-3.11.11-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:249cc6912405917344192b9f9ea5cd5b139d49e0d2f5c7f70bdfaf6b4dbf3a2e"}, 
+ {file = "aiohttp-3.11.11-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0eb98d90b6690827dcc84c246811feeb4e1eea683c0eac6caed7549be9c84665"}, + {file = "aiohttp-3.11.11-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:ec82bf1fda6cecce7f7b915f9196601a1bd1a3079796b76d16ae4cce6d0ef89b"}, + {file = "aiohttp-3.11.11-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:9fd46ce0845cfe28f108888b3ab17abff84ff695e01e73657eec3f96d72eef34"}, + {file = "aiohttp-3.11.11-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:bd176afcf8f5d2aed50c3647d4925d0db0579d96f75a31e77cbaf67d8a87742d"}, + {file = "aiohttp-3.11.11-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:ec2aa89305006fba9ffb98970db6c8221541be7bee4c1d027421d6f6df7d1ce2"}, + {file = "aiohttp-3.11.11-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:92cde43018a2e17d48bb09c79e4d4cb0e236de5063ce897a5e40ac7cb4878773"}, + {file = "aiohttp-3.11.11-cp311-cp311-win32.whl", hash = "sha256:aba807f9569455cba566882c8938f1a549f205ee43c27b126e5450dc9f83cc62"}, + {file = "aiohttp-3.11.11-cp311-cp311-win_amd64.whl", hash = "sha256:ae545f31489548c87b0cced5755cfe5a5308d00407000e72c4fa30b19c3220ac"}, + {file = "aiohttp-3.11.11-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:e595c591a48bbc295ebf47cb91aebf9bd32f3ff76749ecf282ea7f9f6bb73886"}, + {file = "aiohttp-3.11.11-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:3ea1b59dc06396b0b424740a10a0a63974c725b1c64736ff788a3689d36c02d2"}, + {file = "aiohttp-3.11.11-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8811f3f098a78ffa16e0ea36dffd577eb031aea797cbdba81be039a4169e242c"}, + {file = "aiohttp-3.11.11-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bd7227b87a355ce1f4bf83bfae4399b1f5bb42e0259cb9405824bd03d2f4336a"}, + {file = "aiohttp-3.11.11-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d40f9da8cabbf295d3a9dae1295c69975b86d941bc20f0a087f0477fa0a66231"}, + {file = "aiohttp-3.11.11-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ffb3dc385f6bb1568aa974fe65da84723210e5d9707e360e9ecb51f59406cd2e"}, + {file = "aiohttp-3.11.11-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a8f5f7515f3552d899c61202d99dcb17d6e3b0de777900405611cd747cecd1b8"}, + {file = "aiohttp-3.11.11-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3499c7ffbfd9c6a3d8d6a2b01c26639da7e43d47c7b4f788016226b1e711caa8"}, + {file = "aiohttp-3.11.11-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:8e2bf8029dbf0810c7bfbc3e594b51c4cc9101fbffb583a3923aea184724203c"}, + {file = "aiohttp-3.11.11-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:b6212a60e5c482ef90f2d788835387070a88d52cf6241d3916733c9176d39eab"}, + {file = "aiohttp-3.11.11-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:d119fafe7b634dbfa25a8c597718e69a930e4847f0b88e172744be24515140da"}, + {file = "aiohttp-3.11.11-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:6fba278063559acc730abf49845d0e9a9e1ba74f85f0ee6efd5803f08b285853"}, + {file = "aiohttp-3.11.11-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:92fc484e34b733704ad77210c7957679c5c3877bd1e6b6d74b185e9320cc716e"}, + {file = "aiohttp-3.11.11-cp312-cp312-win32.whl", hash = "sha256:9f5b3c1ed63c8fa937a920b6c1bec78b74ee09593b3f5b979ab2ae5ef60d7600"}, + {file = "aiohttp-3.11.11-cp312-cp312-win_amd64.whl", hash = 
"sha256:1e69966ea6ef0c14ee53ef7a3d68b564cc408121ea56c0caa2dc918c1b2f553d"}, + {file = "aiohttp-3.11.11-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:541d823548ab69d13d23730a06f97460f4238ad2e5ed966aaf850d7c369782d9"}, + {file = "aiohttp-3.11.11-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:929f3ed33743a49ab127c58c3e0a827de0664bfcda566108989a14068f820194"}, + {file = "aiohttp-3.11.11-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0882c2820fd0132240edbb4a51eb8ceb6eef8181db9ad5291ab3332e0d71df5f"}, + {file = "aiohttp-3.11.11-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b63de12e44935d5aca7ed7ed98a255a11e5cb47f83a9fded7a5e41c40277d104"}, + {file = "aiohttp-3.11.11-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aa54f8ef31d23c506910c21163f22b124facb573bff73930735cf9fe38bf7dff"}, + {file = "aiohttp-3.11.11-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a344d5dc18074e3872777b62f5f7d584ae4344cd6006c17ba12103759d407af3"}, + {file = "aiohttp-3.11.11-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0b7fb429ab1aafa1f48578eb315ca45bd46e9c37de11fe45c7f5f4138091e2f1"}, + {file = "aiohttp-3.11.11-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c341c7d868750e31961d6d8e60ff040fb9d3d3a46d77fd85e1ab8e76c3e9a5c4"}, + {file = "aiohttp-3.11.11-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ed9ee95614a71e87f1a70bc81603f6c6760128b140bc4030abe6abaa988f1c3d"}, + {file = "aiohttp-3.11.11-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:de8d38f1c2810fa2a4f1d995a2e9c70bb8737b18da04ac2afbf3971f65781d87"}, + {file = "aiohttp-3.11.11-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:a9b7371665d4f00deb8f32208c7c5e652059b0fda41cf6dbcac6114a041f1cc2"}, + {file = "aiohttp-3.11.11-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:620598717fce1b3bd14dd09947ea53e1ad510317c85dda2c9c65b622edc96b12"}, + {file = "aiohttp-3.11.11-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:bf8d9bfee991d8acc72d060d53860f356e07a50f0e0d09a8dfedea1c554dd0d5"}, + {file = "aiohttp-3.11.11-cp313-cp313-win32.whl", hash = "sha256:9d73ee3725b7a737ad86c2eac5c57a4a97793d9f442599bea5ec67ac9f4bdc3d"}, + {file = "aiohttp-3.11.11-cp313-cp313-win_amd64.whl", hash = "sha256:c7a06301c2fb096bdb0bd25fe2011531c1453b9f2c163c8031600ec73af1cc99"}, + {file = "aiohttp-3.11.11-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:3e23419d832d969f659c208557de4a123e30a10d26e1e14b73431d3c13444c2e"}, + {file = "aiohttp-3.11.11-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:21fef42317cf02e05d3b09c028712e1d73a9606f02467fd803f7c1f39cc59add"}, + {file = "aiohttp-3.11.11-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1f21bb8d0235fc10c09ce1d11ffbd40fc50d3f08a89e4cf3a0c503dc2562247a"}, + {file = "aiohttp-3.11.11-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1642eceeaa5ab6c9b6dfeaaa626ae314d808188ab23ae196a34c9d97efb68350"}, + {file = "aiohttp-3.11.11-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2170816e34e10f2fd120f603e951630f8a112e1be3b60963a1f159f5699059a6"}, + {file = "aiohttp-3.11.11-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8be8508d110d93061197fd2d6a74f7401f73b6d12f8822bbcd6d74f2b55d71b1"}, + {file = "aiohttp-3.11.11-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4eed954b161e6b9b65f6be446ed448ed3921763cc432053ceb606f89d793927e"}, + {file = 
"aiohttp-3.11.11-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d6c9af134da4bc9b3bd3e6a70072509f295d10ee60c697826225b60b9959acdd"}, + {file = "aiohttp-3.11.11-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:44167fc6a763d534a6908bdb2592269b4bf30a03239bcb1654781adf5e49caf1"}, + {file = "aiohttp-3.11.11-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:479b8c6ebd12aedfe64563b85920525d05d394b85f166b7873c8bde6da612f9c"}, + {file = "aiohttp-3.11.11-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:10b4ff0ad793d98605958089fabfa350e8e62bd5d40aa65cdc69d6785859f94e"}, + {file = "aiohttp-3.11.11-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:b540bd67cfb54e6f0865ceccd9979687210d7ed1a1cc8c01f8e67e2f1e883d28"}, + {file = "aiohttp-3.11.11-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:1dac54e8ce2ed83b1f6b1a54005c87dfed139cf3f777fdc8afc76e7841101226"}, + {file = "aiohttp-3.11.11-cp39-cp39-win32.whl", hash = "sha256:568c1236b2fde93b7720f95a890741854c1200fba4a3471ff48b2934d2d93fd3"}, + {file = "aiohttp-3.11.11-cp39-cp39-win_amd64.whl", hash = "sha256:943a8b052e54dfd6439fd7989f67fc6a7f2138d0a2cf0a7de5f18aa4fe7eb3b1"}, + {file = "aiohttp-3.11.11.tar.gz", hash = "sha256:bb49c7f1e6ebf3821a42d81d494f538107610c3a705987f53068546b0e90303e"}, ] [package.dependencies] aiohappyeyeballs = ">=2.3.0" aiosignal = ">=1.1.2" -async-timeout = {version = ">=4.0,<5.0", markers = "python_version < \"3.11\""} +async-timeout = {version = ">=4.0,<6.0", markers = "python_version < \"3.11\""} attrs = ">=17.3.0" frozenlist = ">=1.1.1" multidict = ">=4.5,<7.0" -yarl = ">=1.12.0,<2.0" +propcache = ">=0.2.0" +yarl = ">=1.17.0,<2.0" [package.extras] speedups = ["Brotli", "aiodns (>=3.2.0)", "brotlicffi"] @@ -149,18 +135,29 @@ pyserial = "*" [[package]] name = "aiosignal" -version = "1.3.1" +version = "1.3.2" description = "aiosignal: a list of registered asynchronous callbacks" optional = false -python-versions = ">=3.7" +python-versions = ">=3.9" files = [ - {file = "aiosignal-1.3.1-py3-none-any.whl", hash = "sha256:f8376fb07dd1e86a584e4fcdec80b36b7f81aac666ebc724e2c090300dd83b17"}, - {file = "aiosignal-1.3.1.tar.gz", hash = "sha256:54cd96e15e1649b75d6c87526a6ff0b6c1b0dd3459f43d9ca11d48c339b68cfc"}, + {file = "aiosignal-1.3.2-py2.py3-none-any.whl", hash = "sha256:45cde58e409a301715980c2b01d0c28bdde3770d8290b5eb2173759d9acb31a5"}, + {file = "aiosignal-1.3.2.tar.gz", hash = "sha256:a8c255c66fafb1e499c9351d0bf32ff2d8a0321595ebac3b93713656d2436f54"}, ] [package.dependencies] frozenlist = ">=1.1.0" +[[package]] +name = "annotated-types" +version = "0.7.0" +description = "Reusable constraint types to use with typing.Annotated" +optional = false +python-versions = ">=3.8" +files = [ + {file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"}, + {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"}, +] + [[package]] name = "antlr4-python3-runtime" version = "4.9.3" @@ -173,24 +170,24 @@ files = [ [[package]] name = "anyio" -version = "4.6.0" +version = "4.8.0" description = "High level compatibility layer for multiple asynchronous event loop implementations" optional = true python-versions = ">=3.9" files = [ - {file = "anyio-4.6.0-py3-none-any.whl", hash = "sha256:c7d2e9d63e31599eeb636c8c5c03a7e108d73b345f064f1c19fdc87b79036a9a"}, - {file = "anyio-4.6.0.tar.gz", hash = 
"sha256:137b4559cbb034c477165047febb6ff83f390fc3b20bf181c1fc0a728cb8beeb"}, + {file = "anyio-4.8.0-py3-none-any.whl", hash = "sha256:b5011f270ab5eb0abf13385f851315585cc37ef330dd88e27ec3d34d651fd47a"}, + {file = "anyio-4.8.0.tar.gz", hash = "sha256:1d9fe889df5212298c0c0723fa20479d1b94883a2df44bd3897aa91083316f7a"}, ] [package.dependencies] exceptiongroup = {version = ">=1.0.2", markers = "python_version < \"3.11\""} idna = ">=2.8" sniffio = ">=1.1" -typing-extensions = {version = ">=4.1", markers = "python_version < \"3.11\""} +typing_extensions = {version = ">=4.5", markers = "python_version < \"3.13\""} [package.extras] -doc = ["Sphinx (>=7.4,<8.0)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"] -test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.21.0b1)"] +doc = ["Sphinx (>=7.4,<8.0)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx_rtd_theme"] +test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "trustme", "truststore (>=0.9.1)", "uvloop (>=0.21)"] trio = ["trio (>=0.26.1)"] [[package]] @@ -292,21 +289,18 @@ files = [ [[package]] name = "asttokens" -version = "2.4.1" +version = "3.0.0" description = "Annotate AST trees with source code positions" optional = true -python-versions = "*" +python-versions = ">=3.8" files = [ - {file = "asttokens-2.4.1-py2.py3-none-any.whl", hash = "sha256:051ed49c3dcae8913ea7cd08e46a606dba30b79993209636c4875bc1d637bc24"}, - {file = "asttokens-2.4.1.tar.gz", hash = "sha256:b03869718ba9a6eb027e134bfdf69f38a236d681c83c160d510768af11254ba0"}, + {file = "asttokens-3.0.0-py3-none-any.whl", hash = "sha256:e3078351a059199dd5138cb1c706e6430c05eff2ff136af5eb4790f9d28932e2"}, + {file = "asttokens-3.0.0.tar.gz", hash = "sha256:0dcd8baa8d62b0c1d118b399b2ddba3c4aff271d0d7a9e0d4c1681c79035bbc7"}, ] -[package.dependencies] -six = ">=1.12.0" - [package.extras] -astroid = ["astroid (>=1,<2)", "astroid (>=2,<4)"] -test = ["astroid (>=1,<2)", "astroid (>=2,<4)", "pytest"] +astroid = ["astroid (>=2,<4)"] +test = ["astroid (>=2,<4)", "pytest", "pytest-cov", "pytest-xdist"] [[package]] name = "async-lru" @@ -324,30 +318,30 @@ typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.11\""} [[package]] name = "async-timeout" -version = "4.0.3" +version = "5.0.1" description = "Timeout context manager for asyncio programs" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "async-timeout-4.0.3.tar.gz", hash = "sha256:4640d96be84d82d02ed59ea2b7105a0f7b33abe8703703cd0ab0bf87c427522f"}, - {file = "async_timeout-4.0.3-py3-none-any.whl", hash = "sha256:7405140ff1230c310e51dc27b3145b9092d659ce68ff733fb0cefe3ee42be028"}, + {file = "async_timeout-5.0.1-py3-none-any.whl", hash = "sha256:39e3809566ff85354557ec2398b55e096c8364bacac9405a7a1fa429e77fe76c"}, + {file = "async_timeout-5.0.1.tar.gz", hash = "sha256:d9321a7a3d5a6a5e187e824d2fa0793ce379a202935782d555d6e9d2735677d3"}, ] [[package]] name = "attrs" -version = "24.2.0" +version = "24.3.0" description = "Classes Without Boilerplate" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "attrs-24.2.0-py3-none-any.whl", hash = "sha256:81921eb96de3191c8258c199618104dd27ac608d9366f5e35d011eae1867ede2"}, - {file = "attrs-24.2.0.tar.gz", hash = 
"sha256:5cfb1b9148b5b086569baec03f20d7b6bf3bcacc9a42bebf87ffaaca362f6346"}, + {file = "attrs-24.3.0-py3-none-any.whl", hash = "sha256:ac96cd038792094f438ad1f6ff80837353805ac950cd2aa0e0625ef19850c308"}, + {file = "attrs-24.3.0.tar.gz", hash = "sha256:8f5c07333d543103541ba7be0e2ce16eeee8130cb0b3f9238ab904ce1e85baff"}, ] [package.extras] benchmark = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-codspeed", "pytest-mypy-plugins", "pytest-xdist[psutil]"] cov = ["cloudpickle", "coverage[toml] (>=5.3)", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] -dev = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pre-commit", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] +dev = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pre-commit-uv", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] docs = ["cogapp", "furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier (<24.7)"] tests = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] tests-mypy = ["mypy (>=1.11.1)", "pytest-mypy-plugins"] @@ -389,42 +383,42 @@ lxml = ["lxml"] [[package]] name = "bleach" -version = "6.1.0" +version = "6.2.0" description = "An easy safelist-based HTML-sanitizing tool." optional = true -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "bleach-6.1.0-py3-none-any.whl", hash = "sha256:3225f354cfc436b9789c66c4ee030194bee0568fbf9cbdad3bc8b5c26c5f12b6"}, - {file = "bleach-6.1.0.tar.gz", hash = "sha256:0a31f1837963c41d46bbf1331b8778e1308ea0791db03cc4e7357b97cf42a8fe"}, + {file = "bleach-6.2.0-py3-none-any.whl", hash = "sha256:117d9c6097a7c3d22fd578fcd8d35ff1e125df6736f554da4e432fdd63f31e5e"}, + {file = "bleach-6.2.0.tar.gz", hash = "sha256:123e894118b8a599fd80d3ec1a6d4cc7ce4e5882b1317a7e1ba69b56e95f991f"}, ] [package.dependencies] -six = ">=1.9.0" +tinycss2 = {version = ">=1.1.0,<1.5", optional = true, markers = "extra == \"css\""} webencodings = "*" [package.extras] -css = ["tinycss2 (>=1.1.0,<1.3)"] +css = ["tinycss2 (>=1.1.0,<1.5)"] [[package]] name = "blinker" -version = "1.8.2" +version = "1.9.0" description = "Fast, simple object-to-object and broadcast signaling" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "blinker-1.8.2-py3-none-any.whl", hash = "sha256:1779309f71bf239144b9399d06ae925637cf6634cf6bd131104184531bf67c01"}, - {file = "blinker-1.8.2.tar.gz", hash = "sha256:8f77b09d3bf7c795e969e9486f39c2c5e9c39d4ee07424be2bc594ece9642d83"}, + {file = "blinker-1.9.0-py3-none-any.whl", hash = "sha256:ba0efaa9080b619ff2f3459d1d500c57bddea4a6b424b60a91141db6fd2f08bc"}, + {file = "blinker-1.9.0.tar.gz", hash = "sha256:b4ce2265a7abece45e7cc896e98dbebe6cead56bcf805a3d23136d145f5445bf"}, ] [[package]] name = "certifi" -version = "2024.8.30" +version = "2024.12.14" description = "Python package for providing Mozilla's CA Bundle." 
optional = false python-versions = ">=3.6" files = [ - {file = "certifi-2024.8.30-py3-none-any.whl", hash = "sha256:922820b53db7a7257ffbda3f597266d435245903d80737e34f8a45ff3e3230d8"}, - {file = "certifi-2024.8.30.tar.gz", hash = "sha256:bec941d2aa8195e248a60b31ff9f0558284cf01a52591ceda73ea9afffd69fd9"}, + {file = "certifi-2024.12.14-py3-none-any.whl", hash = "sha256:1275f7a45be9464efc1173084eaa30f866fe2e47d389406136d332ed4967ec56"}, + {file = "certifi-2024.12.14.tar.gz", hash = "sha256:b650d30f370c2b724812bee08008be0c4163b163ddaec3f2546c1caf65f191db"}, ] [[package]] @@ -519,116 +513,103 @@ files = [ [[package]] name = "charset-normalizer" -version = "3.4.0" +version = "3.4.1" description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." optional = false -python-versions = ">=3.7.0" +python-versions = ">=3.7" files = [ - {file = "charset_normalizer-3.4.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:4f9fc98dad6c2eaa32fc3af1417d95b5e3d08aff968df0cd320066def971f9a6"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0de7b687289d3c1b3e8660d0741874abe7888100efe14bd0f9fd7141bcbda92b"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5ed2e36c3e9b4f21dd9422f6893dec0abf2cca553af509b10cd630f878d3eb99"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40d3ff7fc90b98c637bda91c89d51264a3dcf210cade3a2c6f838c7268d7a4ca"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1110e22af8ca26b90bd6364fe4c763329b0ebf1ee213ba32b68c73de5752323d"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:86f4e8cca779080f66ff4f191a685ced73d2f72d50216f7112185dc02b90b9b7"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f683ddc7eedd742e2889d2bfb96d69573fde1d92fcb811979cdb7165bb9c7d3"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:27623ba66c183eca01bf9ff833875b459cad267aeeb044477fedac35e19ba907"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:f606a1881d2663630ea5b8ce2efe2111740df4b687bd78b34a8131baa007f79b"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:0b309d1747110feb25d7ed6b01afdec269c647d382c857ef4663bbe6ad95a912"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:136815f06a3ae311fae551c3df1f998a1ebd01ddd424aa5603a4336997629e95"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:14215b71a762336254351b00ec720a8e85cada43b987da5a042e4ce3e82bd68e"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:79983512b108e4a164b9c8d34de3992f76d48cadc9554c9e60b43f308988aabe"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-win32.whl", hash = "sha256:c94057af19bc953643a33581844649a7fdab902624d2eb739738a30e2b3e60fc"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:55f56e2ebd4e3bc50442fbc0888c9d8c94e4e06a933804e2af3e89e2f9c1c749"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:0d99dd8ff461990f12d6e42c7347fd9ab2532fb70e9621ba520f9e8637161d7c"}, - {file = 
"charset_normalizer-3.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c57516e58fd17d03ebe67e181a4e4e2ccab1168f8c2976c6a334d4f819fe5944"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6dba5d19c4dfab08e58d5b36304b3f92f3bd5d42c1a3fa37b5ba5cdf6dfcbcee"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bf4475b82be41b07cc5e5ff94810e6a01f276e37c2d55571e3fe175e467a1a1c"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ce031db0408e487fd2775d745ce30a7cd2923667cf3b69d48d219f1d8f5ddeb6"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8ff4e7cdfdb1ab5698e675ca622e72d58a6fa2a8aa58195de0c0061288e6e3ea"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3710a9751938947e6327ea9f3ea6332a09bf0ba0c09cae9cb1f250bd1f1549bc"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:82357d85de703176b5587dbe6ade8ff67f9f69a41c0733cf2425378b49954de5"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:47334db71978b23ebcf3c0f9f5ee98b8d65992b65c9c4f2d34c2eaf5bcaf0594"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:8ce7fd6767a1cc5a92a639b391891bf1c268b03ec7e021c7d6d902285259685c"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:f1a2f519ae173b5b6a2c9d5fa3116ce16e48b3462c8b96dfdded11055e3d6365"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:63bc5c4ae26e4bc6be6469943b8253c0fd4e4186c43ad46e713ea61a0ba49129"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:bcb4f8ea87d03bc51ad04add8ceaf9b0f085ac045ab4d74e73bbc2dc033f0236"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-win32.whl", hash = "sha256:9ae4ef0b3f6b41bad6366fb0ea4fc1d7ed051528e113a60fa2a65a9abb5b1d99"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:cee4373f4d3ad28f1ab6290684d8e2ebdb9e7a1b74fdc39e4c211995f77bec27"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0713f3adb9d03d49d365b70b84775d0a0d18e4ab08d12bc46baa6132ba78aaf6"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:de7376c29d95d6719048c194a9cf1a1b0393fbe8488a22008610b0361d834ecf"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4a51b48f42d9358460b78725283f04bddaf44a9358197b889657deba38f329db"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b295729485b06c1a0683af02a9e42d2caa9db04a373dc38a6a58cdd1e8abddf1"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ee803480535c44e7f5ad00788526da7d85525cfefaf8acf8ab9a310000be4b03"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3d59d125ffbd6d552765510e3f31ed75ebac2c7470c7274195b9161a32350284"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8cda06946eac330cbe6598f77bb54e690b4ca93f593dee1568ad22b04f347c15"}, - {file = 
"charset_normalizer-3.4.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:07afec21bbbbf8a5cc3651aa96b980afe2526e7f048fdfb7f1014d84acc8b6d8"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6b40e8d38afe634559e398cc32b1472f376a4099c75fe6299ae607e404c033b2"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:b8dcd239c743aa2f9c22ce674a145e0a25cb1566c495928440a181ca1ccf6719"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:84450ba661fb96e9fd67629b93d2941c871ca86fc38d835d19d4225ff946a631"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:44aeb140295a2f0659e113b31cfe92c9061622cadbc9e2a2f7b8ef6b1e29ef4b"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1db4e7fefefd0f548d73e2e2e041f9df5c59e178b4c72fbac4cc6f535cfb1565"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-win32.whl", hash = "sha256:5726cf76c982532c1863fb64d8c6dd0e4c90b6ece9feb06c9f202417a31f7dd7"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:b197e7094f232959f8f20541ead1d9862ac5ebea1d58e9849c1bf979255dfac9"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:dd4eda173a9fcccb5f2e2bd2a9f423d180194b1bf17cf59e3269899235b2a114"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e9e3c4c9e1ed40ea53acf11e2a386383c3304212c965773704e4603d589343ed"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:92a7e36b000bf022ef3dbb9c46bfe2d52c047d5e3f3343f43204263c5addc250"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:54b6a92d009cbe2fb11054ba694bc9e284dad30a26757b1e372a1fdddaf21920"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ffd9493de4c922f2a38c2bf62b831dcec90ac673ed1ca182fe11b4d8e9f2a64"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:35c404d74c2926d0287fbd63ed5d27eb911eb9e4a3bb2c6d294f3cfd4a9e0c23"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4796efc4faf6b53a18e3d46343535caed491776a22af773f366534056c4e1fbc"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e7fdd52961feb4c96507aa649550ec2a0d527c086d284749b2f582f2d40a2e0d"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:92db3c28b5b2a273346bebb24857fda45601aef6ae1c011c0a997106581e8a88"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:ab973df98fc99ab39080bfb0eb3a925181454d7c3ac8a1e695fddfae696d9e90"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:4b67fdab07fdd3c10bb21edab3cbfe8cf5696f453afce75d815d9d7223fbe88b"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:aa41e526a5d4a9dfcfbab0716c7e8a1b215abd3f3df5a45cf18a12721d31cb5d"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ffc519621dce0c767e96b9c53f09c5d215578e10b02c285809f76509a3931482"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-win32.whl", hash = 
"sha256:f19c1585933c82098c2a520f8ec1227f20e339e33aca8fa6f956f6691b784e67"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:707b82d19e65c9bd28b81dde95249b07bf9f5b90ebe1ef17d9b57473f8a64b7b"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:dbe03226baf438ac4fda9e2d0715022fd579cb641c4cf639fa40d53b2fe6f3e2"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dd9a8bd8900e65504a305bf8ae6fa9fbc66de94178c420791d0293702fce2df7"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b8831399554b92b72af5932cdbbd4ddc55c55f631bb13ff8fe4e6536a06c5c51"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a14969b8691f7998e74663b77b4c36c0337cb1df552da83d5c9004a93afdb574"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dcaf7c1524c0542ee2fc82cc8ec337f7a9f7edee2532421ab200d2b920fc97cf"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:425c5f215d0eecee9a56cdb703203dda90423247421bf0d67125add85d0c4455"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:d5b054862739d276e09928de37c79ddeec42a6e1bfc55863be96a36ba22926f6"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_i686.whl", hash = "sha256:f3e73a4255342d4eb26ef6df01e3962e73aa29baa3124a8e824c5d3364a65748"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_ppc64le.whl", hash = "sha256:2f6c34da58ea9c1a9515621f4d9ac379871a8f21168ba1b5e09d74250de5ad62"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_s390x.whl", hash = "sha256:f09cb5a7bbe1ecae6e87901a2eb23e0256bb524a79ccc53eb0b7629fbe7677c4"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:0099d79bdfcf5c1f0c2c72f91516702ebf8b0b8ddd8905f97a8aecf49712c621"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-win32.whl", hash = "sha256:9c98230f5042f4945f957d006edccc2af1e03ed5e37ce7c373f00a5a4daa6149"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-win_amd64.whl", hash = "sha256:62f60aebecfc7f4b82e3f639a7d1433a20ec32824db2199a11ad4f5e146ef5ee"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:af73657b7a68211996527dbfeffbb0864e043d270580c5aef06dc4b659a4b578"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:cab5d0b79d987c67f3b9e9c53f54a61360422a5a0bc075f43cab5621d530c3b6"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:9289fd5dddcf57bab41d044f1756550f9e7cf0c8e373b8cdf0ce8773dc4bd417"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b493a043635eb376e50eedf7818f2f322eabbaa974e948bd8bdd29eb7ef2a51"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9fa2566ca27d67c86569e8c85297aaf413ffab85a8960500f12ea34ff98e4c41"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a8e538f46104c815be19c975572d74afb53f29650ea2025bbfaef359d2de2f7f"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:6fd30dc99682dc2c603c2b315bded2799019cea829f8bf57dc6b61efde6611c8"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2006769bd1640bdf4d5641c69a3d63b71b81445473cac5ded39740a226fa88ab"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:dc15e99b2d8a656f8e666854404f1ba54765871104e50c8e9813af8a7db07f12"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:ab2e5bef076f5a235c3774b4f4028a680432cded7cad37bba0fd90d64b187d19"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:4ec9dd88a5b71abfc74e9df5ebe7921c35cbb3b641181a531ca65cdb5e8e4dea"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:43193c5cda5d612f247172016c4bb71251c784d7a4d9314677186a838ad34858"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:aa693779a8b50cd97570e5a0f343538a8dbd3e496fa5dcb87e29406ad0299654"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-win32.whl", hash = "sha256:7706f5850360ac01d80c89bcef1640683cc12ed87f42579dab6c5d3ed6888613"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:c3e446d253bd88f6377260d07c895816ebf33ffffd56c1c792b13bff9c3e1ade"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:980b4f289d1d90ca5efcf07958d3eb38ed9c0b7676bf2831a54d4f66f9c27dfa"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f28f891ccd15c514a0981f3b9db9aa23d62fe1a99997512b0491d2ed323d229a"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a8aacce6e2e1edcb6ac625fb0f8c3a9570ccc7bfba1f63419b3769ccf6a00ed0"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bd7af3717683bea4c87acd8c0d3d5b44d56120b26fd3f8a692bdd2d5260c620a"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5ff2ed8194587faf56555927b3aa10e6fb69d931e33953943bc4f837dfee2242"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e91f541a85298cf35433bf66f3fab2a4a2cff05c127eeca4af174f6d497f0d4b"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:309a7de0a0ff3040acaebb35ec45d18db4b28232f21998851cfa709eeff49d62"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:285e96d9d53422efc0d7a17c60e59f37fbf3dfa942073f666db4ac71e8d726d0"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:5d447056e2ca60382d460a604b6302d8db69476fd2015c81e7c35417cfabe4cd"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:20587d20f557fe189b7947d8e7ec5afa110ccf72a3128d61a2a387c3313f46be"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:130272c698667a982a5d0e626851ceff662565379baf0ff2cc58067b81d4f11d"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:ab22fbd9765e6954bc0bcff24c25ff71dcbfdb185fcdaca49e81bac68fe724d3"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:7782afc9b6b42200f7362858f9e73b1f8316afb276d316336c0ec3bd73312742"}, - {file = 
"charset_normalizer-3.4.0-cp39-cp39-win32.whl", hash = "sha256:2de62e8801ddfff069cd5c504ce3bc9672b23266597d4e4f50eda28846c322f2"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:95c3c157765b031331dd4db3c775e58deaee050a3042fcad72cbc4189d7c8dca"}, - {file = "charset_normalizer-3.4.0-py3-none-any.whl", hash = "sha256:fe9f97feb71aa9896b81973a7bbada8c49501dc73e58a10fcef6663af95e5079"}, - {file = "charset_normalizer-3.4.0.tar.gz", hash = "sha256:223217c3d4f82c3ac5e29032b3f1c2eb0fb591b72161f86d93f5719079dae93e"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:91b36a978b5ae0ee86c394f5a54d6ef44db1de0815eb43de826d41d21e4af3de"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7461baadb4dc00fd9e0acbe254e3d7d2112e7f92ced2adc96e54ef6501c5f176"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e218488cd232553829be0664c2292d3af2eeeb94b32bea483cf79ac6a694e037"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:80ed5e856eb7f30115aaf94e4a08114ccc8813e6ed1b5efa74f9f82e8509858f"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b010a7a4fd316c3c484d482922d13044979e78d1861f0e0650423144c616a46a"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4532bff1b8421fd0a320463030c7520f56a79c9024a4e88f01c537316019005a"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:d973f03c0cb71c5ed99037b870f2be986c3c05e63622c017ea9816881d2dd247"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:3a3bd0dcd373514dcec91c411ddb9632c0d7d92aed7093b8c3bbb6d69ca74408"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:d9c3cdf5390dcd29aa8056d13e8e99526cda0305acc038b96b30352aff5ff2bb"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:2bdfe3ac2e1bbe5b59a1a63721eb3b95fc9b6817ae4a46debbb4e11f6232428d"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:eab677309cdb30d047996b36d34caeda1dc91149e4fdca0b1a039b3f79d9a807"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-win32.whl", hash = "sha256:c0429126cf75e16c4f0ad00ee0eae4242dc652290f940152ca8c75c3a4b6ee8f"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-win_amd64.whl", hash = "sha256:9f0b8b1c6d84c8034a44893aba5e767bf9c7a211e313a9605d9c617d7083829f"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:8bfa33f4f2672964266e940dd22a195989ba31669bd84629f05fab3ef4e2d125"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:28bf57629c75e810b6ae989f03c0828d64d6b26a5e205535585f96093e405ed1"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f08ff5e948271dc7e18a35641d2f11a4cd8dfd5634f55228b691e62b37125eb3"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:234ac59ea147c59ee4da87a0c0f098e9c8d169f4dc2a159ef720f1a61bbe27cd"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:fd4ec41f914fa74ad1b8304bbc634b3de73d2a0889bd32076342a573e0779e00"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eea6ee1db730b3483adf394ea72f808b6e18cf3cb6454b4d86e04fa8c4327a12"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:c96836c97b1238e9c9e3fe90844c947d5afbf4f4c92762679acfe19927d81d77"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:4d86f7aff21ee58f26dcf5ae81a9addbd914115cdebcbb2217e4f0ed8982e146"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:09b5e6733cbd160dcc09589227187e242a30a49ca5cefa5a7edd3f9d19ed53fd"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:5777ee0881f9499ed0f71cc82cf873d9a0ca8af166dfa0af8ec4e675b7df48e6"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:237bdbe6159cff53b4f24f397d43c6336c6b0b42affbe857970cefbb620911c8"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-win32.whl", hash = "sha256:8417cb1f36cc0bc7eaba8ccb0e04d55f0ee52df06df3ad55259b9a323555fc8b"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-win_amd64.whl", hash = "sha256:d7f50a1f8c450f3925cb367d011448c39239bb3eb4117c36a6d354794de4ce76"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:73d94b58ec7fecbc7366247d3b0b10a21681004153238750bb67bd9012414545"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dad3e487649f498dd991eeb901125411559b22e8d7ab25d3aeb1af367df5efd7"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c30197aa96e8eed02200a83fba2657b4c3acd0f0aa4bdc9f6c1af8e8962e0757"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2369eea1ee4a7610a860d88f268eb39b95cb588acd7235e02fd5a5601773d4fa"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc2722592d8998c870fa4e290c2eec2c1569b87fe58618e67d38b4665dfa680d"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ffc9202a29ab3920fa812879e95a9e78b2465fd10be7fcbd042899695d75e616"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:804a4d582ba6e5b747c625bf1255e6b1507465494a40a2130978bda7b932c90b"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:0f55e69f030f7163dffe9fd0752b32f070566451afe180f99dbeeb81f511ad8d"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:c4c3e6da02df6fa1410a7680bd3f63d4f710232d3139089536310d027950696a"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:5df196eb874dae23dcfb968c83d4f8fdccb333330fe1fc278ac5ceeb101003a9"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e358e64305fe12299a08e08978f51fc21fac060dcfcddd95453eabe5b93ed0e1"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-win32.whl", hash = "sha256:9b23ca7ef998bc739bf6ffc077c2116917eabcc901f88da1b9856b210ef63f35"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-win_amd64.whl", hash = "sha256:6ff8a4a60c227ad87030d76e99cd1698345d4491638dfa6673027c48b3cd395f"}, + {file = 
"charset_normalizer-3.4.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:aabfa34badd18f1da5ec1bc2715cadc8dca465868a4e73a0173466b688f29dda"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22e14b5d70560b8dd51ec22863f370d1e595ac3d024cb8ad7d308b4cd95f8313"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8436c508b408b82d87dc5f62496973a1805cd46727c34440b0d29d8a2f50a6c9"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2d074908e1aecee37a7635990b2c6d504cd4766c7bc9fc86d63f9c09af3fa11b"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:955f8851919303c92343d2f66165294848d57e9bba6cf6e3625485a70a038d11"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:44ecbf16649486d4aebafeaa7ec4c9fed8b88101f4dd612dcaf65d5e815f837f"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:0924e81d3d5e70f8126529951dac65c1010cdf117bb75eb02dd12339b57749dd"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:2967f74ad52c3b98de4c3b32e1a44e32975e008a9cd2a8cc8966d6a5218c5cb2"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:c75cb2a3e389853835e84a2d8fb2b81a10645b503eca9bcb98df6b5a43eb8886"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:09b26ae6b1abf0d27570633b2b078a2a20419c99d66fb2823173d73f188ce601"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:fa88b843d6e211393a37219e6a1c1df99d35e8fd90446f1118f4216e307e48cd"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-win32.whl", hash = "sha256:eb8178fe3dba6450a3e024e95ac49ed3400e506fd4e9e5c32d30adda88cbd407"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-win_amd64.whl", hash = "sha256:b1ac5992a838106edb89654e0aebfc24f5848ae2547d22c2c3f66454daa11971"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f30bf9fd9be89ecb2360c7d94a711f00c09b976258846efe40db3d05828e8089"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:97f68b8d6831127e4787ad15e6757232e14e12060bec17091b85eb1486b91d8d"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7974a0b5ecd505609e3b19742b60cee7aa2aa2fb3151bc917e6e2646d7667dcf"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc54db6c8593ef7d4b2a331b58653356cf04f67c960f584edb7c3d8c97e8f39e"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:311f30128d7d333eebd7896965bfcfbd0065f1716ec92bd5638d7748eb6f936a"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:7d053096f67cd1241601111b698f5cad775f97ab25d81567d3f59219b5f1adbd"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_i686.whl", hash = "sha256:807f52c1f798eef6cf26beb819eeb8819b1622ddfeef9d0977a8502d4db6d534"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_ppc64le.whl", hash = 
"sha256:dccbe65bd2f7f7ec22c4ff99ed56faa1e9f785482b9bbd7c717e26fd723a1d1e"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_s390x.whl", hash = "sha256:2fb9bd477fdea8684f78791a6de97a953c51831ee2981f8e4f583ff3b9d9687e"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:01732659ba9b5b873fc117534143e4feefecf3b2078b0a6a2e925271bb6f4cfa"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-win32.whl", hash = "sha256:7a4f97a081603d2050bfaffdefa5b02a9ec823f8348a572e39032caa8404a487"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-win_amd64.whl", hash = "sha256:7b1bef6280950ee6c177b326508f86cad7ad4dff12454483b51d8b7d673a2c5d"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:ecddf25bee22fe4fe3737a399d0d177d72bc22be6913acfab364b40bce1ba83c"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c60ca7339acd497a55b0ea5d506b2a2612afb2826560416f6894e8b5770d4a9"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b7b2d86dd06bfc2ade3312a83a5c364c7ec2e3498f8734282c6c3d4b07b346b8"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dd78cfcda14a1ef52584dbb008f7ac81c1328c0f58184bf9a84c49c605002da6"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e27f48bcd0957c6d4cb9d6fa6b61d192d0b13d5ef563e5f2ae35feafc0d179c"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:01ad647cdd609225c5350561d084b42ddf732f4eeefe6e678765636791e78b9a"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:619a609aa74ae43d90ed2e89bdd784765de0a25ca761b93e196d938b8fd1dbbd"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:89149166622f4db9b4b6a449256291dc87a99ee53151c74cbd82a53c8c2f6ccd"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:7709f51f5f7c853f0fb938bcd3bc59cdfdc5203635ffd18bf354f6967ea0f824"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:345b0426edd4e18138d6528aed636de7a9ed169b4aaf9d61a8c19e39d26838ca"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:0907f11d019260cdc3f94fbdb23ff9125f6b5d1039b76003b5b0ac9d6a6c9d5b"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-win32.whl", hash = "sha256:ea0d8d539afa5eb2728aa1932a988a9a7af94f18582ffae4bc10b3fbdad0626e"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-win_amd64.whl", hash = "sha256:329ce159e82018d646c7ac45b01a430369d526569ec08516081727a20e9e4af4"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:b97e690a2118911e39b4042088092771b4ae3fc3aa86518f84b8cf6888dbdb41"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:78baa6d91634dfb69ec52a463534bc0df05dbd546209b79a3880a34487f4b84f"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1a2bc9f351a75ef49d664206d51f8e5ede9da246602dc2d2726837620ea034b2"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:75832c08354f595c760a804588b9357d34ec00ba1c940c15e31e96d902093770"}, + {file = 
"charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0af291f4fe114be0280cdd29d533696a77b5b49cfde5467176ecab32353395c4"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0167ddc8ab6508fe81860a57dd472b2ef4060e8d378f0cc555707126830f2537"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:2a75d49014d118e4198bcee5ee0a6f25856b29b12dbf7cd012791f8a6cc5c496"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:363e2f92b0f0174b2f8238240a1a30142e3db7b957a5dd5689b0e75fb717cc78"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:ab36c8eb7e454e34e60eb55ca5d241a5d18b2c6244f6827a30e451c42410b5f7"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:4c0907b1928a36d5a998d72d64d8eaa7244989f7aaaf947500d3a800c83a3fd6"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:04432ad9479fa40ec0f387795ddad4437a2b50417c69fa275e212933519ff294"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-win32.whl", hash = "sha256:3bed14e9c89dcb10e8f3a29f9ccac4955aebe93c71ae803af79265c9ca5644c5"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-win_amd64.whl", hash = "sha256:49402233c892a461407c512a19435d1ce275543138294f7ef013f0b63d5d3765"}, + {file = "charset_normalizer-3.4.1-py3-none-any.whl", hash = "sha256:d98b1668f06378c6dbefec3b92299716b931cd4e6061f3c875a71ced1780ab85"}, + {file = "charset_normalizer-3.4.1.tar.gz", hash = "sha256:44251f18cd68a75b56585dd00dae26183e102cd5e0f9f1466e6df5da2ed64ea3"}, ] [[package]] @@ -644,13 +625,13 @@ files = [ [[package]] name = "click" -version = "8.1.7" +version = "8.1.8" description = "Composable command line interface toolkit" optional = false python-versions = ">=3.7" files = [ - {file = "click-8.1.7-py3-none-any.whl", hash = "sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28"}, - {file = "click-8.1.7.tar.gz", hash = "sha256:ca9853ad459e787e2192211578cc907e7594e294c7ccc834310722b41b9ca6de"}, + {file = "click-8.1.8-py3-none-any.whl", hash = "sha256:63c132bbbed01578a06712a2d1f497bb62d9c1c0d329b7903a866228027263b2"}, + {file = "click-8.1.8.tar.gz", hash = "sha256:ed53c9d8990d83c2a27deae68e4ee337473f6330c040a31d4225c9574d16096a"}, ] [package.dependencies] @@ -658,13 +639,13 @@ colorama = {version = "*", markers = "platform_system == \"Windows\""} [[package]] name = "cloudpickle" -version = "3.0.0" +version = "3.1.0" description = "Pickler class to extend the standard pickle.Pickler functionality" optional = false python-versions = ">=3.8" files = [ - {file = "cloudpickle-3.0.0-py3-none-any.whl", hash = "sha256:246ee7d0c295602a036e86369c77fecda4ab17b506496730f2f576d9016fd9c7"}, - {file = "cloudpickle-3.0.0.tar.gz", hash = "sha256:996d9a482c6fb4f33c1a35335cf8afd065d2a56e973270364840712d9131a882"}, + {file = "cloudpickle-3.1.0-py3-none-any.whl", hash = "sha256:fe11acda67f61aaaec473e3afe030feb131d78a43461b718185363384f1ba12e"}, + {file = "cloudpickle-3.1.0.tar.gz", hash = "sha256:81a929b6e3c7335c863c771d673d105f02efdb89dfaba0c90495d1c64796601b"}, ] [[package]] @@ -687,28 +668,30 @@ plotting = ["matplotlib"] [[package]] name = "cmake" -version = "3.30.4" +version = "3.31.2" description = "CMake is an open-source, cross-platform family of tools designed to build, test and package software" optional = false python-versions = ">=3.7" files = [ - {file = 
"cmake-3.30.4-py3-none-macosx_10_10_universal2.whl", hash = "sha256:8a1a30125213c3d44b81a1af0085ad1dcd77abc61bcdf330556e83898428198a"}, - {file = "cmake-3.30.4-py3-none-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:9f69b3706ae93fa48762871bdc7cb759fbbbadb04452e5eab820537c35fabcb6"}, - {file = "cmake-3.30.4-py3-none-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:969af8432a17168e5b88e2efba11e5e14b7ca38aa638975b7ce1b19044c5183f"}, - {file = "cmake-3.30.4-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0a5929e21af39a3adf4058aea54aa2197198e06315ebad541dda0baf20e2b32b"}, - {file = "cmake-3.30.4-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e9808d3744e57c6fd71d93e2ca95142d67578a13a8867f7e8b000f343799899f"}, - {file = "cmake-3.30.4-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0a223c62cfeebcb7b90f715c16bb2e83ee37e8c3e676efde83b094d62c278ec2"}, - {file = "cmake-3.30.4-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:08466455fbac67287a4868819ae0e0ab16d60c02eb209ae5e6d70e0e35d0e601"}, - {file = "cmake-3.30.4-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1b8a4b0e638ddbabd16cad8b053b5a66733ddaf652dc3d46d55b3887314022fe"}, - {file = "cmake-3.30.4-py3-none-musllinux_1_1_aarch64.whl", hash = "sha256:a8f3160cc2b362c0ba03d70300a36bca5a58e1f82c345f4f54a4da7f59b7b2b4"}, - {file = "cmake-3.30.4-py3-none-musllinux_1_1_i686.whl", hash = "sha256:13bd1afa2e9988973f18c2425823081a044929e80685731601f093ff673d2db7"}, - {file = "cmake-3.30.4-py3-none-musllinux_1_1_ppc64le.whl", hash = "sha256:d2ab1018a42e03cf2e843f9565bc2ff7465a1a66c1cbfaba30d494a5e26f763e"}, - {file = "cmake-3.30.4-py3-none-musllinux_1_1_s390x.whl", hash = "sha256:2d6367a438c11f0863c9cdea843acd09514e94534ce0d115bc8f7905aaff243d"}, - {file = "cmake-3.30.4-py3-none-musllinux_1_1_x86_64.whl", hash = "sha256:e4cc37735bdc7ba058abdddd3f94ac9dc32cae0f94ae68661565b39f64a9a22f"}, - {file = "cmake-3.30.4-py3-none-win32.whl", hash = "sha256:a08e9a987be5da69941f4a26dd7614fcbb5039394821fbcce9716c20a1571c0c"}, - {file = "cmake-3.30.4-py3-none-win_amd64.whl", hash = "sha256:2d128d0831924788c1e87d6ca9abe4594e2ccde718712b0fa2c8c3a99b0d1282"}, - {file = "cmake-3.30.4-py3-none-win_arm64.whl", hash = "sha256:2825874fb84bd9d05c40b1a4347366d9949c9f6bac7a9ace97ac7faf9d573b8b"}, - {file = "cmake-3.30.4.tar.gz", hash = "sha256:fedd88495e742a1316078c283c2b4c2eeac4c34eca3234401d28f09ee58a320f"}, + {file = "cmake-3.31.2-py3-none-macosx_10_10_universal2.whl", hash = "sha256:8c1fa50cafe54f9aa074d03cda1ade54271039d939194adc9cd1ac388b1af055"}, + {file = "cmake-3.31.2-py3-none-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:8210a40d5b08bec7c752974f2b217a062a092480e33dcbd39d46a8cd96c29ddc"}, + {file = "cmake-3.31.2-py3-none-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:82ec0a96b965874dc793ed6d3aa7edad6f364d4ba8b86307548bfbbca70fd2dd"}, + {file = "cmake-3.31.2-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:604c44684dbcbec1458310bd57b9e69b7768ddd7cd2fc852607ca24616f34518"}, + {file = "cmake-3.31.2-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8d8c840502f84a16562820ee23f963583953939de63a9582f0f7735868cd18e6"}, + {file = "cmake-3.31.2-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2988aac62b9ada74bb802a8065ea58abe57bf203c057bb7e0456c3575a89c48a"}, + {file = "cmake-3.31.2-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", 
hash = "sha256:c8f9d7f8371a6739bbec7c238d213877f31b22a10930c91dea59b8b9463b6ee1"}, + {file = "cmake-3.31.2-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:31aaa73c6bf49109b2a7ab86b3e6887b5db0da6be30ddfb30bed160b84787f89"}, + {file = "cmake-3.31.2-py3-none-manylinux_2_31_armv7l.whl", hash = "sha256:79b7eb78aea04e363a736e544afc4b4489f50415075bd77131e5314778b8e879"}, + {file = "cmake-3.31.2-py3-none-musllinux_1_1_aarch64.whl", hash = "sha256:09b3b1c919c76d25272bd9a0f15baf954d6c883abffdd1cfb3fbf1afa7a2c556"}, + {file = "cmake-3.31.2-py3-none-musllinux_1_1_i686.whl", hash = "sha256:aec014f19536f2b6b0a94f4e20990c28fb93c4bdf9193d57fa5e50ef829aaf78"}, + {file = "cmake-3.31.2-py3-none-musllinux_1_1_ppc64le.whl", hash = "sha256:994e14f485329d58d316487bd1759ad89717b895079e8b892a8220f03c1c5267"}, + {file = "cmake-3.31.2-py3-none-musllinux_1_1_s390x.whl", hash = "sha256:e8fc23d376b3fae8945067f397d8503fff210eefe1e49ab9ece1d99a88679cf4"}, + {file = "cmake-3.31.2-py3-none-musllinux_1_1_x86_64.whl", hash = "sha256:fa3b23b8bd52c0ae9e3c6b635ac8ee70d8f35d24bacf39cc4cea22aec6e4ed84"}, + {file = "cmake-3.31.2-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:7b5f4f5ec4b0d6275369881a2a7bf7230af1cb60afdb20a7b2fbc70690f13564"}, + {file = "cmake-3.31.2-py3-none-win32.whl", hash = "sha256:378036396394dad7673cdfc603bb85af34945607df43e8dad731f5907c755f3b"}, + {file = "cmake-3.31.2-py3-none-win_amd64.whl", hash = "sha256:cedb6de320a65ff0137e5c6090b9b7fba459788237d3d4deb6e66be19fe9b61d"}, + {file = "cmake-3.31.2-py3-none-win_arm64.whl", hash = "sha256:3bd054996b8a36ff5beb3cdd0ffbf8edf23d719cf946762662a9fb70525b1d1b"}, + {file = "cmake-3.31.2.tar.gz", hash = "sha256:16a323fcbb86cf8a10aea82cd4deecb33edb3ed7e8907be8a06933ce04f6e6d1"}, ] [package.extras] @@ -759,76 +742,65 @@ yaml = ["PyYAML"] [[package]] name = "contourpy" -version = "1.3.0" +version = "1.3.1" description = "Python library for calculating contours of 2D quadrilateral grids" optional = true -python-versions = ">=3.9" +python-versions = ">=3.10" files = [ - {file = "contourpy-1.3.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:880ea32e5c774634f9fcd46504bf9f080a41ad855f4fef54f5380f5133d343c7"}, - {file = "contourpy-1.3.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:76c905ef940a4474a6289c71d53122a4f77766eef23c03cd57016ce19d0f7b42"}, - {file = "contourpy-1.3.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:92f8557cbb07415a4d6fa191f20fd9d2d9eb9c0b61d1b2f52a8926e43c6e9af7"}, - {file = "contourpy-1.3.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:36f965570cff02b874773c49bfe85562b47030805d7d8360748f3eca570f4cab"}, - {file = "contourpy-1.3.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cacd81e2d4b6f89c9f8a5b69b86490152ff39afc58a95af002a398273e5ce589"}, - {file = "contourpy-1.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:69375194457ad0fad3a839b9e29aa0b0ed53bb54db1bfb6c3ae43d111c31ce41"}, - {file = "contourpy-1.3.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:7a52040312b1a858b5e31ef28c2e865376a386c60c0e248370bbea2d3f3b760d"}, - {file = "contourpy-1.3.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:3faeb2998e4fcb256542e8a926d08da08977f7f5e62cf733f3c211c2a5586223"}, - {file = "contourpy-1.3.0-cp310-cp310-win32.whl", hash = "sha256:36e0cff201bcb17a0a8ecc7f454fe078437fa6bda730e695a92f2d9932bd507f"}, - {file = "contourpy-1.3.0-cp310-cp310-win_amd64.whl", hash = 
"sha256:87ddffef1dbe5e669b5c2440b643d3fdd8622a348fe1983fad7a0f0ccb1cd67b"}, - {file = "contourpy-1.3.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0fa4c02abe6c446ba70d96ece336e621efa4aecae43eaa9b030ae5fb92b309ad"}, - {file = "contourpy-1.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:834e0cfe17ba12f79963861e0f908556b2cedd52e1f75e6578801febcc6a9f49"}, - {file = "contourpy-1.3.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dbc4c3217eee163fa3984fd1567632b48d6dfd29216da3ded3d7b844a8014a66"}, - {file = "contourpy-1.3.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4865cd1d419e0c7a7bf6de1777b185eebdc51470800a9f42b9e9decf17762081"}, - {file = "contourpy-1.3.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:303c252947ab4b14c08afeb52375b26781ccd6a5ccd81abcdfc1fafd14cf93c1"}, - {file = "contourpy-1.3.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:637f674226be46f6ba372fd29d9523dd977a291f66ab2a74fbeb5530bb3f445d"}, - {file = "contourpy-1.3.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:76a896b2f195b57db25d6b44e7e03f221d32fe318d03ede41f8b4d9ba1bff53c"}, - {file = "contourpy-1.3.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:e1fd23e9d01591bab45546c089ae89d926917a66dceb3abcf01f6105d927e2cb"}, - {file = "contourpy-1.3.0-cp311-cp311-win32.whl", hash = "sha256:d402880b84df3bec6eab53cd0cf802cae6a2ef9537e70cf75e91618a3801c20c"}, - {file = "contourpy-1.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:6cb6cc968059db9c62cb35fbf70248f40994dfcd7aa10444bbf8b3faeb7c2d67"}, - {file = "contourpy-1.3.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:570ef7cf892f0afbe5b2ee410c507ce12e15a5fa91017a0009f79f7d93a1268f"}, - {file = "contourpy-1.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:da84c537cb8b97d153e9fb208c221c45605f73147bd4cadd23bdae915042aad6"}, - {file = "contourpy-1.3.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0be4d8425bfa755e0fd76ee1e019636ccc7c29f77a7c86b4328a9eb6a26d0639"}, - {file = "contourpy-1.3.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9c0da700bf58f6e0b65312d0a5e695179a71d0163957fa381bb3c1f72972537c"}, - {file = "contourpy-1.3.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:eb8b141bb00fa977d9122636b16aa67d37fd40a3d8b52dd837e536d64b9a4d06"}, - {file = "contourpy-1.3.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3634b5385c6716c258d0419c46d05c8aa7dc8cb70326c9a4fb66b69ad2b52e09"}, - {file = "contourpy-1.3.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:0dce35502151b6bd35027ac39ba6e5a44be13a68f55735c3612c568cac3805fd"}, - {file = "contourpy-1.3.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:aea348f053c645100612b333adc5983d87be69acdc6d77d3169c090d3b01dc35"}, - {file = "contourpy-1.3.0-cp312-cp312-win32.whl", hash = "sha256:90f73a5116ad1ba7174341ef3ea5c3150ddf20b024b98fb0c3b29034752c8aeb"}, - {file = "contourpy-1.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:b11b39aea6be6764f84360fce6c82211a9db32a7c7de8fa6dd5397cf1d079c3b"}, - {file = "contourpy-1.3.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:3e1c7fa44aaae40a2247e2e8e0627f4bea3dd257014764aa644f319a5f8600e3"}, - {file = "contourpy-1.3.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:364174c2a76057feef647c802652f00953b575723062560498dc7930fc9b1cb7"}, - {file = 
"contourpy-1.3.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:32b238b3b3b649e09ce9aaf51f0c261d38644bdfa35cbaf7b263457850957a84"}, - {file = "contourpy-1.3.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d51fca85f9f7ad0b65b4b9fe800406d0d77017d7270d31ec3fb1cc07358fdea0"}, - {file = "contourpy-1.3.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:732896af21716b29ab3e988d4ce14bc5133733b85956316fb0c56355f398099b"}, - {file = "contourpy-1.3.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d73f659398a0904e125280836ae6f88ba9b178b2fed6884f3b1f95b989d2c8da"}, - {file = "contourpy-1.3.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:c6c7c2408b7048082932cf4e641fa3b8ca848259212f51c8c59c45aa7ac18f14"}, - {file = "contourpy-1.3.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:f317576606de89da6b7e0861cf6061f6146ead3528acabff9236458a6ba467f8"}, - {file = "contourpy-1.3.0-cp313-cp313-win32.whl", hash = "sha256:31cd3a85dbdf1fc002280c65caa7e2b5f65e4a973fcdf70dd2fdcb9868069294"}, - {file = "contourpy-1.3.0-cp313-cp313-win_amd64.whl", hash = "sha256:4553c421929ec95fb07b3aaca0fae668b2eb5a5203d1217ca7c34c063c53d087"}, - {file = "contourpy-1.3.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:345af746d7766821d05d72cb8f3845dfd08dd137101a2cb9b24de277d716def8"}, - {file = "contourpy-1.3.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:3bb3808858a9dc68f6f03d319acd5f1b8a337e6cdda197f02f4b8ff67ad2057b"}, - {file = "contourpy-1.3.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:420d39daa61aab1221567b42eecb01112908b2cab7f1b4106a52caaec8d36973"}, - {file = "contourpy-1.3.0-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4d63ee447261e963af02642ffcb864e5a2ee4cbfd78080657a9880b8b1868e18"}, - {file = "contourpy-1.3.0-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:167d6c890815e1dac9536dca00828b445d5d0df4d6a8c6adb4a7ec3166812fa8"}, - {file = "contourpy-1.3.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:710a26b3dc80c0e4febf04555de66f5fd17e9cf7170a7b08000601a10570bda6"}, - {file = "contourpy-1.3.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:75ee7cb1a14c617f34a51d11fa7524173e56551646828353c4af859c56b766e2"}, - {file = "contourpy-1.3.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:33c92cdae89ec5135d036e7218e69b0bb2851206077251f04a6c4e0e21f03927"}, - {file = "contourpy-1.3.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a11077e395f67ffc2c44ec2418cfebed032cd6da3022a94fc227b6faf8e2acb8"}, - {file = "contourpy-1.3.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e8134301d7e204c88ed7ab50028ba06c683000040ede1d617298611f9dc6240c"}, - {file = "contourpy-1.3.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e12968fdfd5bb45ffdf6192a590bd8ddd3ba9e58360b29683c6bb71a7b41edca"}, - {file = "contourpy-1.3.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fd2a0fc506eccaaa7595b7e1418951f213cf8255be2600f1ea1b61e46a60c55f"}, - {file = "contourpy-1.3.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4cfb5c62ce023dfc410d6059c936dcf96442ba40814aefbfa575425a3a7f19dc"}, - {file = "contourpy-1.3.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:68a32389b06b82c2fdd68276148d7b9275b5f5cf13e5417e4252f6d1a34f72a2"}, - {file = 
"contourpy-1.3.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:94e848a6b83da10898cbf1311a815f770acc9b6a3f2d646f330d57eb4e87592e"}, - {file = "contourpy-1.3.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:d78ab28a03c854a873787a0a42254a0ccb3cb133c672f645c9f9c8f3ae9d0800"}, - {file = "contourpy-1.3.0-cp39-cp39-win32.whl", hash = "sha256:81cb5ed4952aae6014bc9d0421dec7c5835c9c8c31cdf51910b708f548cf58e5"}, - {file = "contourpy-1.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:14e262f67bd7e6eb6880bc564dcda30b15e351a594657e55b7eec94b6ef72843"}, - {file = "contourpy-1.3.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:fe41b41505a5a33aeaed2a613dccaeaa74e0e3ead6dd6fd3a118fb471644fd6c"}, - {file = "contourpy-1.3.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eca7e17a65f72a5133bdbec9ecf22401c62bcf4821361ef7811faee695799779"}, - {file = "contourpy-1.3.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:1ec4dc6bf570f5b22ed0d7efba0dfa9c5b9e0431aeea7581aa217542d9e809a4"}, - {file = "contourpy-1.3.0-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:00ccd0dbaad6d804ab259820fa7cb0b8036bda0686ef844d24125d8287178ce0"}, - {file = "contourpy-1.3.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8ca947601224119117f7c19c9cdf6b3ab54c5726ef1d906aa4a69dfb6dd58102"}, - {file = "contourpy-1.3.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:c6ec93afeb848a0845a18989da3beca3eec2c0f852322efe21af1931147d12cb"}, - {file = "contourpy-1.3.0.tar.gz", hash = "sha256:7ffa0db17717a8ffb127efd0c95a4362d996b892c2904db72428d5b52e1938a4"}, + {file = "contourpy-1.3.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a045f341a77b77e1c5de31e74e966537bba9f3c4099b35bf4c2e3939dd54cdab"}, + {file = "contourpy-1.3.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:500360b77259914f7805af7462e41f9cb7ca92ad38e9f94d6c8641b089338124"}, + {file = "contourpy-1.3.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b2f926efda994cdf3c8d3fdb40b9962f86edbc4457e739277b961eced3d0b4c1"}, + {file = "contourpy-1.3.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:adce39d67c0edf383647a3a007de0a45fd1b08dedaa5318404f1a73059c2512b"}, + {file = "contourpy-1.3.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:abbb49fb7dac584e5abc6636b7b2a7227111c4f771005853e7d25176daaf8453"}, + {file = "contourpy-1.3.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a0cffcbede75c059f535725c1680dfb17b6ba8753f0c74b14e6a9c68c29d7ea3"}, + {file = "contourpy-1.3.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:ab29962927945d89d9b293eabd0d59aea28d887d4f3be6c22deaefbb938a7277"}, + {file = "contourpy-1.3.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:974d8145f8ca354498005b5b981165b74a195abfae9a8129df3e56771961d595"}, + {file = "contourpy-1.3.1-cp310-cp310-win32.whl", hash = "sha256:ac4578ac281983f63b400f7fe6c101bedc10651650eef012be1ccffcbacf3697"}, + {file = "contourpy-1.3.1-cp310-cp310-win_amd64.whl", hash = "sha256:174e758c66bbc1c8576992cec9599ce8b6672b741b5d336b5c74e35ac382b18e"}, + {file = "contourpy-1.3.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3e8b974d8db2c5610fb4e76307e265de0edb655ae8169e8b21f41807ccbeec4b"}, + {file = "contourpy-1.3.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:20914c8c973f41456337652a6eeca26d2148aa96dd7ac323b74516988bea89fc"}, + {file = "contourpy-1.3.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", 
hash = "sha256:19d40d37c1c3a4961b4619dd9d77b12124a453cc3d02bb31a07d58ef684d3d86"}, + {file = "contourpy-1.3.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:113231fe3825ebf6f15eaa8bc1f5b0ddc19d42b733345eae0934cb291beb88b6"}, + {file = "contourpy-1.3.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4dbbc03a40f916a8420e420d63e96a1258d3d1b58cbdfd8d1f07b49fcbd38e85"}, + {file = "contourpy-1.3.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a04ecd68acbd77fa2d39723ceca4c3197cb2969633836ced1bea14e219d077c"}, + {file = "contourpy-1.3.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:c414fc1ed8ee1dbd5da626cf3710c6013d3d27456651d156711fa24f24bd1291"}, + {file = "contourpy-1.3.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:31c1b55c1f34f80557d3830d3dd93ba722ce7e33a0b472cba0ec3b6535684d8f"}, + {file = "contourpy-1.3.1-cp311-cp311-win32.whl", hash = "sha256:f611e628ef06670df83fce17805c344710ca5cde01edfdc72751311da8585375"}, + {file = "contourpy-1.3.1-cp311-cp311-win_amd64.whl", hash = "sha256:b2bdca22a27e35f16794cf585832e542123296b4687f9fd96822db6bae17bfc9"}, + {file = "contourpy-1.3.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:0ffa84be8e0bd33410b17189f7164c3589c229ce5db85798076a3fa136d0e509"}, + {file = "contourpy-1.3.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:805617228ba7e2cbbfb6c503858e626ab528ac2a32a04a2fe88ffaf6b02c32bc"}, + {file = "contourpy-1.3.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ade08d343436a94e633db932e7e8407fe7de8083967962b46bdfc1b0ced39454"}, + {file = "contourpy-1.3.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:47734d7073fb4590b4a40122b35917cd77be5722d80683b249dac1de266aac80"}, + {file = "contourpy-1.3.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2ba94a401342fc0f8b948e57d977557fbf4d515f03c67682dd5c6191cb2d16ec"}, + {file = "contourpy-1.3.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:efa874e87e4a647fd2e4f514d5e91c7d493697127beb95e77d2f7561f6905bd9"}, + {file = "contourpy-1.3.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:1bf98051f1045b15c87868dbaea84f92408337d4f81d0e449ee41920ea121d3b"}, + {file = "contourpy-1.3.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:61332c87493b00091423e747ea78200659dc09bdf7fd69edd5e98cef5d3e9a8d"}, + {file = "contourpy-1.3.1-cp312-cp312-win32.whl", hash = "sha256:e914a8cb05ce5c809dd0fe350cfbb4e881bde5e2a38dc04e3afe1b3e58bd158e"}, + {file = "contourpy-1.3.1-cp312-cp312-win_amd64.whl", hash = "sha256:08d9d449a61cf53033612cb368f3a1b26cd7835d9b8cd326647efe43bca7568d"}, + {file = "contourpy-1.3.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a761d9ccfc5e2ecd1bf05534eda382aa14c3e4f9205ba5b1684ecfe400716ef2"}, + {file = "contourpy-1.3.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:523a8ee12edfa36f6d2a49407f705a6ef4c5098de4f498619787e272de93f2d5"}, + {file = "contourpy-1.3.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ece6df05e2c41bd46776fbc712e0996f7c94e0d0543af1656956d150c4ca7c81"}, + {file = "contourpy-1.3.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:573abb30e0e05bf31ed067d2f82500ecfdaec15627a59d63ea2d95714790f5c2"}, + {file = "contourpy-1.3.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a9fa36448e6a3a1a9a2ba23c02012c43ed88905ec80163f2ffe2421c7192a5d7"}, + {file = 
"contourpy-1.3.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ea9924d28fc5586bf0b42d15f590b10c224117e74409dd7a0be3b62b74a501c"}, + {file = "contourpy-1.3.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:5b75aa69cb4d6f137b36f7eb2ace9280cfb60c55dc5f61c731fdf6f037f958a3"}, + {file = "contourpy-1.3.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:041b640d4ec01922083645a94bb3b2e777e6b626788f4095cf21abbe266413c1"}, + {file = "contourpy-1.3.1-cp313-cp313-win32.whl", hash = "sha256:36987a15e8ace5f58d4d5da9dca82d498c2bbb28dff6e5d04fbfcc35a9cb3a82"}, + {file = "contourpy-1.3.1-cp313-cp313-win_amd64.whl", hash = "sha256:a7895f46d47671fa7ceec40f31fae721da51ad34bdca0bee83e38870b1f47ffd"}, + {file = "contourpy-1.3.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:9ddeb796389dadcd884c7eb07bd14ef12408aaae358f0e2ae24114d797eede30"}, + {file = "contourpy-1.3.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:19c1555a6801c2f084c7ddc1c6e11f02eb6a6016ca1318dd5452ba3f613a1751"}, + {file = "contourpy-1.3.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:841ad858cff65c2c04bf93875e384ccb82b654574a6d7f30453a04f04af71342"}, + {file = "contourpy-1.3.1-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4318af1c925fb9a4fb190559ef3eec206845f63e80fb603d47f2d6d67683901c"}, + {file = "contourpy-1.3.1-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:14c102b0eab282427b662cb590f2e9340a9d91a1c297f48729431f2dcd16e14f"}, + {file = "contourpy-1.3.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:05e806338bfeaa006acbdeba0ad681a10be63b26e1b17317bfac3c5d98f36cda"}, + {file = "contourpy-1.3.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:4d76d5993a34ef3df5181ba3c92fabb93f1eaa5729504fb03423fcd9f3177242"}, + {file = "contourpy-1.3.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:89785bb2a1980c1bd87f0cb1517a71cde374776a5f150936b82580ae6ead44a1"}, + {file = "contourpy-1.3.1-cp313-cp313t-win32.whl", hash = "sha256:8eb96e79b9f3dcadbad2a3891672f81cdcab7f95b27f28f1c67d75f045b6b4f1"}, + {file = "contourpy-1.3.1-cp313-cp313t-win_amd64.whl", hash = "sha256:287ccc248c9e0d0566934e7d606201abd74761b5703d804ff3df8935f523d546"}, + {file = "contourpy-1.3.1-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:b457d6430833cee8e4b8e9b6f07aa1c161e5e0d52e118dc102c8f9bd7dd060d6"}, + {file = "contourpy-1.3.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cb76c1a154b83991a3cbbf0dfeb26ec2833ad56f95540b442c73950af2013750"}, + {file = "contourpy-1.3.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:44a29502ca9c7b5ba389e620d44f2fbe792b1fb5734e8b931ad307071ec58c53"}, + {file = "contourpy-1.3.1.tar.gz", hash = "sha256:dfd97abd83335045a913e3bcc4a09c0ceadbe66580cf573fe961f4a825efa699"}, ] [package.dependencies] @@ -843,73 +815,73 @@ test-no-images = ["pytest", "pytest-cov", "pytest-rerunfailures", "pytest-xdist" [[package]] name = "coverage" -version = "7.6.2" +version = "7.6.10" description = "Code coverage measurement for Python" optional = true python-versions = ">=3.9" files = [ - {file = "coverage-7.6.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c9df1950fb92d49970cce38100d7e7293c84ed3606eaa16ea0b6bc27175bb667"}, - {file = "coverage-7.6.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:24500f4b0e03aab60ce575c85365beab64b44d4db837021e08339f61d1fbfe52"}, - {file = 
"coverage-7.6.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a663b180b6669c400b4630a24cc776f23a992d38ce7ae72ede2a397ce6b0f170"}, - {file = "coverage-7.6.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bfde025e2793a22efe8c21f807d276bd1d6a4bcc5ba6f19dbdfc4e7a12160909"}, - {file = "coverage-7.6.2-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:087932079c065d7b8ebadd3a0160656c55954144af6439886c8bcf78bbbcde7f"}, - {file = "coverage-7.6.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:9c6b0c1cafd96213a0327cf680acb39f70e452caf8e9a25aeb05316db9c07f89"}, - {file = "coverage-7.6.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:6e85830eed5b5263ffa0c62428e43cb844296f3b4461f09e4bdb0d44ec190bc2"}, - {file = "coverage-7.6.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:62ab4231c01e156ece1b3a187c87173f31cbeee83a5e1f6dff17f288dca93345"}, - {file = "coverage-7.6.2-cp310-cp310-win32.whl", hash = "sha256:7b80fbb0da3aebde102a37ef0138aeedff45997e22f8962e5f16ae1742852676"}, - {file = "coverage-7.6.2-cp310-cp310-win_amd64.whl", hash = "sha256:d20c3d1f31f14d6962a4e2f549c21d31e670b90f777ef4171be540fb7fb70f02"}, - {file = "coverage-7.6.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:bb21bac7783c1bf6f4bbe68b1e0ff0d20e7e7732cfb7995bc8d96e23aa90fc7b"}, - {file = "coverage-7.6.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a7b2e437fbd8fae5bc7716b9c7ff97aecc95f0b4d56e4ca08b3c8d8adcaadb84"}, - {file = "coverage-7.6.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:536f77f2bf5797983652d1d55f1a7272a29afcc89e3ae51caa99b2db4e89d658"}, - {file = "coverage-7.6.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f361296ca7054f0936b02525646b2731b32c8074ba6defab524b79b2b7eeac72"}, - {file = "coverage-7.6.2-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7926d8d034e06b479797c199747dd774d5e86179f2ce44294423327a88d66ca7"}, - {file = "coverage-7.6.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:0bbae11c138585c89fb4e991faefb174a80112e1a7557d507aaa07675c62e66b"}, - {file = "coverage-7.6.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:fcad7d5d2bbfeae1026b395036a8aa5abf67e8038ae7e6a25c7d0f88b10a8e6a"}, - {file = "coverage-7.6.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:f01e53575f27097d75d42de33b1b289c74b16891ce576d767ad8c48d17aeb5e0"}, - {file = "coverage-7.6.2-cp311-cp311-win32.whl", hash = "sha256:7781f4f70c9b0b39e1b129b10c7d43a4e0c91f90c60435e6da8288efc2b73438"}, - {file = "coverage-7.6.2-cp311-cp311-win_amd64.whl", hash = "sha256:9bcd51eeca35a80e76dc5794a9dd7cb04b97f0e8af620d54711793bfc1fbba4b"}, - {file = "coverage-7.6.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:ebc94fadbd4a3f4215993326a6a00e47d79889391f5659bf310f55fe5d9f581c"}, - {file = "coverage-7.6.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:9681516288e3dcf0aa7c26231178cc0be6cac9705cac06709f2353c5b406cfea"}, - {file = "coverage-7.6.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8d9c5d13927d77af4fbe453953810db766f75401e764727e73a6ee4f82527b3e"}, - {file = "coverage-7.6.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b92f9ca04b3e719d69b02dc4a69debb795af84cb7afd09c5eb5d54b4a1ae2191"}, - {file = 
"coverage-7.6.2-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0ff2ef83d6d0b527b5c9dad73819b24a2f76fdddcfd6c4e7a4d7e73ecb0656b4"}, - {file = "coverage-7.6.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:47ccb6e99a3031ffbbd6e7cc041e70770b4fe405370c66a54dbf26a500ded80b"}, - {file = "coverage-7.6.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:a867d26f06bcd047ef716175b2696b315cb7571ccb951006d61ca80bbc356e9e"}, - {file = "coverage-7.6.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:cdfcf2e914e2ba653101157458afd0ad92a16731eeba9a611b5cbb3e7124e74b"}, - {file = "coverage-7.6.2-cp312-cp312-win32.whl", hash = "sha256:f9035695dadfb397bee9eeaf1dc7fbeda483bf7664a7397a629846800ce6e276"}, - {file = "coverage-7.6.2-cp312-cp312-win_amd64.whl", hash = "sha256:5ed69befa9a9fc796fe015a7040c9398722d6b97df73a6b608e9e275fa0932b0"}, - {file = "coverage-7.6.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:4eea60c79d36a8f39475b1af887663bc3ae4f31289cd216f514ce18d5938df40"}, - {file = "coverage-7.6.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:aa68a6cdbe1bc6793a9dbfc38302c11599bbe1837392ae9b1d238b9ef3dafcf1"}, - {file = "coverage-7.6.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3ec528ae69f0a139690fad6deac8a7d33629fa61ccce693fdd07ddf7e9931fba"}, - {file = "coverage-7.6.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ed5ac02126f74d190fa2cc14a9eb2a5d9837d5863920fa472b02eb1595cdc925"}, - {file = "coverage-7.6.2-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:21c0ea0d4db8a36b275cb6fb2437a3715697a4ba3cb7b918d3525cc75f726304"}, - {file = "coverage-7.6.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:35a51598f29b2a19e26d0908bd196f771a9b1c5d9a07bf20be0adf28f1ad4f77"}, - {file = "coverage-7.6.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:c9192925acc33e146864b8cf037e2ed32a91fdf7644ae875f5d46cd2ef086a5f"}, - {file = "coverage-7.6.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:bf4eeecc9e10f5403ec06138978235af79c9a79af494eb6b1d60a50b49ed2869"}, - {file = "coverage-7.6.2-cp313-cp313-win32.whl", hash = "sha256:e4ee15b267d2dad3e8759ca441ad450c334f3733304c55210c2a44516e8d5530"}, - {file = "coverage-7.6.2-cp313-cp313-win_amd64.whl", hash = "sha256:c71965d1ced48bf97aab79fad56df82c566b4c498ffc09c2094605727c4b7e36"}, - {file = "coverage-7.6.2-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:7571e8bbecc6ac066256f9de40365ff833553e2e0c0c004f4482facb131820ef"}, - {file = "coverage-7.6.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:078a87519057dacb5d77e333f740708ec2a8f768655f1db07f8dfd28d7a005f0"}, - {file = "coverage-7.6.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1e5e92e3e84a8718d2de36cd8387459cba9a4508337b8c5f450ce42b87a9e760"}, - {file = "coverage-7.6.2-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ebabdf1c76593a09ee18c1a06cd3022919861365219ea3aca0247ededf6facd6"}, - {file = "coverage-7.6.2-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:12179eb0575b8900912711688e45474f04ab3934aaa7b624dea7b3c511ecc90f"}, - {file = "coverage-7.6.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:39d3b964abfe1519b9d313ab28abf1d02faea26cd14b27f5283849bf59479ff5"}, - {file = 
"coverage-7.6.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:84c4315577f7cd511d6250ffd0f695c825efe729f4205c0340f7004eda51191f"}, - {file = "coverage-7.6.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:ff797320dcbff57caa6b2301c3913784a010e13b1f6cf4ab3f563f3c5e7919db"}, - {file = "coverage-7.6.2-cp313-cp313t-win32.whl", hash = "sha256:2b636a301e53964550e2f3094484fa5a96e699db318d65398cfba438c5c92171"}, - {file = "coverage-7.6.2-cp313-cp313t-win_amd64.whl", hash = "sha256:d03a060ac1a08e10589c27d509bbdb35b65f2d7f3f8d81cf2fa199877c7bc58a"}, - {file = "coverage-7.6.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c37faddc8acd826cfc5e2392531aba734b229741d3daec7f4c777a8f0d4993e5"}, - {file = "coverage-7.6.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ab31fdd643f162c467cfe6a86e9cb5f1965b632e5e65c072d90854ff486d02cf"}, - {file = "coverage-7.6.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:97df87e1a20deb75ac7d920c812e9326096aa00a9a4b6d07679b4f1f14b06c90"}, - {file = "coverage-7.6.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:343056c5e0737487a5291f5691f4dfeb25b3e3c8699b4d36b92bb0e586219d14"}, - {file = "coverage-7.6.2-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ad4ef1c56b47b6b9024b939d503ab487231df1f722065a48f4fc61832130b90e"}, - {file = "coverage-7.6.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:7fca4a92c8a7a73dee6946471bce6d1443d94155694b893b79e19ca2a540d86e"}, - {file = "coverage-7.6.2-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:69f251804e052fc46d29d0e7348cdc5fcbfc4861dc4a1ebedef7e78d241ad39e"}, - {file = "coverage-7.6.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:e8ea055b3ea046c0f66217af65bc193bbbeca1c8661dc5fd42698db5795d2627"}, - {file = "coverage-7.6.2-cp39-cp39-win32.whl", hash = "sha256:6c2ba1e0c24d8fae8f2cf0aeb2fc0a2a7f69b6d20bd8d3749fd6b36ecef5edf0"}, - {file = "coverage-7.6.2-cp39-cp39-win_amd64.whl", hash = "sha256:2186369a654a15628e9c1c9921409a6b3eda833e4b91f3ca2a7d9f77abb4987c"}, - {file = "coverage-7.6.2-pp39.pp310-none-any.whl", hash = "sha256:667952739daafe9616db19fbedbdb87917eee253ac4f31d70c7587f7ab531b4e"}, - {file = "coverage-7.6.2.tar.gz", hash = "sha256:a5f81e68aa62bc0cfca04f7b19eaa8f9c826b53fc82ab9e2121976dc74f131f3"}, + {file = "coverage-7.6.10-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:5c912978f7fbf47ef99cec50c4401340436d200d41d714c7a4766f377c5b7b78"}, + {file = "coverage-7.6.10-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a01ec4af7dfeb96ff0078ad9a48810bb0cc8abcb0115180c6013a6b26237626c"}, + {file = "coverage-7.6.10-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a3b204c11e2b2d883946fe1d97f89403aa1811df28ce0447439178cc7463448a"}, + {file = "coverage-7.6.10-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:32ee6d8491fcfc82652a37109f69dee9a830e9379166cb73c16d8dc5c2915165"}, + {file = "coverage-7.6.10-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:675cefc4c06e3b4c876b85bfb7c59c5e2218167bbd4da5075cbe3b5790a28988"}, + {file = "coverage-7.6.10-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:f4f620668dbc6f5e909a0946a877310fb3d57aea8198bde792aae369ee1c23b5"}, + {file = "coverage-7.6.10-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:4eea95ef275de7abaef630c9b2c002ffbc01918b726a39f5a4353916ec72d2f3"}, + 
{file = "coverage-7.6.10-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:e2f0280519e42b0a17550072861e0bc8a80a0870de260f9796157d3fca2733c5"}, + {file = "coverage-7.6.10-cp310-cp310-win32.whl", hash = "sha256:bc67deb76bc3717f22e765ab3e07ee9c7a5e26b9019ca19a3b063d9f4b874244"}, + {file = "coverage-7.6.10-cp310-cp310-win_amd64.whl", hash = "sha256:0f460286cb94036455e703c66988851d970fdfd8acc2a1122ab7f4f904e4029e"}, + {file = "coverage-7.6.10-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ea3c8f04b3e4af80e17bab607c386a830ffc2fb88a5484e1df756478cf70d1d3"}, + {file = "coverage-7.6.10-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:507a20fc863cae1d5720797761b42d2d87a04b3e5aeb682ef3b7332e90598f43"}, + {file = "coverage-7.6.10-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d37a84878285b903c0fe21ac8794c6dab58150e9359f1aaebbeddd6412d53132"}, + {file = "coverage-7.6.10-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a534738b47b0de1995f85f582d983d94031dffb48ab86c95bdf88dc62212142f"}, + {file = "coverage-7.6.10-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0d7a2bf79378d8fb8afaa994f91bfd8215134f8631d27eba3e0e2c13546ce994"}, + {file = "coverage-7.6.10-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:6713ba4b4ebc330f3def51df1d5d38fad60b66720948112f114968feb52d3f99"}, + {file = "coverage-7.6.10-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:ab32947f481f7e8c763fa2c92fd9f44eeb143e7610c4ca9ecd6a36adab4081bd"}, + {file = "coverage-7.6.10-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:7bbd8c8f1b115b892e34ba66a097b915d3871db7ce0e6b9901f462ff3a975377"}, + {file = "coverage-7.6.10-cp311-cp311-win32.whl", hash = "sha256:299e91b274c5c9cdb64cbdf1b3e4a8fe538a7a86acdd08fae52301b28ba297f8"}, + {file = "coverage-7.6.10-cp311-cp311-win_amd64.whl", hash = "sha256:489a01f94aa581dbd961f306e37d75d4ba16104bbfa2b0edb21d29b73be83609"}, + {file = "coverage-7.6.10-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:27c6e64726b307782fa5cbe531e7647aee385a29b2107cd87ba7c0105a5d3853"}, + {file = "coverage-7.6.10-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c56e097019e72c373bae32d946ecf9858fda841e48d82df7e81c63ac25554078"}, + {file = "coverage-7.6.10-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c7827a5bc7bdb197b9e066cdf650b2887597ad124dd99777332776f7b7c7d0d0"}, + {file = "coverage-7.6.10-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:204a8238afe787323a8b47d8be4df89772d5c1e4651b9ffa808552bdf20e1d50"}, + {file = "coverage-7.6.10-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e67926f51821b8e9deb6426ff3164870976fe414d033ad90ea75e7ed0c2e5022"}, + {file = "coverage-7.6.10-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:e78b270eadb5702938c3dbe9367f878249b5ef9a2fcc5360ac7bff694310d17b"}, + {file = "coverage-7.6.10-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:714f942b9c15c3a7a5fe6876ce30af831c2ad4ce902410b7466b662358c852c0"}, + {file = "coverage-7.6.10-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:abb02e2f5a3187b2ac4cd46b8ced85a0858230b577ccb2c62c81482ca7d18852"}, + {file = "coverage-7.6.10-cp312-cp312-win32.whl", hash = "sha256:55b201b97286cf61f5e76063f9e2a1d8d2972fc2fcfd2c1272530172fd28c359"}, + {file = "coverage-7.6.10-cp312-cp312-win_amd64.whl", hash = 
"sha256:e4ae5ac5e0d1e4edfc9b4b57b4cbecd5bc266a6915c500f358817a8496739247"}, + {file = "coverage-7.6.10-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:05fca8ba6a87aabdd2d30d0b6c838b50510b56cdcfc604d40760dae7153b73d9"}, + {file = "coverage-7.6.10-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:9e80eba8801c386f72e0712a0453431259c45c3249f0009aff537a517b52942b"}, + {file = "coverage-7.6.10-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a372c89c939d57abe09e08c0578c1d212e7a678135d53aa16eec4430adc5e690"}, + {file = "coverage-7.6.10-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ec22b5e7fe7a0fa8509181c4aac1db48f3dd4d3a566131b313d1efc102892c18"}, + {file = "coverage-7.6.10-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:26bcf5c4df41cad1b19c84af71c22cbc9ea9a547fc973f1f2cc9a290002c8b3c"}, + {file = "coverage-7.6.10-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:4e4630c26b6084c9b3cb53b15bd488f30ceb50b73c35c5ad7871b869cb7365fd"}, + {file = "coverage-7.6.10-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:2396e8116db77789f819d2bc8a7e200232b7a282c66e0ae2d2cd84581a89757e"}, + {file = "coverage-7.6.10-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:79109c70cc0882e4d2d002fe69a24aa504dec0cc17169b3c7f41a1d341a73694"}, + {file = "coverage-7.6.10-cp313-cp313-win32.whl", hash = "sha256:9e1747bab246d6ff2c4f28b4d186b205adced9f7bd9dc362051cc37c4a0c7bd6"}, + {file = "coverage-7.6.10-cp313-cp313-win_amd64.whl", hash = "sha256:254f1a3b1eef5f7ed23ef265eaa89c65c8c5b6b257327c149db1ca9d4a35f25e"}, + {file = "coverage-7.6.10-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:2ccf240eb719789cedbb9fd1338055de2761088202a9a0b73032857e53f612fe"}, + {file = "coverage-7.6.10-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:0c807ca74d5a5e64427c8805de15b9ca140bba13572d6d74e262f46f50b13273"}, + {file = "coverage-7.6.10-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2bcfa46d7709b5a7ffe089075799b902020b62e7ee56ebaed2f4bdac04c508d8"}, + {file = "coverage-7.6.10-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4e0de1e902669dccbf80b0415fb6b43d27edca2fbd48c74da378923b05316098"}, + {file = "coverage-7.6.10-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3f7b444c42bbc533aaae6b5a2166fd1a797cdb5eb58ee51a92bee1eb94a1e1cb"}, + {file = "coverage-7.6.10-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:b330368cb99ef72fcd2dc3ed260adf67b31499584dc8a20225e85bfe6f6cfed0"}, + {file = "coverage-7.6.10-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:9a7cfb50515f87f7ed30bc882f68812fd98bc2852957df69f3003d22a2aa0abf"}, + {file = "coverage-7.6.10-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:6f93531882a5f68c28090f901b1d135de61b56331bba82028489bc51bdd818d2"}, + {file = "coverage-7.6.10-cp313-cp313t-win32.whl", hash = "sha256:89d76815a26197c858f53c7f6a656686ec392b25991f9e409bcef020cd532312"}, + {file = "coverage-7.6.10-cp313-cp313t-win_amd64.whl", hash = "sha256:54a5f0f43950a36312155dae55c505a76cd7f2b12d26abeebbe7a0b36dbc868d"}, + {file = "coverage-7.6.10-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:656c82b8a0ead8bba147de9a89bda95064874c91a3ed43a00e687f23cc19d53a"}, + {file = "coverage-7.6.10-cp39-cp39-macosx_11_0_arm64.whl", hash = 
"sha256:ccc2b70a7ed475c68ceb548bf69cec1e27305c1c2606a5eb7c3afff56a1b3b27"}, + {file = "coverage-7.6.10-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5e37dc41d57ceba70956fa2fc5b63c26dba863c946ace9705f8eca99daecdc4"}, + {file = "coverage-7.6.10-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0aa9692b4fdd83a4647eeb7db46410ea1322b5ed94cd1715ef09d1d5922ba87f"}, + {file = "coverage-7.6.10-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa744da1820678b475e4ba3dfd994c321c5b13381d1041fe9c608620e6676e25"}, + {file = "coverage-7.6.10-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:c0b1818063dc9e9d838c09e3a473c1422f517889436dd980f5d721899e66f315"}, + {file = "coverage-7.6.10-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:59af35558ba08b758aec4d56182b222976330ef8d2feacbb93964f576a7e7a90"}, + {file = "coverage-7.6.10-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:7ed2f37cfce1ce101e6dffdfd1c99e729dd2ffc291d02d3e2d0af8b53d13840d"}, + {file = "coverage-7.6.10-cp39-cp39-win32.whl", hash = "sha256:4bcc276261505d82f0ad426870c3b12cb177752834a633e737ec5ee79bbdff18"}, + {file = "coverage-7.6.10-cp39-cp39-win_amd64.whl", hash = "sha256:457574f4599d2b00f7f637a0700a6422243b3565509457b2dbd3f50703e11f59"}, + {file = "coverage-7.6.10-pp39.pp310-none-any.whl", hash = "sha256:fd34e7b3405f0cc7ab03d54a334c17a9e802897580d964bd8c2001f4b9fd488f"}, + {file = "coverage-7.6.10.tar.gz", hash = "sha256:7fb105327c8f8f0682e29843e2ff96af9dcbe5bab8eeb4b398c6a33a16d80a23"}, ] [package.dependencies] @@ -994,22 +966,22 @@ files = [ [[package]] name = "datasets" -version = "3.0.1" +version = "3.2.0" description = "HuggingFace community-driven open-source library of datasets" optional = false -python-versions = ">=3.8.0" +python-versions = ">=3.9.0" files = [ - {file = "datasets-3.0.1-py3-none-any.whl", hash = "sha256:db080aab41c8cc68645117a0f172e5c6789cbc672f066de0aa5a08fc3eebc686"}, - {file = "datasets-3.0.1.tar.gz", hash = "sha256:40d63b09e76a3066c32e746d6fdc36fd3f29ed2acd49bf5b1a2100da32936511"}, + {file = "datasets-3.2.0-py3-none-any.whl", hash = "sha256:f3d2ba2698b7284a4518019658596a6a8bc79f31e51516524249d6c59cf0fe2a"}, + {file = "datasets-3.2.0.tar.gz", hash = "sha256:9a6e1a356052866b5dbdd9c9eedb000bf3fc43d986e3584d9b028f4976937229"}, ] [package.dependencies] aiohttp = "*" dill = ">=0.3.0,<0.3.9" filelock = "*" -fsspec = {version = ">=2023.1.0,<=2024.6.1", extras = ["http"]} -huggingface-hub = ">=0.22.0" -multiprocess = "*" +fsspec = {version = ">=2023.1.0,<=2024.9.0", extras = ["http"]} +huggingface-hub = ">=0.23.0" +multiprocess = "<0.70.17" numpy = ">=1.17" packaging = "*" pandas = "*" @@ -1022,51 +994,51 @@ xxhash = "*" [package.extras] audio = ["librosa", "soundfile (>=0.12.1)", "soxr (>=0.4.0)"] benchmarks = ["tensorflow (==2.12.0)", "torch (==2.0.1)", "transformers (==4.30.1)"] -dev = ["Pillow (>=9.4.0)", "absl-py", "decorator", "elasticsearch (<8.0.0)", "faiss-cpu (>=1.8.0.post1)", "jax (>=0.3.14)", "jaxlib (>=0.3.14)", "joblib (<1.3.0)", "joblibspark", "librosa", "lz4", "moto[server]", "polars[timezone] (>=0.20.0)", "protobuf (<4.0.0)", "py7zr", "pyspark (>=3.4)", "pytest", "pytest-datadir", "pytest-xdist", "rarfile (>=4.0)", "ruff (>=0.3.0)", "s3fs", "s3fs (>=2021.11.1)", "soundfile (>=0.12.1)", "soxr (>=0.4.0)", "sqlalchemy", "tensorflow (>=2.16.0)", "tensorflow (>=2.6.0)", "tensorflow (>=2.6.0)", "tiktoken", "torch", "torch (>=2.0.0)", "torchdata", 
"transformers", "transformers (>=4.42.0)", "zstandard"] +dev = ["Pillow (>=9.4.0)", "absl-py", "decorator", "decord (==0.6.0)", "elasticsearch (>=7.17.12,<8.0.0)", "faiss-cpu (>=1.8.0.post1)", "jax (>=0.3.14)", "jaxlib (>=0.3.14)", "joblib (<1.3.0)", "joblibspark", "librosa", "lz4", "moto[server]", "polars[timezone] (>=0.20.0)", "protobuf (<4.0.0)", "py7zr", "pyspark (>=3.4)", "pytest", "pytest-datadir", "pytest-xdist", "rarfile (>=4.0)", "ruff (>=0.3.0)", "s3fs", "s3fs (>=2021.11.1)", "soundfile (>=0.12.1)", "soxr (>=0.4.0)", "sqlalchemy", "tensorflow (>=2.16.0)", "tensorflow (>=2.6.0)", "tensorflow (>=2.6.0)", "tiktoken", "torch", "torch (>=2.0.0)", "torchdata", "transformers", "transformers (>=4.42.0)", "zstandard"] docs = ["s3fs", "tensorflow (>=2.6.0)", "torch", "transformers"] jax = ["jax (>=0.3.14)", "jaxlib (>=0.3.14)"] quality = ["ruff (>=0.3.0)"] s3 = ["s3fs"] tensorflow = ["tensorflow (>=2.6.0)"] tensorflow-gpu = ["tensorflow (>=2.6.0)"] -tests = ["Pillow (>=9.4.0)", "absl-py", "decorator", "elasticsearch (<8.0.0)", "faiss-cpu (>=1.8.0.post1)", "jax (>=0.3.14)", "jaxlib (>=0.3.14)", "joblib (<1.3.0)", "joblibspark", "librosa", "lz4", "moto[server]", "polars[timezone] (>=0.20.0)", "protobuf (<4.0.0)", "py7zr", "pyspark (>=3.4)", "pytest", "pytest-datadir", "pytest-xdist", "rarfile (>=4.0)", "s3fs (>=2021.11.1)", "soundfile (>=0.12.1)", "soxr (>=0.4.0)", "sqlalchemy", "tensorflow (>=2.16.0)", "tensorflow (>=2.6.0)", "tiktoken", "torch (>=2.0.0)", "torchdata", "transformers (>=4.42.0)", "zstandard"] -tests-numpy2 = ["Pillow (>=9.4.0)", "absl-py", "decorator", "elasticsearch (<8.0.0)", "jax (>=0.3.14)", "jaxlib (>=0.3.14)", "joblib (<1.3.0)", "joblibspark", "lz4", "moto[server]", "polars[timezone] (>=0.20.0)", "protobuf (<4.0.0)", "py7zr", "pyspark (>=3.4)", "pytest", "pytest-datadir", "pytest-xdist", "rarfile (>=4.0)", "s3fs (>=2021.11.1)", "soundfile (>=0.12.1)", "soxr (>=0.4.0)", "sqlalchemy", "tiktoken", "torch (>=2.0.0)", "torchdata", "transformers (>=4.42.0)", "zstandard"] +tests = ["Pillow (>=9.4.0)", "absl-py", "decorator", "decord (==0.6.0)", "elasticsearch (>=7.17.12,<8.0.0)", "faiss-cpu (>=1.8.0.post1)", "jax (>=0.3.14)", "jaxlib (>=0.3.14)", "joblib (<1.3.0)", "joblibspark", "librosa", "lz4", "moto[server]", "polars[timezone] (>=0.20.0)", "protobuf (<4.0.0)", "py7zr", "pyspark (>=3.4)", "pytest", "pytest-datadir", "pytest-xdist", "rarfile (>=4.0)", "s3fs (>=2021.11.1)", "soundfile (>=0.12.1)", "soxr (>=0.4.0)", "sqlalchemy", "tensorflow (>=2.16.0)", "tensorflow (>=2.6.0)", "tiktoken", "torch (>=2.0.0)", "torchdata", "transformers (>=4.42.0)", "zstandard"] +tests-numpy2 = ["Pillow (>=9.4.0)", "absl-py", "decorator", "decord (==0.6.0)", "elasticsearch (>=7.17.12,<8.0.0)", "jax (>=0.3.14)", "jaxlib (>=0.3.14)", "joblib (<1.3.0)", "joblibspark", "lz4", "moto[server]", "polars[timezone] (>=0.20.0)", "protobuf (<4.0.0)", "py7zr", "pyspark (>=3.4)", "pytest", "pytest-datadir", "pytest-xdist", "rarfile (>=4.0)", "s3fs (>=2021.11.1)", "soundfile (>=0.12.1)", "soxr (>=0.4.0)", "sqlalchemy", "tiktoken", "torch (>=2.0.0)", "torchdata", "transformers (>=4.42.0)", "zstandard"] torch = ["torch"] vision = ["Pillow (>=9.4.0)"] [[package]] name = "debugpy" -version = "1.8.7" +version = "1.8.11" description = "An implementation of the Debug Adapter Protocol for Python" optional = true python-versions = ">=3.8" files = [ - {file = "debugpy-1.8.7-cp310-cp310-macosx_14_0_x86_64.whl", hash = "sha256:95fe04a573b8b22896c404365e03f4eda0ce0ba135b7667a1e57bd079793b96b"}, - {file = 
"debugpy-1.8.7-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:628a11f4b295ffb4141d8242a9bb52b77ad4a63a2ad19217a93be0f77f2c28c9"}, - {file = "debugpy-1.8.7-cp310-cp310-win32.whl", hash = "sha256:85ce9c1d0eebf622f86cc68618ad64bf66c4fc3197d88f74bb695a416837dd55"}, - {file = "debugpy-1.8.7-cp310-cp310-win_amd64.whl", hash = "sha256:29e1571c276d643757ea126d014abda081eb5ea4c851628b33de0c2b6245b037"}, - {file = "debugpy-1.8.7-cp311-cp311-macosx_14_0_universal2.whl", hash = "sha256:caf528ff9e7308b74a1749c183d6808ffbedbb9fb6af78b033c28974d9b8831f"}, - {file = "debugpy-1.8.7-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cba1d078cf2e1e0b8402e6bda528bf8fda7ccd158c3dba6c012b7897747c41a0"}, - {file = "debugpy-1.8.7-cp311-cp311-win32.whl", hash = "sha256:171899588bcd412151e593bd40d9907133a7622cd6ecdbdb75f89d1551df13c2"}, - {file = "debugpy-1.8.7-cp311-cp311-win_amd64.whl", hash = "sha256:6e1c4ffb0c79f66e89dfd97944f335880f0d50ad29525dc792785384923e2211"}, - {file = "debugpy-1.8.7-cp312-cp312-macosx_14_0_universal2.whl", hash = "sha256:4d27d842311353ede0ad572600c62e4bcd74f458ee01ab0dd3a1a4457e7e3706"}, - {file = "debugpy-1.8.7-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:703c1fd62ae0356e194f3e7b7a92acd931f71fe81c4b3be2c17a7b8a4b546ec2"}, - {file = "debugpy-1.8.7-cp312-cp312-win32.whl", hash = "sha256:2f729228430ef191c1e4df72a75ac94e9bf77413ce5f3f900018712c9da0aaca"}, - {file = "debugpy-1.8.7-cp312-cp312-win_amd64.whl", hash = "sha256:45c30aaefb3e1975e8a0258f5bbd26cd40cde9bfe71e9e5a7ac82e79bad64e39"}, - {file = "debugpy-1.8.7-cp313-cp313-macosx_14_0_universal2.whl", hash = "sha256:d050a1ec7e925f514f0f6594a1e522580317da31fbda1af71d1530d6ea1f2b40"}, - {file = "debugpy-1.8.7-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f2f4349a28e3228a42958f8ddaa6333d6f8282d5edaea456070e48609c5983b7"}, - {file = "debugpy-1.8.7-cp313-cp313-win32.whl", hash = "sha256:11ad72eb9ddb436afb8337891a986302e14944f0f755fd94e90d0d71e9100bba"}, - {file = "debugpy-1.8.7-cp313-cp313-win_amd64.whl", hash = "sha256:2efb84d6789352d7950b03d7f866e6d180284bc02c7e12cb37b489b7083d81aa"}, - {file = "debugpy-1.8.7-cp38-cp38-macosx_14_0_x86_64.whl", hash = "sha256:4b908291a1d051ef3331484de8e959ef3e66f12b5e610c203b5b75d2725613a7"}, - {file = "debugpy-1.8.7-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:da8df5b89a41f1fd31503b179d0a84a5fdb752dddd5b5388dbd1ae23cda31ce9"}, - {file = "debugpy-1.8.7-cp38-cp38-win32.whl", hash = "sha256:b12515e04720e9e5c2216cc7086d0edadf25d7ab7e3564ec8b4521cf111b4f8c"}, - {file = "debugpy-1.8.7-cp38-cp38-win_amd64.whl", hash = "sha256:93176e7672551cb5281577cdb62c63aadc87ec036f0c6a486f0ded337c504596"}, - {file = "debugpy-1.8.7-cp39-cp39-macosx_14_0_x86_64.whl", hash = "sha256:90d93e4f2db442f8222dec5ec55ccfc8005821028982f1968ebf551d32b28907"}, - {file = "debugpy-1.8.7-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b6db2a370e2700557a976eaadb16243ec9c91bd46f1b3bb15376d7aaa7632c81"}, - {file = "debugpy-1.8.7-cp39-cp39-win32.whl", hash = "sha256:a6cf2510740e0c0b4a40330640e4b454f928c7b99b0c9dbf48b11efba08a8cda"}, - {file = "debugpy-1.8.7-cp39-cp39-win_amd64.whl", hash = 
"sha256:6a9d9d6d31846d8e34f52987ee0f1a904c7baa4912bf4843ab39dadf9b8f3e0d"}, - {file = "debugpy-1.8.7-py2.py3-none-any.whl", hash = "sha256:57b00de1c8d2c84a61b90880f7e5b6deaf4c312ecbde3a0e8912f2a56c4ac9ae"}, - {file = "debugpy-1.8.7.zip", hash = "sha256:18b8f731ed3e2e1df8e9cdaa23fb1fc9c24e570cd0081625308ec51c82efe42e"}, + {file = "debugpy-1.8.11-cp310-cp310-macosx_14_0_x86_64.whl", hash = "sha256:2b26fefc4e31ff85593d68b9022e35e8925714a10ab4858fb1b577a8a48cb8cd"}, + {file = "debugpy-1.8.11-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:61bc8b3b265e6949855300e84dc93d02d7a3a637f2aec6d382afd4ceb9120c9f"}, + {file = "debugpy-1.8.11-cp310-cp310-win32.whl", hash = "sha256:c928bbf47f65288574b78518449edaa46c82572d340e2750889bbf8cd92f3737"}, + {file = "debugpy-1.8.11-cp310-cp310-win_amd64.whl", hash = "sha256:8da1db4ca4f22583e834dcabdc7832e56fe16275253ee53ba66627b86e304da1"}, + {file = "debugpy-1.8.11-cp311-cp311-macosx_14_0_universal2.whl", hash = "sha256:85de8474ad53ad546ff1c7c7c89230db215b9b8a02754d41cb5a76f70d0be296"}, + {file = "debugpy-1.8.11-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8ffc382e4afa4aee367bf413f55ed17bd91b191dcaf979890af239dda435f2a1"}, + {file = "debugpy-1.8.11-cp311-cp311-win32.whl", hash = "sha256:40499a9979c55f72f4eb2fc38695419546b62594f8af194b879d2a18439c97a9"}, + {file = "debugpy-1.8.11-cp311-cp311-win_amd64.whl", hash = "sha256:987bce16e86efa86f747d5151c54e91b3c1e36acc03ce1ddb50f9d09d16ded0e"}, + {file = "debugpy-1.8.11-cp312-cp312-macosx_14_0_universal2.whl", hash = "sha256:84e511a7545d11683d32cdb8f809ef63fc17ea2a00455cc62d0a4dbb4ed1c308"}, + {file = "debugpy-1.8.11-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce291a5aca4985d82875d6779f61375e959208cdf09fcec40001e65fb0a54768"}, + {file = "debugpy-1.8.11-cp312-cp312-win32.whl", hash = "sha256:28e45b3f827d3bf2592f3cf7ae63282e859f3259db44ed2b129093ca0ac7940b"}, + {file = "debugpy-1.8.11-cp312-cp312-win_amd64.whl", hash = "sha256:44b1b8e6253bceada11f714acf4309ffb98bfa9ac55e4fce14f9e5d4484287a1"}, + {file = "debugpy-1.8.11-cp313-cp313-macosx_14_0_universal2.whl", hash = "sha256:8988f7163e4381b0da7696f37eec7aca19deb02e500245df68a7159739bbd0d3"}, + {file = "debugpy-1.8.11-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c1f6a173d1140e557347419767d2b14ac1c9cd847e0b4c5444c7f3144697e4e"}, + {file = "debugpy-1.8.11-cp313-cp313-win32.whl", hash = "sha256:bb3b15e25891f38da3ca0740271e63ab9db61f41d4d8541745cfc1824252cb28"}, + {file = "debugpy-1.8.11-cp313-cp313-win_amd64.whl", hash = "sha256:d8768edcbeb34da9e11bcb8b5c2e0958d25218df7a6e56adf415ef262cd7b6d1"}, + {file = "debugpy-1.8.11-cp38-cp38-macosx_14_0_x86_64.whl", hash = "sha256:ad7efe588c8f5cf940f40c3de0cd683cc5b76819446abaa50dc0829a30c094db"}, + {file = "debugpy-1.8.11-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:189058d03a40103a57144752652b3ab08ff02b7595d0ce1f651b9acc3a3a35a0"}, + {file = "debugpy-1.8.11-cp38-cp38-win32.whl", hash = "sha256:32db46ba45849daed7ccf3f2e26f7a386867b077f39b2a974bb5c4c2c3b0a280"}, + {file = "debugpy-1.8.11-cp38-cp38-win_amd64.whl", hash = "sha256:116bf8342062246ca749013df4f6ea106f23bc159305843491f64672a55af2e5"}, + {file = "debugpy-1.8.11-cp39-cp39-macosx_14_0_x86_64.whl", hash = 
"sha256:654130ca6ad5de73d978057eaf9e582244ff72d4574b3e106fb8d3d2a0d32458"}, + {file = "debugpy-1.8.11-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:23dc34c5e03b0212fa3c49a874df2b8b1b8fda95160bd79c01eb3ab51ea8d851"}, + {file = "debugpy-1.8.11-cp39-cp39-win32.whl", hash = "sha256:52d8a3166c9f2815bfae05f386114b0b2d274456980d41f320299a8d9a5615a7"}, + {file = "debugpy-1.8.11-cp39-cp39-win_amd64.whl", hash = "sha256:52c3cf9ecda273a19cc092961ee34eb9ba8687d67ba34cc7b79a521c1c64c4c0"}, + {file = "debugpy-1.8.11-py2.py3-none-any.whl", hash = "sha256:0e22f846f4211383e6a416d04b4c13ed174d24cc5d43f5fd52e7821d0ebc8920"}, + {file = "debugpy-1.8.11.tar.gz", hash = "sha256:6ad2688b69235c43b020e04fecccdf6a96c8943ca9c2fb340b8adc103c655e57"}, ] [[package]] @@ -1082,20 +1054,20 @@ files = [ [[package]] name = "deepdiff" -version = "8.0.1" +version = "8.1.1" description = "Deep Difference and Search of any Python object/data. Recreate objects by adding adding deltas to each other." optional = false python-versions = ">=3.8" files = [ - {file = "deepdiff-8.0.1-py3-none-any.whl", hash = "sha256:42e99004ce603f9a53934c634a57b04ad5900e0d8ed0abb15e635767489cbc05"}, - {file = "deepdiff-8.0.1.tar.gz", hash = "sha256:245599a4586ab59bb599ca3517a9c42f3318ff600ded5e80a3432693c8ec3c4b"}, + {file = "deepdiff-8.1.1-py3-none-any.whl", hash = "sha256:b0231fa3afb0f7184e82535f2b4a36636442ed21e94a0cf3aaa7982157e7ebca"}, + {file = "deepdiff-8.1.1.tar.gz", hash = "sha256:dd7bc7d5c8b51b5b90f01b0e2fe23c801fd8b4c6a7ee7e31c5a3c3663fcc7ceb"}, ] [package.dependencies] -orderly-set = "5.2.2" +orderly-set = ">=5.2.3,<6" [package.extras] -cli = ["click (==8.1.7)", "pyyaml (==6.0.1)"] +cli = ["click (==8.1.7)", "pyyaml (==6.0.2)"] optimize = ["orjson"] [[package]] @@ -1111,13 +1083,13 @@ files = [ [[package]] name = "diffusers" -version = "0.30.3" +version = "0.32.1" description = "State-of-the-art diffusion in PyTorch and JAX." optional = false python-versions = ">=3.8.0" files = [ - {file = "diffusers-0.30.3-py3-none-any.whl", hash = "sha256:1b70209e4d2c61223b96a7e13bc4d70869c8b0b68f54a35ce3a67fcf813edeee"}, - {file = "diffusers-0.30.3.tar.gz", hash = "sha256:67c5eb25d5b50bf0742624ef43fe0f6d1e1604f64aad3e8558469cbe89ecf72f"}, + {file = "diffusers-0.32.1-py3-none-any.whl", hash = "sha256:ab2ed6efe9dd2364db55b94fae453ff149dc06693ec99702cadfbeeaaa00c9c1"}, + {file = "diffusers-0.32.1.tar.gz", hash = "sha256:d48c19a4bce612d225f857e2e5d4f6ee003b88234af46b44c20119ee23f1568c"}, ] [package.dependencies] @@ -1283,21 +1255,21 @@ six = ">=1.4.0" [[package]] name = "dora-rs" -version = "0.3.6" +version = "0.3.8" description = "`dora` goal is to be a low latency, composable, and distributed data flow." 
optional = true python-versions = "*" files = [ - {file = "dora_rs-0.3.6-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:c036d2d0792d8d6e0e9db1936ab5fd4c6d19e097f3fc259058733e526f94253a"}, - {file = "dora_rs-0.3.6-cp37-abi3-macosx_11_0_arm64.whl", hash = "sha256:95036f6fcb5aeb7bba8a1f37d84c627eefe09af1db17e36bc19209e950652446"}, - {file = "dora_rs-0.3.6-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b5ef774dbafbdf8bda56939c6475916b7ec8f4b0c57c5b80f1d46eb642f5d07"}, - {file = "dora_rs-0.3.6-cp37-abi3-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:78656d3ae1282a142a5fed410ec3a6f725fdf8d9f9192ed673e336ea3b083e12"}, - {file = "dora_rs-0.3.6-cp37-abi3-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:681e22c8ecb3b48d11cb9019f8a32d4ae1e353e20d4ce3a0f0eedd0ccbd95e5f"}, - {file = "dora_rs-0.3.6-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4598572bab6f726ec41fabb43bf0f7e3cf8082ea0f6f8f4e57845a6c919f31b3"}, - {file = "dora_rs-0.3.6-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:297350f05f5f87a0bf647a1e5b4446728e5f800788c6bb28b462bcd167f1de7f"}, - {file = "dora_rs-0.3.6-cp37-abi3-musllinux_1_2_i686.whl", hash = "sha256:b1870a8e30f0ac298d17fd546224348d13a648bcfa0cbc51dba7e5136c1af928"}, - {file = "dora_rs-0.3.6-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:182a189212d41be0c960fd3299bf6731af2e771f8858cfb1be7ebcc17d60a254"}, - {file = "dora_rs-0.3.6-cp37-abi3-win_amd64.whl", hash = "sha256:a8f9343073e3fbca6bff3f0a13e5d2feabbe841a985c49e4294f7c14eb747bb5"}, + {file = "dora_rs-0.3.8-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:4c3a17bea5a77d92383779e8a2016238f5af06596434b75e97855a10e8ffd5a3"}, + {file = "dora_rs-0.3.8-cp37-abi3-macosx_11_0_arm64.whl", hash = "sha256:30b5cf340d44b3159dcac9ee0d76cfc6503e4a2d3d7163b5dbe4d78692a683ea"}, + {file = "dora_rs-0.3.8-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:779ae2cff44a3b4fe3f1de23f33577c54df1fc053ca2e07b3314084209298ef0"}, + {file = "dora_rs-0.3.8-cp37-abi3-manylinux_2_28_armv7l.whl", hash = "sha256:8918f22e4718761e8b899f95ebfb7e15b29915830fffc7c64f13d2633229dd4b"}, + {file = "dora_rs-0.3.8-cp37-abi3-manylinux_2_28_i686.whl", hash = "sha256:f44e4473dbfdc1eb90f3adc574f0ff235de7f0f01e4b0b4464b875f08ea653e9"}, + {file = "dora_rs-0.3.8-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:b4177bf9a5094c21f9799d7970aff86c0d688c5d49886b1b92aad9c6b388c775"}, + {file = "dora_rs-0.3.8-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:b044d074650e54b9f9b8ce1e15e8090f2e1f00a72ad0e03dd88295cdb51d3981"}, + {file = "dora_rs-0.3.8-cp37-abi3-musllinux_1_2_i686.whl", hash = "sha256:bb6d7c2e188cbe0ba74a378777f14884fe2a97430945a1711c1d80ae78bff40c"}, + {file = "dora_rs-0.3.8-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:4cd48dbae322726f6604217e837ffdccc1f38110566ea2f5a45e712e1db1f04a"}, + {file = "dora_rs-0.3.8-cp37-abi3-win_amd64.whl", hash = "sha256:2b50d58a4e3ce76d3ef44bdd51477c7c4e3d58a83041619310a83474b83de25a"}, ] [package.dependencies] @@ -1309,10 +1281,8 @@ version = "0.9.3" description = "A slightly opinionated framework for simple dataclass-based configurations based on Pyrallis." 
optional = false python-versions = ">=3.8" -files = [ - {file = "draccus-0.9.3-py3-none-any.whl", hash = "sha256:04d3fe14d2b7d19290e6f7c76ff29fbfcc9b56e9e7b76d9439a18a26a1dbfe5e"}, - {file = "draccus-0.9.3.tar.gz", hash = "sha256:41db52347f5513deadfb8d512fed43bb41499ac5e63559530688c1d95a978043"}, -] +files = [] +develop = false [package.dependencies] mergedeep = ">=1.3,<2.0" @@ -1324,6 +1294,12 @@ typing-inspect = ">=0.9.0,<0.10.0" [package.extras] dev = ["black", "mypy", "pre-commit", "pytest", "ruff"] +[package.source] +type = "git" +url = "https://github.com/dlwh/draccus.git" +reference = "55e456a" +resolved_reference = "55e456a3047a97135e47bbeefe06873d2727dd41" + [[package]] name = "drawnow" version = "0.72.5" @@ -1424,13 +1400,13 @@ files = [ [[package]] name = "fastjsonschema" -version = "2.20.0" +version = "2.21.1" description = "Fastest Python implementation of JSON schema" optional = true python-versions = "*" files = [ - {file = "fastjsonschema-2.20.0-py3-none-any.whl", hash = "sha256:5875f0b0fa7a0043a91e93a9b8f793bcbbba9691e7fd83dca95c28ba26d21f0a"}, - {file = "fastjsonschema-2.20.0.tar.gz", hash = "sha256:3d48fc5300ee96f5d116f10fe6f28d938e6008f59a6a025c2649475b87f76a23"}, + {file = "fastjsonschema-2.21.1-py3-none-any.whl", hash = "sha256:c9e5b7e908310918cf494a434eeb31384dd84a98b57a30bcb1f535015b554667"}, + {file = "fastjsonschema-2.21.1.tar.gz", hash = "sha256:794d4f0a58f848961ba16af7b9c85a3e88cd360df008c59aac6fc5ae9323b5d4"}, ] [package.extras] @@ -1467,21 +1443,21 @@ typing = ["typing-extensions (>=4.12.2)"] [[package]] name = "flask" -version = "3.0.3" +version = "3.1.0" description = "A simple framework for building complex web applications." optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "flask-3.0.3-py3-none-any.whl", hash = "sha256:34e815dfaa43340d1d15a5c3a02b8476004037eb4840b34910c6e21679d288f3"}, - {file = "flask-3.0.3.tar.gz", hash = "sha256:ceb27b0af3823ea2737928a4d99d125a06175b8512c445cbd9a9ce200ef76842"}, + {file = "flask-3.1.0-py3-none-any.whl", hash = "sha256:d667207822eb83f1c4b50949b1623c8fc8d51f2341d65f72e1a1815397551136"}, + {file = "flask-3.1.0.tar.gz", hash = "sha256:5f873c5184c897c8d9d1b05df1e3d01b14910ce69607a117bd3277098a5836ac"}, ] [package.dependencies] -blinker = ">=1.6.2" +blinker = ">=1.9" click = ">=8.1.3" -itsdangerous = ">=2.1.2" +itsdangerous = ">=2.2" Jinja2 = ">=3.1.2" -Werkzeug = ">=3.0.0" +Werkzeug = ">=3.1" [package.extras] async = ["asgiref (>=3.2)"] @@ -1489,59 +1465,61 @@ dotenv = ["python-dotenv"] [[package]] name = "fonttools" -version = "4.54.1" +version = "4.55.3" description = "Tools to manipulate font files" optional = true python-versions = ">=3.8" files = [ - {file = "fonttools-4.54.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7ed7ee041ff7b34cc62f07545e55e1468808691dddfd315d51dd82a6b37ddef2"}, - {file = "fonttools-4.54.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:41bb0b250c8132b2fcac148e2e9198e62ff06f3cc472065dff839327945c5882"}, - {file = "fonttools-4.54.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7965af9b67dd546e52afcf2e38641b5be956d68c425bef2158e95af11d229f10"}, - {file = "fonttools-4.54.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:278913a168f90d53378c20c23b80f4e599dca62fbffae4cc620c8eed476b723e"}, - {file = "fonttools-4.54.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:0e88e3018ac809b9662615072dcd6b84dca4c2d991c6d66e1970a112503bba7e"}, - {file = 
"fonttools-4.54.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:4aa4817f0031206e637d1e685251ac61be64d1adef111060df84fdcbc6ab6c44"}, - {file = "fonttools-4.54.1-cp310-cp310-win32.whl", hash = "sha256:7e3b7d44e18c085fd8c16dcc6f1ad6c61b71ff463636fcb13df7b1b818bd0c02"}, - {file = "fonttools-4.54.1-cp310-cp310-win_amd64.whl", hash = "sha256:dd9cc95b8d6e27d01e1e1f1fae8559ef3c02c76317da650a19047f249acd519d"}, - {file = "fonttools-4.54.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:5419771b64248484299fa77689d4f3aeed643ea6630b2ea750eeab219588ba20"}, - {file = "fonttools-4.54.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:301540e89cf4ce89d462eb23a89464fef50915255ece765d10eee8b2bf9d75b2"}, - {file = "fonttools-4.54.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:76ae5091547e74e7efecc3cbf8e75200bc92daaeb88e5433c5e3e95ea8ce5aa7"}, - {file = "fonttools-4.54.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:82834962b3d7c5ca98cb56001c33cf20eb110ecf442725dc5fdf36d16ed1ab07"}, - {file = "fonttools-4.54.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d26732ae002cc3d2ecab04897bb02ae3f11f06dd7575d1df46acd2f7c012a8d8"}, - {file = "fonttools-4.54.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:58974b4987b2a71ee08ade1e7f47f410c367cdfc5a94fabd599c88165f56213a"}, - {file = "fonttools-4.54.1-cp311-cp311-win32.whl", hash = "sha256:ab774fa225238986218a463f3fe151e04d8c25d7de09df7f0f5fce27b1243dbc"}, - {file = "fonttools-4.54.1-cp311-cp311-win_amd64.whl", hash = "sha256:07e005dc454eee1cc60105d6a29593459a06321c21897f769a281ff2d08939f6"}, - {file = "fonttools-4.54.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:54471032f7cb5fca694b5f1a0aaeba4af6e10ae989df408e0216f7fd6cdc405d"}, - {file = "fonttools-4.54.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8fa92cb248e573daab8d032919623cc309c005086d743afb014c836636166f08"}, - {file = "fonttools-4.54.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0a911591200114969befa7f2cb74ac148bce5a91df5645443371aba6d222e263"}, - {file = "fonttools-4.54.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:93d458c8a6a354dc8b48fc78d66d2a8a90b941f7fec30e94c7ad9982b1fa6bab"}, - {file = "fonttools-4.54.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:5eb2474a7c5be8a5331146758debb2669bf5635c021aee00fd7c353558fc659d"}, - {file = "fonttools-4.54.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c9c563351ddc230725c4bdf7d9e1e92cbe6ae8553942bd1fb2b2ff0884e8b714"}, - {file = "fonttools-4.54.1-cp312-cp312-win32.whl", hash = "sha256:fdb062893fd6d47b527d39346e0c5578b7957dcea6d6a3b6794569370013d9ac"}, - {file = "fonttools-4.54.1-cp312-cp312-win_amd64.whl", hash = "sha256:e4564cf40cebcb53f3dc825e85910bf54835e8a8b6880d59e5159f0f325e637e"}, - {file = "fonttools-4.54.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:6e37561751b017cf5c40fce0d90fd9e8274716de327ec4ffb0df957160be3bff"}, - {file = "fonttools-4.54.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:357cacb988a18aace66e5e55fe1247f2ee706e01debc4b1a20d77400354cddeb"}, - {file = "fonttools-4.54.1-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f8e953cc0bddc2beaf3a3c3b5dd9ab7554677da72dfaf46951e193c9653e515a"}, - {file = "fonttools-4.54.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:58d29b9a294573d8319f16f2f79e42428ba9b6480442fa1836e4eb89c4d9d61c"}, 
- {file = "fonttools-4.54.1-cp313-cp313-win32.whl", hash = "sha256:9ef1b167e22709b46bf8168368b7b5d3efeaaa746c6d39661c1b4405b6352e58"}, - {file = "fonttools-4.54.1-cp313-cp313-win_amd64.whl", hash = "sha256:262705b1663f18c04250bd1242b0515d3bbae177bee7752be67c979b7d47f43d"}, - {file = "fonttools-4.54.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:ed2f80ca07025551636c555dec2b755dd005e2ea8fbeb99fc5cdff319b70b23b"}, - {file = "fonttools-4.54.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:9dc080e5a1c3b2656caff2ac2633d009b3a9ff7b5e93d0452f40cd76d3da3b3c"}, - {file = "fonttools-4.54.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1d152d1be65652fc65e695e5619e0aa0982295a95a9b29b52b85775243c06556"}, - {file = "fonttools-4.54.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8583e563df41fdecef31b793b4dd3af8a9caa03397be648945ad32717a92885b"}, - {file = "fonttools-4.54.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:0d1d353ef198c422515a3e974a1e8d5b304cd54a4c2eebcae708e37cd9eeffb1"}, - {file = "fonttools-4.54.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:fda582236fee135d4daeca056c8c88ec5f6f6d88a004a79b84a02547c8f57386"}, - {file = "fonttools-4.54.1-cp38-cp38-win32.whl", hash = "sha256:e7d82b9e56716ed32574ee106cabca80992e6bbdcf25a88d97d21f73a0aae664"}, - {file = "fonttools-4.54.1-cp38-cp38-win_amd64.whl", hash = "sha256:ada215fd079e23e060157aab12eba0d66704316547f334eee9ff26f8c0d7b8ab"}, - {file = "fonttools-4.54.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:f5b8a096e649768c2f4233f947cf9737f8dbf8728b90e2771e2497c6e3d21d13"}, - {file = "fonttools-4.54.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4e10d2e0a12e18f4e2dd031e1bf7c3d7017be5c8dbe524d07706179f355c5dac"}, - {file = "fonttools-4.54.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:31c32d7d4b0958600eac75eaf524b7b7cb68d3a8c196635252b7a2c30d80e986"}, - {file = "fonttools-4.54.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c39287f5c8f4a0c5a55daf9eaf9ccd223ea59eed3f6d467133cc727d7b943a55"}, - {file = "fonttools-4.54.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:a7a310c6e0471602fe3bf8efaf193d396ea561486aeaa7adc1f132e02d30c4b9"}, - {file = "fonttools-4.54.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:d3b659d1029946f4ff9b6183984578041b520ce0f8fb7078bb37ec7445806b33"}, - {file = "fonttools-4.54.1-cp39-cp39-win32.whl", hash = "sha256:e96bc94c8cda58f577277d4a71f51c8e2129b8b36fd05adece6320dd3d57de8a"}, - {file = "fonttools-4.54.1-cp39-cp39-win_amd64.whl", hash = "sha256:e8a4b261c1ef91e7188a30571be6ad98d1c6d9fa2427244c545e2fa0a2494dd7"}, - {file = "fonttools-4.54.1-py3-none-any.whl", hash = "sha256:37cddd62d83dc4f72f7c3f3c2bcf2697e89a30efb152079896544a93907733bd"}, - {file = "fonttools-4.54.1.tar.gz", hash = "sha256:957f669d4922f92c171ba01bef7f29410668db09f6c02111e22b2bce446f3285"}, + {file = "fonttools-4.55.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:1dcc07934a2165ccdc3a5a608db56fb3c24b609658a5b340aee4ecf3ba679dc0"}, + {file = "fonttools-4.55.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f7d66c15ba875432a2d2fb419523f5d3d347f91f48f57b8b08a2dfc3c39b8a3f"}, + {file = "fonttools-4.55.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:27e4ae3592e62eba83cd2c4ccd9462dcfa603ff78e09110680a5444c6925d841"}, + {file = "fonttools-4.55.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:62d65a3022c35e404d19ca14f291c89cc5890032ff04f6c17af0bd1927299674"}, + {file = "fonttools-4.55.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:d342e88764fb201286d185093781bf6628bbe380a913c24adf772d901baa8276"}, + {file = "fonttools-4.55.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:dd68c87a2bfe37c5b33bcda0fba39b65a353876d3b9006fde3adae31f97b3ef5"}, + {file = "fonttools-4.55.3-cp310-cp310-win32.whl", hash = "sha256:1bc7ad24ff98846282eef1cbeac05d013c2154f977a79886bb943015d2b1b261"}, + {file = "fonttools-4.55.3-cp310-cp310-win_amd64.whl", hash = "sha256:b54baf65c52952db65df39fcd4820668d0ef4766c0ccdf32879b77f7c804d5c5"}, + {file = "fonttools-4.55.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:8c4491699bad88efe95772543cd49870cf756b019ad56294f6498982408ab03e"}, + {file = "fonttools-4.55.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:5323a22eabddf4b24f66d26894f1229261021dacd9d29e89f7872dd8c63f0b8b"}, + {file = "fonttools-4.55.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5480673f599ad410695ca2ddef2dfefe9df779a9a5cda89503881e503c9c7d90"}, + {file = "fonttools-4.55.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:da9da6d65cd7aa6b0f806556f4985bcbf603bf0c5c590e61b43aa3e5a0f822d0"}, + {file = "fonttools-4.55.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:e894b5bd60d9f473bed7a8f506515549cc194de08064d829464088d23097331b"}, + {file = "fonttools-4.55.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:aee3b57643827e237ff6ec6d28d9ff9766bd8b21e08cd13bff479e13d4b14765"}, + {file = "fonttools-4.55.3-cp311-cp311-win32.whl", hash = "sha256:eb6ca911c4c17eb51853143624d8dc87cdcdf12a711fc38bf5bd21521e79715f"}, + {file = "fonttools-4.55.3-cp311-cp311-win_amd64.whl", hash = "sha256:6314bf82c54c53c71805318fcf6786d986461622dd926d92a465199ff54b1b72"}, + {file = "fonttools-4.55.3-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:f9e736f60f4911061235603a6119e72053073a12c6d7904011df2d8fad2c0e35"}, + {file = "fonttools-4.55.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:7a8aa2c5e5b8b3bcb2e4538d929f6589a5c6bdb84fd16e2ed92649fb5454f11c"}, + {file = "fonttools-4.55.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:07f8288aacf0a38d174445fc78377a97fb0b83cfe352a90c9d9c1400571963c7"}, + {file = "fonttools-4.55.3-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8d5e8916c0970fbc0f6f1bece0063363bb5857a7f170121a4493e31c3db3314"}, + {file = "fonttools-4.55.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:ae3b6600565b2d80b7c05acb8e24d2b26ac407b27a3f2e078229721ba5698427"}, + {file = "fonttools-4.55.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:54153c49913f45065c8d9e6d0c101396725c5621c8aee744719300f79771d75a"}, + {file = "fonttools-4.55.3-cp312-cp312-win32.whl", hash = "sha256:827e95fdbbd3e51f8b459af5ea10ecb4e30af50221ca103bea68218e9615de07"}, + {file = "fonttools-4.55.3-cp312-cp312-win_amd64.whl", hash = "sha256:e6e8766eeeb2de759e862004aa11a9ea3d6f6d5ec710551a88b476192b64fd54"}, + {file = "fonttools-4.55.3-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:a430178ad3e650e695167cb53242dae3477b35c95bef6525b074d87493c4bf29"}, + {file = "fonttools-4.55.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:529cef2ce91dc44f8e407cc567fae6e49a1786f2fefefa73a294704c415322a4"}, + {file = "fonttools-4.55.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:8e75f12c82127486fac2d8bfbf5bf058202f54bf4f158d367e41647b972342ca"}, + {file = "fonttools-4.55.3-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:859c358ebf41db18fb72342d3080bce67c02b39e86b9fbcf1610cca14984841b"}, + {file = "fonttools-4.55.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:546565028e244a701f73df6d8dd6be489d01617863ec0c6a42fa25bf45d43048"}, + {file = "fonttools-4.55.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:aca318b77f23523309eec4475d1fbbb00a6b133eb766a8bdc401faba91261abe"}, + {file = "fonttools-4.55.3-cp313-cp313-win32.whl", hash = "sha256:8c5ec45428edaa7022f1c949a632a6f298edc7b481312fc7dc258921e9399628"}, + {file = "fonttools-4.55.3-cp313-cp313-win_amd64.whl", hash = "sha256:11e5de1ee0d95af4ae23c1a138b184b7f06e0b6abacabf1d0db41c90b03d834b"}, + {file = "fonttools-4.55.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:caf8230f3e10f8f5d7593eb6d252a37caf58c480b19a17e250a63dad63834cf3"}, + {file = "fonttools-4.55.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b586ab5b15b6097f2fb71cafa3c98edfd0dba1ad8027229e7b1e204a58b0e09d"}, + {file = "fonttools-4.55.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a8c2794ded89399cc2169c4d0bf7941247b8d5932b2659e09834adfbb01589aa"}, + {file = "fonttools-4.55.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cf4fe7c124aa3f4e4c1940880156e13f2f4d98170d35c749e6b4f119a872551e"}, + {file = "fonttools-4.55.3-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:86721fbc389ef5cc1e2f477019e5069e8e4421e8d9576e9c26f840dbb04678de"}, + {file = "fonttools-4.55.3-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:89bdc5d88bdeec1b15af790810e267e8332d92561dce4f0748c2b95c9bdf3926"}, + {file = "fonttools-4.55.3-cp38-cp38-win32.whl", hash = "sha256:bc5dbb4685e51235ef487e4bd501ddfc49be5aede5e40f4cefcccabc6e60fb4b"}, + {file = "fonttools-4.55.3-cp38-cp38-win_amd64.whl", hash = "sha256:cd70de1a52a8ee2d1877b6293af8a2484ac82514f10b1c67c1c5762d38073e56"}, + {file = "fonttools-4.55.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:bdcc9f04b36c6c20978d3f060e5323a43f6222accc4e7fcbef3f428e216d96af"}, + {file = "fonttools-4.55.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c3ca99e0d460eff46e033cd3992a969658c3169ffcd533e0a39c63a38beb6831"}, + {file = "fonttools-4.55.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22f38464daa6cdb7b6aebd14ab06609328fe1e9705bb0fcc7d1e69de7109ee02"}, + {file = "fonttools-4.55.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ed63959d00b61959b035c7d47f9313c2c1ece090ff63afea702fe86de00dbed4"}, + {file = "fonttools-4.55.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:5e8d657cd7326eeaba27de2740e847c6b39dde2f8d7cd7cc56f6aad404ddf0bd"}, + {file = "fonttools-4.55.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:fb594b5a99943042c702c550d5494bdd7577f6ef19b0bc73877c948a63184a32"}, + {file = "fonttools-4.55.3-cp39-cp39-win32.whl", hash = "sha256:dc5294a3d5c84226e3dbba1b6f61d7ad813a8c0238fceea4e09aa04848c3d851"}, + {file = "fonttools-4.55.3-cp39-cp39-win_amd64.whl", hash = "sha256:aedbeb1db64496d098e6be92b2e63b5fac4e53b1b92032dfc6988e1ea9134a4d"}, + {file = "fonttools-4.55.3-py3-none-any.whl", hash = "sha256:f412604ccbeee81b091b420272841e5ec5ef68967a9790e80bffd0e30b8e2977"}, + {file = "fonttools-4.55.3.tar.gz", hash = "sha256:3983313c2a04d6cc1fe9251f8fc647754cf49a61dac6cb1e7249ae67afaafc45"}, ] [package.extras] @@ -1587,99 
+1565,114 @@ files = [ [[package]] name = "frozenlist" -version = "1.4.1" +version = "1.5.0" description = "A list-like structure which implements collections.abc.MutableSequence" optional = false python-versions = ">=3.8" files = [ - {file = "frozenlist-1.4.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:f9aa1878d1083b276b0196f2dfbe00c9b7e752475ed3b682025ff20c1c1f51ac"}, - {file = "frozenlist-1.4.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:29acab3f66f0f24674b7dc4736477bcd4bc3ad4b896f5f45379a67bce8b96868"}, - {file = "frozenlist-1.4.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:74fb4bee6880b529a0c6560885fce4dc95936920f9f20f53d99a213f7bf66776"}, - {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:590344787a90ae57d62511dd7c736ed56b428f04cd8c161fcc5e7232c130c69a"}, - {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:068b63f23b17df8569b7fdca5517edef76171cf3897eb68beb01341131fbd2ad"}, - {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5c849d495bf5154cd8da18a9eb15db127d4dba2968d88831aff6f0331ea9bd4c"}, - {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9750cc7fe1ae3b1611bb8cfc3f9ec11d532244235d75901fb6b8e42ce9229dfe"}, - {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a9b2de4cf0cdd5bd2dee4c4f63a653c61d2408055ab77b151c1957f221cabf2a"}, - {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:0633c8d5337cb5c77acbccc6357ac49a1770b8c487e5b3505c57b949b4b82e98"}, - {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:27657df69e8801be6c3638054e202a135c7f299267f1a55ed3a598934f6c0d75"}, - {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:f9a3ea26252bd92f570600098783d1371354d89d5f6b7dfd87359d669f2109b5"}, - {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:4f57dab5fe3407b6c0c1cc907ac98e8a189f9e418f3b6e54d65a718aaafe3950"}, - {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:e02a0e11cf6597299b9f3bbd3f93d79217cb90cfd1411aec33848b13f5c656cc"}, - {file = "frozenlist-1.4.1-cp310-cp310-win32.whl", hash = "sha256:a828c57f00f729620a442881cc60e57cfcec6842ba38e1b19fd3e47ac0ff8dc1"}, - {file = "frozenlist-1.4.1-cp310-cp310-win_amd64.whl", hash = "sha256:f56e2333dda1fe0f909e7cc59f021eba0d2307bc6f012a1ccf2beca6ba362439"}, - {file = "frozenlist-1.4.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:a0cb6f11204443f27a1628b0e460f37fb30f624be6051d490fa7d7e26d4af3d0"}, - {file = "frozenlist-1.4.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b46c8ae3a8f1f41a0d2ef350c0b6e65822d80772fe46b653ab6b6274f61d4a49"}, - {file = "frozenlist-1.4.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:fde5bd59ab5357e3853313127f4d3565fc7dad314a74d7b5d43c22c6a5ed2ced"}, - {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:722e1124aec435320ae01ee3ac7bec11a5d47f25d0ed6328f2273d287bc3abb0"}, - {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2471c201b70d58a0f0c1f91261542a03d9a5e088ed3dc6c160d614c01649c106"}, - {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:c757a9dd70d72b076d6f68efdbb9bc943665ae954dad2801b874c8c69e185068"}, - {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f146e0911cb2f1da549fc58fc7bcd2b836a44b79ef871980d605ec392ff6b0d2"}, - {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4f9c515e7914626b2a2e1e311794b4c35720a0be87af52b79ff8e1429fc25f19"}, - {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:c302220494f5c1ebeb0912ea782bcd5e2f8308037b3c7553fad0e48ebad6ad82"}, - {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:442acde1e068288a4ba7acfe05f5f343e19fac87bfc96d89eb886b0363e977ec"}, - {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:1b280e6507ea8a4fa0c0a7150b4e526a8d113989e28eaaef946cc77ffd7efc0a"}, - {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:fe1a06da377e3a1062ae5fe0926e12b84eceb8a50b350ddca72dc85015873f74"}, - {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:db9e724bebd621d9beca794f2a4ff1d26eed5965b004a97f1f1685a173b869c2"}, - {file = "frozenlist-1.4.1-cp311-cp311-win32.whl", hash = "sha256:e774d53b1a477a67838a904131c4b0eef6b3d8a651f8b138b04f748fccfefe17"}, - {file = "frozenlist-1.4.1-cp311-cp311-win_amd64.whl", hash = "sha256:fb3c2db03683b5767dedb5769b8a40ebb47d6f7f45b1b3e3b4b51ec8ad9d9825"}, - {file = "frozenlist-1.4.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:1979bc0aeb89b33b588c51c54ab0161791149f2461ea7c7c946d95d5f93b56ae"}, - {file = "frozenlist-1.4.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:cc7b01b3754ea68a62bd77ce6020afaffb44a590c2289089289363472d13aedb"}, - {file = "frozenlist-1.4.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c9c92be9fd329ac801cc420e08452b70e7aeab94ea4233a4804f0915c14eba9b"}, - {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c3894db91f5a489fc8fa6a9991820f368f0b3cbdb9cd8849547ccfab3392d86"}, - {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ba60bb19387e13597fb059f32cd4d59445d7b18b69a745b8f8e5db0346f33480"}, - {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8aefbba5f69d42246543407ed2461db31006b0f76c4e32dfd6f42215a2c41d09"}, - {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:780d3a35680ced9ce682fbcf4cb9c2bad3136eeff760ab33707b71db84664e3a"}, - {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9acbb16f06fe7f52f441bb6f413ebae6c37baa6ef9edd49cdd567216da8600cd"}, - {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:23b701e65c7b36e4bf15546a89279bd4d8675faabc287d06bbcfac7d3c33e1e6"}, - {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:3e0153a805a98f5ada7e09826255ba99fb4f7524bb81bf6b47fb702666484ae1"}, - {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:dd9b1baec094d91bf36ec729445f7769d0d0cf6b64d04d86e45baf89e2b9059b"}, - {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:1a4471094e146b6790f61b98616ab8e44f72661879cc63fa1049d13ef711e71e"}, - {file = 
"frozenlist-1.4.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:5667ed53d68d91920defdf4035d1cdaa3c3121dc0b113255124bcfada1cfa1b8"}, - {file = "frozenlist-1.4.1-cp312-cp312-win32.whl", hash = "sha256:beee944ae828747fd7cb216a70f120767fc9f4f00bacae8543c14a6831673f89"}, - {file = "frozenlist-1.4.1-cp312-cp312-win_amd64.whl", hash = "sha256:64536573d0a2cb6e625cf309984e2d873979709f2cf22839bf2d61790b448ad5"}, - {file = "frozenlist-1.4.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:20b51fa3f588ff2fe658663db52a41a4f7aa6c04f6201449c6c7c476bd255c0d"}, - {file = "frozenlist-1.4.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:410478a0c562d1a5bcc2f7ea448359fcb050ed48b3c6f6f4f18c313a9bdb1826"}, - {file = "frozenlist-1.4.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:c6321c9efe29975232da3bd0af0ad216800a47e93d763ce64f291917a381b8eb"}, - {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:48f6a4533887e189dae092f1cf981f2e3885175f7a0f33c91fb5b7b682b6bab6"}, - {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6eb73fa5426ea69ee0e012fb59cdc76a15b1283d6e32e4f8dc4482ec67d1194d"}, - {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fbeb989b5cc29e8daf7f976b421c220f1b8c731cbf22b9130d8815418ea45887"}, - {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:32453c1de775c889eb4e22f1197fe3bdfe457d16476ea407472b9442e6295f7a"}, - {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:693945278a31f2086d9bf3df0fe8254bbeaef1fe71e1351c3bd730aa7d31c41b"}, - {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:1d0ce09d36d53bbbe566fe296965b23b961764c0bcf3ce2fa45f463745c04701"}, - {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:3a670dc61eb0d0eb7080890c13de3066790f9049b47b0de04007090807c776b0"}, - {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:dca69045298ce5c11fd539682cff879cc1e664c245d1c64da929813e54241d11"}, - {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:a06339f38e9ed3a64e4c4e43aec7f59084033647f908e4259d279a52d3757d09"}, - {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b7f2f9f912dca3934c1baec2e4585a674ef16fe00218d833856408c48d5beee7"}, - {file = "frozenlist-1.4.1-cp38-cp38-win32.whl", hash = "sha256:e7004be74cbb7d9f34553a5ce5fb08be14fb33bc86f332fb71cbe5216362a497"}, - {file = "frozenlist-1.4.1-cp38-cp38-win_amd64.whl", hash = "sha256:5a7d70357e7cee13f470c7883a063aae5fe209a493c57d86eb7f5a6f910fae09"}, - {file = "frozenlist-1.4.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:bfa4a17e17ce9abf47a74ae02f32d014c5e9404b6d9ac7f729e01562bbee601e"}, - {file = "frozenlist-1.4.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b7e3ed87d4138356775346e6845cccbe66cd9e207f3cd11d2f0b9fd13681359d"}, - {file = "frozenlist-1.4.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c99169d4ff810155ca50b4da3b075cbde79752443117d89429595c2e8e37fed8"}, - {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:edb678da49d9f72c9f6c609fbe41a5dfb9a9282f9e6a2253d5a91e0fc382d7c0"}, - {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:6db4667b187a6742b33afbbaf05a7bc551ffcf1ced0000a571aedbb4aa42fc7b"}, - {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:55fdc093b5a3cb41d420884cdaf37a1e74c3c37a31f46e66286d9145d2063bd0"}, - {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:82e8211d69a4f4bc360ea22cd6555f8e61a1bd211d1d5d39d3d228b48c83a897"}, - {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:89aa2c2eeb20957be2d950b85974b30a01a762f3308cd02bb15e1ad632e22dc7"}, - {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9d3e0c25a2350080e9319724dede4f31f43a6c9779be48021a7f4ebde8b2d742"}, - {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:7268252af60904bf52c26173cbadc3a071cece75f873705419c8681f24d3edea"}, - {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:0c250a29735d4f15321007fb02865f0e6b6a41a6b88f1f523ca1596ab5f50bd5"}, - {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:96ec70beabbd3b10e8bfe52616a13561e58fe84c0101dd031dc78f250d5128b9"}, - {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:23b2d7679b73fe0e5a4560b672a39f98dfc6f60df63823b0a9970525325b95f6"}, - {file = "frozenlist-1.4.1-cp39-cp39-win32.whl", hash = "sha256:a7496bfe1da7fb1a4e1cc23bb67c58fab69311cc7d32b5a99c2007b4b2a0e932"}, - {file = "frozenlist-1.4.1-cp39-cp39-win_amd64.whl", hash = "sha256:e6a20a581f9ce92d389a8c7d7c3dd47c81fd5d6e655c8dddf341e14aa48659d0"}, - {file = "frozenlist-1.4.1-py3-none-any.whl", hash = "sha256:04ced3e6a46b4cfffe20f9ae482818e34eba9b5fb0ce4056e4cc9b6e212d09b7"}, - {file = "frozenlist-1.4.1.tar.gz", hash = "sha256:c037a86e8513059a2613aaba4d817bb90b9d9b6b69aace3ce9c877e8c8ed402b"}, + {file = "frozenlist-1.5.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:5b6a66c18b5b9dd261ca98dffcb826a525334b2f29e7caa54e182255c5f6a65a"}, + {file = "frozenlist-1.5.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d1b3eb7b05ea246510b43a7e53ed1653e55c2121019a97e60cad7efb881a97bb"}, + {file = "frozenlist-1.5.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:15538c0cbf0e4fa11d1e3a71f823524b0c46299aed6e10ebb4c2089abd8c3bec"}, + {file = "frozenlist-1.5.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e79225373c317ff1e35f210dd5f1344ff31066ba8067c307ab60254cd3a78ad5"}, + {file = "frozenlist-1.5.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9272fa73ca71266702c4c3e2d4a28553ea03418e591e377a03b8e3659d94fa76"}, + {file = "frozenlist-1.5.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:498524025a5b8ba81695761d78c8dd7382ac0b052f34e66939c42df860b8ff17"}, + {file = "frozenlist-1.5.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:92b5278ed9d50fe610185ecd23c55d8b307d75ca18e94c0e7de328089ac5dcba"}, + {file = "frozenlist-1.5.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f3c8c1dacd037df16e85227bac13cca58c30da836c6f936ba1df0c05d046d8d"}, + {file = "frozenlist-1.5.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:f2ac49a9bedb996086057b75bf93538240538c6d9b38e57c82d51f75a73409d2"}, + {file = "frozenlist-1.5.0-cp310-cp310-musllinux_1_2_i686.whl", hash = 
"sha256:e66cc454f97053b79c2ab09c17fbe3c825ea6b4de20baf1be28919460dd7877f"}, + {file = "frozenlist-1.5.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:5a3ba5f9a0dfed20337d3e966dc359784c9f96503674c2faf015f7fe8e96798c"}, + {file = "frozenlist-1.5.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:6321899477db90bdeb9299ac3627a6a53c7399c8cd58d25da094007402b039ab"}, + {file = "frozenlist-1.5.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:76e4753701248476e6286f2ef492af900ea67d9706a0155335a40ea21bf3b2f5"}, + {file = "frozenlist-1.5.0-cp310-cp310-win32.whl", hash = "sha256:977701c081c0241d0955c9586ffdd9ce44f7a7795df39b9151cd9a6fd0ce4cfb"}, + {file = "frozenlist-1.5.0-cp310-cp310-win_amd64.whl", hash = "sha256:189f03b53e64144f90990d29a27ec4f7997d91ed3d01b51fa39d2dbe77540fd4"}, + {file = "frozenlist-1.5.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:fd74520371c3c4175142d02a976aee0b4cb4a7cc912a60586ffd8d5929979b30"}, + {file = "frozenlist-1.5.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2f3f7a0fbc219fb4455264cae4d9f01ad41ae6ee8524500f381de64ffaa077d5"}, + {file = "frozenlist-1.5.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f47c9c9028f55a04ac254346e92977bf0f166c483c74b4232bee19a6697e4778"}, + {file = "frozenlist-1.5.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0996c66760924da6e88922756d99b47512a71cfd45215f3570bf1e0b694c206a"}, + {file = "frozenlist-1.5.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a2fe128eb4edeabe11896cb6af88fca5346059f6c8d807e3b910069f39157869"}, + {file = "frozenlist-1.5.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1a8ea951bbb6cacd492e3948b8da8c502a3f814f5d20935aae74b5df2b19cf3d"}, + {file = "frozenlist-1.5.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:de537c11e4aa01d37db0d403b57bd6f0546e71a82347a97c6a9f0dcc532b3a45"}, + {file = "frozenlist-1.5.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9c2623347b933fcb9095841f1cc5d4ff0b278addd743e0e966cb3d460278840d"}, + {file = "frozenlist-1.5.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:cee6798eaf8b1416ef6909b06f7dc04b60755206bddc599f52232606e18179d3"}, + {file = "frozenlist-1.5.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:f5f9da7f5dbc00a604fe74aa02ae7c98bcede8a3b8b9666f9f86fc13993bc71a"}, + {file = "frozenlist-1.5.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:90646abbc7a5d5c7c19461d2e3eeb76eb0b204919e6ece342feb6032c9325ae9"}, + {file = "frozenlist-1.5.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:bdac3c7d9b705d253b2ce370fde941836a5f8b3c5c2b8fd70940a3ea3af7f4f2"}, + {file = "frozenlist-1.5.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:03d33c2ddbc1816237a67f66336616416e2bbb6beb306e5f890f2eb22b959cdf"}, + {file = "frozenlist-1.5.0-cp311-cp311-win32.whl", hash = "sha256:237f6b23ee0f44066219dae14c70ae38a63f0440ce6750f868ee08775073f942"}, + {file = "frozenlist-1.5.0-cp311-cp311-win_amd64.whl", hash = "sha256:0cc974cc93d32c42e7b0f6cf242a6bd941c57c61b618e78b6c0a96cb72788c1d"}, + {file = "frozenlist-1.5.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:31115ba75889723431aa9a4e77d5f398f5cf976eea3bdf61749731f62d4a4a21"}, + {file = "frozenlist-1.5.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:7437601c4d89d070eac8323f121fcf25f88674627505334654fd027b091db09d"}, + {file = 
"frozenlist-1.5.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:7948140d9f8ece1745be806f2bfdf390127cf1a763b925c4a805c603df5e697e"}, + {file = "frozenlist-1.5.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:feeb64bc9bcc6b45c6311c9e9b99406660a9c05ca8a5b30d14a78555088b0b3a"}, + {file = "frozenlist-1.5.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:683173d371daad49cffb8309779e886e59c2f369430ad28fe715f66d08d4ab1a"}, + {file = "frozenlist-1.5.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7d57d8f702221405a9d9b40f9da8ac2e4a1a8b5285aac6100f3393675f0a85ee"}, + {file = "frozenlist-1.5.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:30c72000fbcc35b129cb09956836c7d7abf78ab5416595e4857d1cae8d6251a6"}, + {file = "frozenlist-1.5.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:000a77d6034fbad9b6bb880f7ec073027908f1b40254b5d6f26210d2dab1240e"}, + {file = "frozenlist-1.5.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:5d7f5a50342475962eb18b740f3beecc685a15b52c91f7d975257e13e029eca9"}, + {file = "frozenlist-1.5.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:87f724d055eb4785d9be84e9ebf0f24e392ddfad00b3fe036e43f489fafc9039"}, + {file = "frozenlist-1.5.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:6e9080bb2fb195a046e5177f10d9d82b8a204c0736a97a153c2466127de87784"}, + {file = "frozenlist-1.5.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:9b93d7aaa36c966fa42efcaf716e6b3900438632a626fb09c049f6a2f09fc631"}, + {file = "frozenlist-1.5.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:52ef692a4bc60a6dd57f507429636c2af8b6046db8b31b18dac02cbc8f507f7f"}, + {file = "frozenlist-1.5.0-cp312-cp312-win32.whl", hash = "sha256:29d94c256679247b33a3dc96cce0f93cbc69c23bf75ff715919332fdbb6a32b8"}, + {file = "frozenlist-1.5.0-cp312-cp312-win_amd64.whl", hash = "sha256:8969190d709e7c48ea386db202d708eb94bdb29207a1f269bab1196ce0dcca1f"}, + {file = "frozenlist-1.5.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:7a1a048f9215c90973402e26c01d1cff8a209e1f1b53f72b95c13db61b00f953"}, + {file = "frozenlist-1.5.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:dd47a5181ce5fcb463b5d9e17ecfdb02b678cca31280639255ce9d0e5aa67af0"}, + {file = "frozenlist-1.5.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:1431d60b36d15cda188ea222033eec8e0eab488f39a272461f2e6d9e1a8e63c2"}, + {file = "frozenlist-1.5.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6482a5851f5d72767fbd0e507e80737f9c8646ae7fd303def99bfe813f76cf7f"}, + {file = "frozenlist-1.5.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:44c49271a937625619e862baacbd037a7ef86dd1ee215afc298a417ff3270608"}, + {file = "frozenlist-1.5.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:12f78f98c2f1c2429d42e6a485f433722b0061d5c0b0139efa64f396efb5886b"}, + {file = "frozenlist-1.5.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ce3aa154c452d2467487765e3adc730a8c153af77ad84096bc19ce19a2400840"}, + {file = "frozenlist-1.5.0-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9b7dc0c4338e6b8b091e8faf0db3168a37101943e687f373dce00959583f7439"}, + {file = "frozenlist-1.5.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = 
"sha256:45e0896250900b5aa25180f9aec243e84e92ac84bd4a74d9ad4138ef3f5c97de"}, + {file = "frozenlist-1.5.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:561eb1c9579d495fddb6da8959fd2a1fca2c6d060d4113f5844b433fc02f2641"}, + {file = "frozenlist-1.5.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:df6e2f325bfee1f49f81aaac97d2aa757c7646534a06f8f577ce184afe2f0a9e"}, + {file = "frozenlist-1.5.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:140228863501b44b809fb39ec56b5d4071f4d0aa6d216c19cbb08b8c5a7eadb9"}, + {file = "frozenlist-1.5.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:7707a25d6a77f5d27ea7dc7d1fc608aa0a478193823f88511ef5e6b8a48f9d03"}, + {file = "frozenlist-1.5.0-cp313-cp313-win32.whl", hash = "sha256:31a9ac2b38ab9b5a8933b693db4939764ad3f299fcaa931a3e605bc3460e693c"}, + {file = "frozenlist-1.5.0-cp313-cp313-win_amd64.whl", hash = "sha256:11aabdd62b8b9c4b84081a3c246506d1cddd2dd93ff0ad53ede5defec7886b28"}, + {file = "frozenlist-1.5.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:dd94994fc91a6177bfaafd7d9fd951bc8689b0a98168aa26b5f543868548d3ca"}, + {file = "frozenlist-1.5.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2d0da8bbec082bf6bf18345b180958775363588678f64998c2b7609e34719b10"}, + {file = "frozenlist-1.5.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:73f2e31ea8dd7df61a359b731716018c2be196e5bb3b74ddba107f694fbd7604"}, + {file = "frozenlist-1.5.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:828afae9f17e6de596825cf4228ff28fbdf6065974e5ac1410cecc22f699d2b3"}, + {file = "frozenlist-1.5.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f1577515d35ed5649d52ab4319db757bb881ce3b2b796d7283e6634d99ace307"}, + {file = "frozenlist-1.5.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2150cc6305a2c2ab33299453e2968611dacb970d2283a14955923062c8d00b10"}, + {file = "frozenlist-1.5.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a72b7a6e3cd2725eff67cd64c8f13335ee18fc3c7befc05aed043d24c7b9ccb9"}, + {file = "frozenlist-1.5.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c16d2fa63e0800723139137d667e1056bee1a1cf7965153d2d104b62855e9b99"}, + {file = "frozenlist-1.5.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:17dcc32fc7bda7ce5875435003220a457bcfa34ab7924a49a1c19f55b6ee185c"}, + {file = "frozenlist-1.5.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:97160e245ea33d8609cd2b8fd997c850b56db147a304a262abc2b3be021a9171"}, + {file = "frozenlist-1.5.0-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:f1e6540b7fa044eee0bb5111ada694cf3dc15f2b0347ca125ee9ca984d5e9e6e"}, + {file = "frozenlist-1.5.0-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:91d6c171862df0a6c61479d9724f22efb6109111017c87567cfeb7b5d1449fdf"}, + {file = "frozenlist-1.5.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:c1fac3e2ace2eb1052e9f7c7db480818371134410e1f5c55d65e8f3ac6d1407e"}, + {file = "frozenlist-1.5.0-cp38-cp38-win32.whl", hash = "sha256:b97f7b575ab4a8af9b7bc1d2ef7f29d3afee2226bd03ca3875c16451ad5a7723"}, + {file = "frozenlist-1.5.0-cp38-cp38-win_amd64.whl", hash = "sha256:374ca2dabdccad8e2a76d40b1d037f5bd16824933bf7bcea3e59c891fd4a0923"}, + {file = "frozenlist-1.5.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:9bbcdfaf4af7ce002694a4e10a0159d5a8d20056a12b05b45cea944a4953f972"}, + {file = "frozenlist-1.5.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = 
"sha256:1893f948bf6681733aaccf36c5232c231e3b5166d607c5fa77773611df6dc336"}, + {file = "frozenlist-1.5.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2b5e23253bb709ef57a8e95e6ae48daa9ac5f265637529e4ce6b003a37b2621f"}, + {file = "frozenlist-1.5.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0f253985bb515ecd89629db13cb58d702035ecd8cfbca7d7a7e29a0e6d39af5f"}, + {file = "frozenlist-1.5.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:04a5c6babd5e8fb7d3c871dc8b321166b80e41b637c31a995ed844a6139942b6"}, + {file = "frozenlist-1.5.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a9fe0f1c29ba24ba6ff6abf688cb0b7cf1efab6b6aa6adc55441773c252f7411"}, + {file = "frozenlist-1.5.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:226d72559fa19babe2ccd920273e767c96a49b9d3d38badd7c91a0fdeda8ea08"}, + {file = "frozenlist-1.5.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:15b731db116ab3aedec558573c1a5eec78822b32292fe4f2f0345b7f697745c2"}, + {file = "frozenlist-1.5.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:366d8f93e3edfe5a918c874702f78faac300209a4d5bf38352b2c1bdc07a766d"}, + {file = "frozenlist-1.5.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:1b96af8c582b94d381a1c1f51ffaedeb77c821c690ea5f01da3d70a487dd0a9b"}, + {file = "frozenlist-1.5.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:c03eff4a41bd4e38415cbed054bbaff4a075b093e2394b6915dca34a40d1e38b"}, + {file = "frozenlist-1.5.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:50cf5e7ee9b98f22bdecbabf3800ae78ddcc26e4a435515fc72d97903e8488e0"}, + {file = "frozenlist-1.5.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:1e76bfbc72353269c44e0bc2cfe171900fbf7f722ad74c9a7b638052afe6a00c"}, + {file = "frozenlist-1.5.0-cp39-cp39-win32.whl", hash = "sha256:666534d15ba8f0fda3f53969117383d5dc021266b3c1a42c9ec4855e4b58b9d3"}, + {file = "frozenlist-1.5.0-cp39-cp39-win_amd64.whl", hash = "sha256:5c28f4b5dbef8a0d8aad0d4de24d1e9e981728628afaf4ea0792f5d0939372f0"}, + {file = "frozenlist-1.5.0-py3-none-any.whl", hash = "sha256:d994863bba198a4a518b467bb971c56e1db3f180a25c6cf7bb1949c267f748c3"}, + {file = "frozenlist-1.5.0.tar.gz", hash = "sha256:81d5af29e61b9c8348e876d442253723928dce6433e0e76cd925cd83f1b4b817"}, ] [[package]] name = "fsspec" -version = "2024.6.1" +version = "2024.9.0" description = "File-system specification" optional = false python-versions = ">=3.8" files = [ - {file = "fsspec-2024.6.1-py3-none-any.whl", hash = "sha256:3cb443f8bcd2efb31295a5b9fdb02aee81d8452c80d28f97a6d0959e6cee101e"}, - {file = "fsspec-2024.6.1.tar.gz", hash = "sha256:fad7d7e209dd4c1208e3bbfda706620e0da5142bebbd9c384afb95b07e798e49"}, + {file = "fsspec-2024.9.0-py3-none-any.whl", hash = "sha256:a0947d552d8a6efa72cc2c730b12c41d043509156966cca4fb157b0f2a0c574b"}, + {file = "fsspec-2024.9.0.tar.gz", hash = "sha256:4b0afb90c2f21832df142f292649035d80b421f60a9e1c027802e5a0da2b04e8"}, ] [package.dependencies] @@ -1746,13 +1739,13 @@ test = ["build", "mypy", "pytest", "pytest-xdist", "ruff", "twine", "types-reque [[package]] name = "gitdb" -version = "4.0.11" +version = "4.0.12" description = "Git Object Database" optional = false python-versions = ">=3.7" files = [ - {file = "gitdb-4.0.11-py3-none-any.whl", hash = "sha256:81a3407ddd2ee8df444cbacea00e2d038e40150acfa3001696fe0dcf1d3adfa4"}, - {file = "gitdb-4.0.11.tar.gz", hash = 
"sha256:bf5421126136d6d0af55bc1e7c1af1c397a34f5b7bd79e776cd3e89785c2b04b"}, + {file = "gitdb-4.0.12-py3-none-any.whl", hash = "sha256:67073e15955400952c6565cc3e707c554a4eea2e428946f7a4c162fab9bd9bcf"}, + {file = "gitdb-4.0.12.tar.gz", hash = "sha256:5ef71f855d191a3326fcfbc0d5da835f26b13fbcba60c32c21091c349ffdb571"}, ] [package.dependencies] @@ -1760,38 +1753,38 @@ smmap = ">=3.0.1,<6" [[package]] name = "gitpython" -version = "3.1.43" +version = "3.1.44" description = "GitPython is a Python library used to interact with Git repositories" optional = false python-versions = ">=3.7" files = [ - {file = "GitPython-3.1.43-py3-none-any.whl", hash = "sha256:eec7ec56b92aad751f9912a73404bc02ba212a23adb2c7098ee668417051a1ff"}, - {file = "GitPython-3.1.43.tar.gz", hash = "sha256:35f314a9f878467f5453cc1fee295c3e18e52f1b99f10f6cf5b1682e968a9e7c"}, + {file = "GitPython-3.1.44-py3-none-any.whl", hash = "sha256:9e0e10cda9bed1ee64bc9a6de50e7e38a9c9943241cd7f585f6df3ed28011110"}, + {file = "gitpython-3.1.44.tar.gz", hash = "sha256:c87e30b26253bf5418b01b0660f818967f3c503193838337fe5e573331249269"}, ] [package.dependencies] gitdb = ">=4.0.1,<5" [package.extras] -doc = ["sphinx (==4.3.2)", "sphinx-autodoc-typehints", "sphinx-rtd-theme", "sphinxcontrib-applehelp (>=1.0.2,<=1.0.4)", "sphinxcontrib-devhelp (==1.0.2)", "sphinxcontrib-htmlhelp (>=2.0.0,<=2.0.1)", "sphinxcontrib-qthelp (==1.0.3)", "sphinxcontrib-serializinghtml (==1.1.5)"] +doc = ["sphinx (>=7.1.2,<7.2)", "sphinx-autodoc-typehints", "sphinx_rtd_theme"] test = ["coverage[toml]", "ddt (>=1.1.1,!=1.4.3)", "mock", "mypy", "pre-commit", "pytest (>=7.3.1)", "pytest-cov", "pytest-instafail", "pytest-mock", "pytest-sugar", "typing-extensions"] [[package]] name = "glfw" -version = "2.7.0" +version = "2.8.0" description = "A ctypes-based wrapper for GLFW3." 
optional = true python-versions = "*" files = [ - {file = "glfw-2.7.0-py2.py27.py3.py30.py31.py32.py33.py34.py35.py36.py37.py38-none-macosx_10_6_intel.whl", hash = "sha256:bd82849edcceda4e262bd1227afaa74b94f9f0731c1197863cd25c15bfc613fc"}, - {file = "glfw-2.7.0-py2.py27.py3.py30.py31.py32.py33.py34.py35.py36.py37.py38-none-macosx_11_0_arm64.whl", hash = "sha256:56ea163c964bb0bc336def2d6a6a1bd42f9db4b870ef834ac77d7b7ee68b8dfc"}, - {file = "glfw-2.7.0-py2.py27.py3.py30.py31.py32.py33.py34.py35.py36.py37.py38-none-manylinux2010_i686.whl", hash = "sha256:463aab9e5567c83d8120556b3a845807c60950ed0218fc1283368f46f5ece331"}, - {file = "glfw-2.7.0-py2.py27.py3.py30.py31.py32.py33.py34.py35.py36.py37.py38-none-manylinux2010_x86_64.whl", hash = "sha256:a6f54188dfc349e5426b0ada84843f6eb35a3811d8dbf57ae49c448e7d683bb4"}, - {file = "glfw-2.7.0-py2.py27.py3.py30.py31.py32.py33.py34.py35.py36.py37.py38-none-manylinux2014_aarch64.whl", hash = "sha256:e33568b0aba2045a3d7555f22fcf83fafcacc7c2fc4cb995741894ea51e43ab6"}, - {file = "glfw-2.7.0-py2.py27.py3.py30.py31.py32.py33.py34.py35.py36.py37.py38-none-manylinux2014_x86_64.whl", hash = "sha256:d8630dd9673860c427abde5b79bbc348e02eccde8a3f2a802c5a2a4fb5d79fb8"}, - {file = "glfw-2.7.0-py2.py27.py3.py30.py31.py32.py33.py34.py35.py36.py37.py38-none-win32.whl", hash = "sha256:ff92d14ac1c7afa9c5deb495c335b485868709880e6e080e99ace7026d74c756"}, - {file = "glfw-2.7.0-py2.py27.py3.py30.py31.py32.py33.py34.py35.py36.py37.py38-none-win_amd64.whl", hash = "sha256:20d4b31a5a6a61fb787b25f8408204e0e248313cc500953071d13d30a2e5cc9d"}, - {file = "glfw-2.7.0.tar.gz", hash = "sha256:0e209ad38fa8c5be67ca590d7b17533d95ad1eb57d0a3f07b98131db69b79000"}, + {file = "glfw-2.8.0-py2.py27.py3.py30.py31.py32.py33.py34.py35.py36.py37.py38.p39.p310.p311.p312.p313-none-macosx_10_6_intel.whl", hash = "sha256:28aaef2f022b57cd37525ad1d11ba9049931a273359964fb3cd36762eb093ed1"}, + {file = "glfw-2.8.0-py2.py27.py3.py30.py31.py32.py33.py34.py35.py36.py37.py38.p39.p310.p311.p312.p313-none-macosx_11_0_arm64.whl", hash = "sha256:0239e478ff065719064fd1272ad29f8542e8178b11c614674bb930d85aa2d1e7"}, + {file = "glfw-2.8.0-py2.py27.py3.py30.py31.py32.py33.py34.py35.py36.py37.py38.p39.p310.p311.p312.p313-none-manylinux2014_aarch64.whl", hash = "sha256:44b5313cffa4a037e8e2988bccba6fa7de0a24123509f4ccf3e49b831cf72abb"}, + {file = "glfw-2.8.0-py2.py27.py3.py30.py31.py32.py33.py34.py35.py36.py37.py38.p39.p310.p311.p312.p313-none-manylinux2014_x86_64.whl", hash = "sha256:0fd982cf42a8e3137e05e392280826311961f9e99c82a0ccf22a63a0d2acd143"}, + {file = "glfw-2.8.0-py2.py27.py3.py30.py31.py32.py33.py34.py35.py36.py37.py38.p39.p310.p311.p312.p313-none-manylinux_2_28_aarch64.whl", hash = "sha256:523e1fc03898bcb0711f78d6f21eee58c1f865bb764cbd36b9549580a4c43454"}, + {file = "glfw-2.8.0-py2.py27.py3.py30.py31.py32.py33.py34.py35.py36.py37.py38.p39.p310.p311.p312.p313-none-manylinux_2_28_x86_64.whl", hash = "sha256:9cd20351b14587c6fe7063afb33cc03152e38dd2bff2b69613cb044bf3bdb635"}, + {file = "glfw-2.8.0-py2.py27.py3.py30.py31.py32.py33.py34.py35.py36.py37.py38.p39.p310.p311.p312.p313-none-win32.whl", hash = "sha256:13a75d8d3e8d4bb24595f2354a392ccf7c54ddd20dacb88e1188b760f2a7714b"}, + {file = "glfw-2.8.0-py2.py27.py3.py30.py31.py32.py33.py34.py35.py36.py37.py38.p39.p310.p311.p312.p313-none-win_amd64.whl", hash = "sha256:1416f10d2c2748c39910e9d9e6a10a5473743c5a745518061e4051be4c5caaa1"}, + {file = "glfw-2.8.0.tar.gz", hash = "sha256:90e90d328b0b26fed6e1631d21801e2d8a7a0c5dcb480e733c177567ec9666f0"}, ] [package.extras] @@ 
-2193,13 +2186,13 @@ files = [ [[package]] name = "httpcore" -version = "1.0.6" +version = "1.0.7" description = "A minimal low-level HTTP client." optional = true python-versions = ">=3.8" files = [ - {file = "httpcore-1.0.6-py3-none-any.whl", hash = "sha256:27b59625743b85577a8c0e10e55b50b5368a4f2cfe8cc7bcfa9cf00829c2682f"}, - {file = "httpcore-1.0.6.tar.gz", hash = "sha256:73f6dbd6eb8c21bbf7ef8efad555481853f5f6acdeaff1edb0694289269ee17f"}, + {file = "httpcore-1.0.7-py3-none-any.whl", hash = "sha256:a3fff8f43dc260d5bd363d9f9cf1830fa3a458b332856f34282de498ed420edd"}, + {file = "httpcore-1.0.7.tar.gz", hash = "sha256:8551cb62a169ec7162ac7be8d4817d561f60e08eaa485234898414bb5a8a0b4c"}, ] [package.dependencies] @@ -2214,13 +2207,13 @@ trio = ["trio (>=0.22.0,<1.0)"] [[package]] name = "httpx" -version = "0.27.2" +version = "0.28.1" description = "The next generation HTTP client." optional = true python-versions = ">=3.8" files = [ - {file = "httpx-0.27.2-py3-none-any.whl", hash = "sha256:7bb2708e112d8fdd7829cd4243970f0c223274051cb35ee80c03301ee29a3df0"}, - {file = "httpx-0.27.2.tar.gz", hash = "sha256:f7c2be1d2f3c3c3160d441802406b206c2b76f5947b11115e6df10c6c65e66c2"}, + {file = "httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad"}, + {file = "httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc"}, ] [package.dependencies] @@ -2228,7 +2221,6 @@ anyio = "*" certifi = "*" httpcore = "==1.*" idna = "*" -sniffio = "*" [package.extras] brotli = ["brotli", "brotlicffi"] @@ -2239,13 +2231,13 @@ zstd = ["zstandard (>=0.18.0)"] [[package]] name = "huggingface-hub" -version = "0.25.2" +version = "0.27.1" description = "Client library to download and publish models, datasets and other repos on the huggingface.co hub" optional = false python-versions = ">=3.8.0" files = [ - {file = "huggingface_hub-0.25.2-py3-none-any.whl", hash = "sha256:1897caf88ce7f97fe0110603d8f66ac264e3ba6accdf30cd66cc0fed5282ad25"}, - {file = "huggingface_hub-0.25.2.tar.gz", hash = "sha256:a1014ea111a5f40ccd23f7f7ba8ac46e20fa3b658ced1f86a00c75c06ec6423c"}, + {file = "huggingface_hub-0.27.1-py3-none-any.whl", hash = "sha256:1c5155ca7d60b60c2e2fc38cbb3ffb7f7c3adf48f824015b219af9061771daec"}, + {file = "huggingface_hub-0.27.1.tar.gz", hash = "sha256:c004463ca870283909d715d20f066ebd6968c2207dae9393fdffb3c1d4d8f98b"}, ] [package.dependencies] @@ -2260,16 +2252,16 @@ tqdm = ">=4.42.1" typing-extensions = ">=3.7.4.3" [package.extras] -all = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "fastapi", "gradio", "jedi", "minijinja (>=1.0)", "mypy (==1.5.1)", "numpy", "pytest (>=8.1.1,<8.2.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "ruff (>=0.5.0)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"] +all = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "fastapi", "gradio (>=4.0.0)", "jedi", "libcst (==1.4.0)", "mypy (==1.5.1)", "numpy", "pytest (>=8.1.1,<8.2.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "ruff (>=0.5.0)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"] cli = ["InquirerPy (==0.3.4)"] -dev = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", 
"aiohttp", "fastapi", "gradio", "jedi", "minijinja (>=1.0)", "mypy (==1.5.1)", "numpy", "pytest (>=8.1.1,<8.2.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "ruff (>=0.5.0)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"] +dev = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "fastapi", "gradio (>=4.0.0)", "jedi", "libcst (==1.4.0)", "mypy (==1.5.1)", "numpy", "pytest (>=8.1.1,<8.2.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "ruff (>=0.5.0)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"] fastai = ["fastai (>=2.4)", "fastcore (>=1.3.27)", "toml"] hf-transfer = ["hf-transfer (>=0.1.4)"] -inference = ["aiohttp", "minijinja (>=1.0)"] -quality = ["mypy (==1.5.1)", "ruff (>=0.5.0)"] +inference = ["aiohttp"] +quality = ["libcst (==1.4.0)", "mypy (==1.5.1)", "ruff (>=0.5.0)"] tensorflow = ["graphviz", "pydot", "tensorflow"] tensorflow-testing = ["keras (<3.0)", "tensorflow"] -testing = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "fastapi", "gradio", "jedi", "minijinja (>=1.0)", "numpy", "pytest (>=8.1.1,<8.2.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "soundfile", "urllib3 (<2.0)"] +testing = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "fastapi", "gradio (>=4.0.0)", "jedi", "numpy", "pytest (>=8.1.1,<8.2.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "soundfile", "urllib3 (<2.0)"] torch = ["safetensors[torch]", "torch"] typing = ["types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)"] @@ -2291,13 +2283,13 @@ packaging = "*" [[package]] name = "identify" -version = "2.6.1" +version = "2.6.5" description = "File identification library for Python" optional = true -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "identify-2.6.1-py2.py3-none-any.whl", hash = "sha256:53863bcac7caf8d2ed85bd20312ea5dcfc22226800f6d6881f232d861db5a8f0"}, - {file = "identify-2.6.1.tar.gz", hash = "sha256:91478c5fb7c3aac5ff7bf9b4344f803843dc586832d5f110d672b19aa1984c98"}, + {file = "identify-2.6.5-py2.py3-none-any.whl", hash = "sha256:14181a47091eb75b337af4c23078c9d09225cd4c48929f521f3bf16b09d02566"}, + {file = "identify-2.6.5.tar.gz", hash = "sha256:c10b33f250e5bba374fae86fb57f3adcebf1161bce7cdf92031915fd480c13bc"}, ] [package.extras] @@ -2319,46 +2311,47 @@ all = ["flake8 (>=7.1.1)", "mypy (>=1.11.2)", "pytest (>=8.3.2)", "ruff (>=0.6.2 [[package]] name = "imagecodecs" -version = "2024.9.22" +version = "2024.12.30" description = "Image transformation, compression, and decompression codecs" optional = true python-versions = ">=3.9" files = [ - {file = "imagecodecs-2024.9.22-cp310-cp310-macosx_10_14_x86_64.whl", hash = "sha256:4cc21a59c6eb409bc3930dc642039eb1ff67a36b3f8d9e8c229eaede6b26557e"}, - {file = "imagecodecs-2024.9.22-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:321ff2e6907820bdbf8350d20733f5068bf53513476d522028117aefab55fc03"}, - {file = "imagecodecs-2024.9.22-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:e1608015c1e182e103d8b2ecda4a0e54595c3f846ca76fa484302283f24f3e7f"}, - {file = "imagecodecs-2024.9.22-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:432e518d74ee5b9ac7d5b1022ed29a9fdabd0eab18201220e742fde631962cf8"}, - {file = "imagecodecs-2024.9.22-cp310-cp310-win32.whl", hash = "sha256:50d14caef565ccb4bdeb60e045b61f5d899d3caaf18e980923cdb50a181e4db2"}, - {file = "imagecodecs-2024.9.22-cp310-cp310-win_amd64.whl", hash = "sha256:d7220e9134c3abda5e9f720dcd810031b01b8ba1a71faa8055ab6b43b5056109"}, - {file = "imagecodecs-2024.9.22-cp311-cp311-macosx_10_14_x86_64.whl", hash = "sha256:47259f811aea089d7cdf369e6617cb336b67359835102a45ee2a49f2a8e20624"}, - {file = "imagecodecs-2024.9.22-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:52007be4bc809104e5660805725196255cc091c248e465f588f9b4506544b886"}, - {file = "imagecodecs-2024.9.22-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:db9bcb5abd23522b119f619810cfa0217bf4756d1b8c1146a6a81635d7fb98d1"}, - {file = "imagecodecs-2024.9.22-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:606f3c31387aa9019007cdf7e5e3fcfc4d04fc158f56a8e94340018988f5af69"}, - {file = "imagecodecs-2024.9.22-cp311-cp311-win32.whl", hash = "sha256:180295983edbdd1220099ebe33718876d6cea6c68d9442a3771bba91de0be8c7"}, - {file = "imagecodecs-2024.9.22-cp311-cp311-win_amd64.whl", hash = "sha256:915397c69f986da92608ec4af331b9682ad933f3d645a4e9f7b106530e57683c"}, - {file = "imagecodecs-2024.9.22-cp311-cp311-win_arm64.whl", hash = "sha256:15e7b21488d50f95980b1f865983a6963dad1c752d51cef5bfa76bdd1a325935"}, - {file = "imagecodecs-2024.9.22-cp312-cp312-macosx_10_14_x86_64.whl", hash = "sha256:ba7e98ad714100ae892aeadea5dd636e31eb95663f7e71fb3654fc3399f8a312"}, - {file = "imagecodecs-2024.9.22-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d1b59ffeaf1fdc06c5da1b8faf34a5f74f914c55a7148060b1746f7684552b6f"}, - {file = "imagecodecs-2024.9.22-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a9646cd9e8933c9a181387b159392d57832fb4f4b444f2d475a6ef7ba0ea8ef8"}, - {file = "imagecodecs-2024.9.22-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cd9c62286c5aa9cdd73551c7e55c7db04424968304e53ec9240915edb9f30e23"}, - {file = "imagecodecs-2024.9.22-cp312-cp312-win32.whl", hash = "sha256:15959cf31ea8070741318fd0d5748b734e9001b83afd8bab6fe15236c27acba0"}, - {file = "imagecodecs-2024.9.22-cp312-cp312-win_amd64.whl", hash = "sha256:44d51f5aae669fe1eba1474144c042fbb56f4286c072f37aa86941fed865270a"}, - {file = "imagecodecs-2024.9.22-cp312-cp312-win_arm64.whl", hash = "sha256:aa5f47ebef13f4c55b1ac24fafef5e7b340963a6a73af9d2cef2f9bfdf58bf97"}, - {file = "imagecodecs-2024.9.22-cp313-cp313-macosx_10_14_x86_64.whl", hash = "sha256:d4bd89bc86c74439a7a828ce62e28d575db125f25cadc31bd877e2616ace2f0d"}, - {file = "imagecodecs-2024.9.22-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:c8c37f8cdeedd0e01f55b9588e82b2c7059bc1a0167ed8dd05166cad674bfbde"}, - {file = "imagecodecs-2024.9.22-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9752c9af72ba372bbb0afca8a94f76b3096c1c54dcdb5cf18156fdc6b73403d2"}, - {file = "imagecodecs-2024.9.22-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d9ddd053c7f262ca1333fc23f45ece7b375ddca31a0761c46e1197691e895bc3"}, - {file = "imagecodecs-2024.9.22-cp313-cp313-win32.whl", hash = "sha256:a5dc99af846febbaaf328f03518c2e2b0d0dfbe0a1a7b781361550605c7d4c58"}, - {file = 
"imagecodecs-2024.9.22-cp313-cp313-win_amd64.whl", hash = "sha256:c8951d3449f81aaf0664a8f575d431906134973f9bec93073dfc8d8247db0a1a"}, - {file = "imagecodecs-2024.9.22-cp313-cp313-win_arm64.whl", hash = "sha256:ead06b23300b9f1958026d103aafe8eba272ff40abcb8c5db02d7711a5992cc9"}, - {file = "imagecodecs-2024.9.22-cp39-cp39-macosx_10_14_x86_64.whl", hash = "sha256:fa72958dee65ce40e25f9536408b04f72a95004fe4630faa7042cf6c6c29a1d1"}, - {file = "imagecodecs-2024.9.22-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4518e0edb5b369415bb7016097ff9cd1b2aed7a9960e21d2e616cf7e066af3fe"}, - {file = "imagecodecs-2024.9.22-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fbbe6f5929838adc954acdd51820602d1dfd8235f8b3eb3764be58e76c6626b7"}, - {file = "imagecodecs-2024.9.22-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:58dbee11a50f2bc2e8c81f3bc1887f1b1328d61f09d9d8caa2e4050ae635fbe9"}, - {file = "imagecodecs-2024.9.22-cp39-cp39-win32.whl", hash = "sha256:fcbbba54d0d61b6ca188d28695b244c4c5a9caaf848173015d81c91d3c0d47cb"}, - {file = "imagecodecs-2024.9.22-cp39-cp39-win_amd64.whl", hash = "sha256:3e55abc2934442fe3055b4f8943ebe8ff6c7eb57f9f895c80ca1732f38632d9f"}, - {file = "imagecodecs-2024.9.22-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:ec3ce35e6131853beb8a39e47e59b183d034c6e9476fafda38c7ab4d8d17e1f4"}, - {file = "imagecodecs-2024.9.22.tar.gz", hash = "sha256:fea0801b4008d25e971918d991397a351bbe76276cfa98eed2de54cb87e894a3"}, + {file = "imagecodecs-2024.12.30-1-cp39-cp39-win32.whl", hash = "sha256:631b541c7113702298153c075523598033bb7c802cf6151ac5ce94bb06d11189"}, + {file = "imagecodecs-2024.12.30-1-cp39-cp39-win_amd64.whl", hash = "sha256:3e3ca8531bb5f8b3b3c72bf0ef0e5d596e37b0a46dc4e37265039f3728ead1ae"}, + {file = "imagecodecs-2024.12.30-cp310-cp310-macosx_10_14_x86_64.whl", hash = "sha256:cfcdf3e8388b9f483ecce385ac455cbb82a698a1c03f5117f5fa7b88344d815b"}, + {file = "imagecodecs-2024.12.30-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1da60b181fc5948e5ae82ba990c9124dde664c61b187ba6f250b50bdac159d98"}, + {file = "imagecodecs-2024.12.30-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a71a74b0759071b685fbf79f7490413f30c2fc26961b6a7ff761f502a016803e"}, + {file = "imagecodecs-2024.12.30-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8973fbc8d328f2d2c79961e09211acaea3be50805d612ccb48d8019c5a948fec"}, + {file = "imagecodecs-2024.12.30-cp310-cp310-win32.whl", hash = "sha256:f9d2b3ea6157f5f91a033135bd638ceeaa193db92f6c448d793bf81c7d11f1f8"}, + {file = "imagecodecs-2024.12.30-cp310-cp310-win_amd64.whl", hash = "sha256:7d799518ea5a5fa245ba3c4e43edf86ac8fdbcae5a3d1de22c19120aaf426390"}, + {file = "imagecodecs-2024.12.30-cp311-cp311-macosx_10_14_x86_64.whl", hash = "sha256:ad6d8d4c61df08824a93d441b41f0b62ed03281b73bfc5a3f1590f3be7629321"}, + {file = "imagecodecs-2024.12.30-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:dcb40f08a1a74ea97317c032f30688a71c85b8f8b9414f0ace824862e21ead7b"}, + {file = "imagecodecs-2024.12.30-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ff3fce17a03db6a7a9c3b6d2860815b697d2a6756f246ffdeb5462fcc2914fba"}, + {file = "imagecodecs-2024.12.30-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:227bdfc3476f57862e0602ab921a8f2bd1017c70227e122d42f1cdf6d1117af9"}, + {file = "imagecodecs-2024.12.30-cp311-cp311-win32.whl", hash = "sha256:e722ee1bfaaf8173a37cc1dc11d7033e097dffc7d6f3d54a5e2b7333425b7a64"}, + {file = 
"imagecodecs-2024.12.30-cp311-cp311-win_amd64.whl", hash = "sha256:7acf874924cc49bb501c9194e6d9d12db95c83a06cd9bc3f9ed1adfe5f1b493f"}, + {file = "imagecodecs-2024.12.30-cp311-cp311-win_arm64.whl", hash = "sha256:8bd17f6dcd0cc5b7bffcebd0d965d0158957c8fe0fe97e41c7b69e1284787cbf"}, + {file = "imagecodecs-2024.12.30-cp312-cp312-macosx_10_14_x86_64.whl", hash = "sha256:d296f144b2f2ef1c33fd397941a77f2dc79ebdf9a9169a377eccca8bb074c5c9"}, + {file = "imagecodecs-2024.12.30-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:65d480431a2e99a9ecfb340f960e98ed70a634cd617dac274fddd855a9cac9a5"}, + {file = "imagecodecs-2024.12.30-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4018bccb0c905c9e1f7a84bed23ca21ad60ec05771ff7fe904d5bd264743cbd2"}, + {file = "imagecodecs-2024.12.30-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b03211f1853a184a0f5e78eef5b5d121ba59c6b00cf73abb83c1561cc7edafb6"}, + {file = "imagecodecs-2024.12.30-cp312-cp312-win32.whl", hash = "sha256:3971078c5f2960c5086c5d63a0bb6db53000b959bc87ab418e4c7b6e715116bf"}, + {file = "imagecodecs-2024.12.30-cp312-cp312-win_amd64.whl", hash = "sha256:758ba651f74f0c6fc86e6d741e385121721610830eb4421703fbb69c2638ce8a"}, + {file = "imagecodecs-2024.12.30-cp312-cp312-win_arm64.whl", hash = "sha256:1dd4d32b6c8cdc3117687324f5b79b14e0378d496289b20dd82c724c56e7b082"}, + {file = "imagecodecs-2024.12.30-cp313-cp313-macosx_10_14_x86_64.whl", hash = "sha256:cf4431927ebf59278d3f10047e164f2977b5e4f47a404dabd8a99f7490cc12d8"}, + {file = "imagecodecs-2024.12.30-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f381fb82d10b7d374d744e59088763c0d8a7dc299c90f573000fbb2e5bb32b39"}, + {file = "imagecodecs-2024.12.30-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:54d5dc773ffdcacf43c8755725a232f474f81bc948828740b327b38bc4968685"}, + {file = "imagecodecs-2024.12.30-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:96c4993156b02e624bc4f0477b27d62e56d38deb4dbaf7c66def9bdde3a63a04"}, + {file = "imagecodecs-2024.12.30-cp313-cp313-win32.whl", hash = "sha256:2c0bad37325290dd208a6b5024bd1afb258514ffe32ca385a20ef3ff35a6b0fb"}, + {file = "imagecodecs-2024.12.30-cp313-cp313-win_amd64.whl", hash = "sha256:08a911ccf658cbeed43fc58d4b52b61adc4917f654e9daf9a3372e98d75e6f07"}, + {file = "imagecodecs-2024.12.30-cp313-cp313-win_arm64.whl", hash = "sha256:ee494925e96b405e7452a658c0049230e7541b364cba837bc54d0f57e4a065d1"}, + {file = "imagecodecs-2024.12.30-cp39-cp39-macosx_10_14_x86_64.whl", hash = "sha256:834e088c915a9f2231ef1d49681560e50ba98260df1576f79b48ba5a1a7d0679"}, + {file = "imagecodecs-2024.12.30-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:8c6e02e8081ac947d093f8578cf87c07c1b34231d3adf1120571f3c8e68fa8ce"}, + {file = "imagecodecs-2024.12.30-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:993dd1aa2fea1bd0956bc35c57523e8d327b7218d308c161dc9351541f7320b9"}, + {file = "imagecodecs-2024.12.30-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e92b74e23ccb9fa58158bb8735e555a7aca97455e791f6f9fddce198dfde674c"}, + {file = "imagecodecs-2024.12.30-cp39-cp39-win32.whl", hash = "sha256:a7e8e42aeaffe8fcfb8c6052e48b49414840aaa2bc2d48abc59014db61a76129"}, + {file = "imagecodecs-2024.12.30-cp39-cp39-win_amd64.whl", hash = "sha256:422b22ed51c5de6127e388db765f27c1cefe562dda4f866f6230c471b12b0676"}, + {file = "imagecodecs-2024.12.30.tar.gz", hash = "sha256:4c88f745719313cabea6dd5f1e69c9f93346de91c0f1e770eb969f2232247f21"}, ] 
[package.dependencies] @@ -2370,13 +2363,13 @@ test = ["bitshuffle", "blosc", "blosc2", "czifile", "lz4", "numcodecs", "pyliblz [[package]] name = "imageio" -version = "2.35.1" +version = "2.36.1" description = "Library for reading and writing a wide range of image, video, scientific, and volumetric data formats." optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "imageio-2.35.1-py3-none-any.whl", hash = "sha256:6eb2e5244e7a16b85c10b5c2fe0f7bf961b40fcb9f1a9fd1bd1d2c2f8fb3cd65"}, - {file = "imageio-2.35.1.tar.gz", hash = "sha256:4952dfeef3c3947957f6d5dedb1f4ca31c6e509a476891062396834048aeed2a"}, + {file = "imageio-2.36.1-py3-none-any.whl", hash = "sha256:20abd2cae58e55ca1af8a8dcf43293336a59adf0391f1917bf8518633cfc2cdf"}, + {file = "imageio-2.36.1.tar.gz", hash = "sha256:e4e1d231f47f9a9e16100b0f7ce1a86e8856fb4d1c0fa2c4365a316f1746be62"}, ] [package.dependencies] @@ -2386,8 +2379,8 @@ pillow = ">=8.3.2" psutil = {version = "*", optional = true, markers = "extra == \"ffmpeg\""} [package.extras] -all-plugins = ["astropy", "av", "imageio-ffmpeg", "psutil", "tifffile"] -all-plugins-pypy = ["av", "imageio-ffmpeg", "psutil", "tifffile"] +all-plugins = ["astropy", "av", "imageio-ffmpeg", "numpy (>2)", "pillow-heif", "psutil", "rawpy", "tifffile"] +all-plugins-pypy = ["av", "imageio-ffmpeg", "pillow-heif", "psutil", "tifffile"] build = ["wheel"] dev = ["black", "flake8", "fsspec[github]", "pytest", "pytest-cov"] docs = ["numpydoc", "pydata-sphinx-theme", "sphinx (<6)"] @@ -2519,13 +2512,13 @@ test = ["flaky", "ipyparallel", "pre-commit", "pytest (>=7.0)", "pytest-asyncio [[package]] name = "ipython" -version = "8.28.0" +version = "8.31.0" description = "IPython: Productive Interactive Computing" optional = true python-versions = ">=3.10" files = [ - {file = "ipython-8.28.0-py3-none-any.whl", hash = "sha256:530ef1e7bb693724d3cdc37287c80b07ad9b25986c007a53aa1857272dac3f35"}, - {file = "ipython-8.28.0.tar.gz", hash = "sha256:0d0d15ca1e01faeb868ef56bc7ee5a0de5bd66885735682e8a322ae289a13d1a"}, + {file = "ipython-8.31.0-py3-none-any.whl", hash = "sha256:46ec58f8d3d076a61d128fe517a51eb730e3aaf0c184ea8c17d16e366660c6a6"}, + {file = "ipython-8.31.0.tar.gz", hash = "sha256:b6a2274606bec6166405ff05e54932ed6e5cfecaca1fc05f2cacde7bb074d70b"}, ] [package.dependencies] @@ -2534,16 +2527,16 @@ exceptiongroup = {version = "*", markers = "python_version < \"3.11\""} jedi = ">=0.16" matplotlib-inline = "*" pexpect = {version = ">4.3", markers = "sys_platform != \"win32\" and sys_platform != \"emscripten\""} -prompt-toolkit = ">=3.0.41,<3.1.0" +prompt_toolkit = ">=3.0.41,<3.1.0" pygments = ">=2.4.0" -stack-data = "*" +stack_data = "*" traitlets = ">=5.13.0" -typing-extensions = {version = ">=4.6", markers = "python_version < \"3.12\""} +typing_extensions = {version = ">=4.6", markers = "python_version < \"3.12\""} [package.extras] all = ["ipython[black,doc,kernel,matplotlib,nbconvert,nbformat,notebook,parallel,qtconsole]", "ipython[test,test-extra]"] black = ["black"] -doc = ["docrepr", "exceptiongroup", "intersphinx-registry", "ipykernel", "ipython[test]", "matplotlib", "setuptools (>=18.5)", "sphinx (>=1.3)", "sphinx-rtd-theme", "sphinxcontrib-jquery", "tomli", "typing-extensions"] +doc = ["docrepr", "exceptiongroup", "intersphinx_registry", "ipykernel", "ipython[test]", "matplotlib", "setuptools (>=18.5)", "sphinx (>=1.3)", "sphinx-rtd-theme", "sphinxcontrib-jquery", "tomli", "typing_extensions"] kernel = ["ipykernel"] matplotlib = ["matplotlib"] nbconvert = ["nbconvert"] @@ 
-2602,32 +2595,32 @@ files = [ [[package]] name = "jedi" -version = "0.19.1" +version = "0.19.2" description = "An autocompletion tool for Python that can be used for text editors." optional = true python-versions = ">=3.6" files = [ - {file = "jedi-0.19.1-py2.py3-none-any.whl", hash = "sha256:e983c654fe5c02867aef4cdfce5a2fbb4a50adc0af145f70504238f18ef5e7e0"}, - {file = "jedi-0.19.1.tar.gz", hash = "sha256:cf0496f3651bc65d7174ac1b7d043eff454892c708a87d1b683e57b569927ffd"}, + {file = "jedi-0.19.2-py2.py3-none-any.whl", hash = "sha256:a8ef22bde8490f57fe5c7681a3c83cb58874daf72b4784de3cce5b6ef6edb5b9"}, + {file = "jedi-0.19.2.tar.gz", hash = "sha256:4770dc3de41bde3966b02eb84fbcf557fb33cce26ad23da12c742fb50ecb11f0"}, ] [package.dependencies] -parso = ">=0.8.3,<0.9.0" +parso = ">=0.8.4,<0.9.0" [package.extras] docs = ["Jinja2 (==2.11.3)", "MarkupSafe (==1.1.1)", "Pygments (==2.8.1)", "alabaster (==0.7.12)", "babel (==2.9.1)", "chardet (==4.0.0)", "commonmark (==0.8.1)", "docutils (==0.17.1)", "future (==0.18.2)", "idna (==2.10)", "imagesize (==1.2.0)", "mock (==1.0.1)", "packaging (==20.9)", "pyparsing (==2.4.7)", "pytz (==2021.1)", "readthedocs-sphinx-ext (==2.1.4)", "recommonmark (==0.5.0)", "requests (==2.25.1)", "six (==1.15.0)", "snowballstemmer (==2.1.0)", "sphinx (==1.8.5)", "sphinx-rtd-theme (==0.4.3)", "sphinxcontrib-serializinghtml (==1.1.4)", "sphinxcontrib-websupport (==1.2.4)", "urllib3 (==1.26.4)"] qa = ["flake8 (==5.0.4)", "mypy (==0.971)", "types-setuptools (==67.2.0.1)"] -testing = ["Django", "attrs", "colorama", "docopt", "pytest (<7.0.0)"] +testing = ["Django", "attrs", "colorama", "docopt", "pytest (<9.0.0)"] [[package]] name = "jinja2" -version = "3.1.4" +version = "3.1.5" description = "A very fast and expressive template engine." optional = false python-versions = ">=3.7" files = [ - {file = "jinja2-3.1.4-py3-none-any.whl", hash = "sha256:bc5dd2abb727a5319567b7a813e6a2e7318c39f4f487cfe6c89c6f9c7d25197d"}, - {file = "jinja2-3.1.4.tar.gz", hash = "sha256:4a3aee7acbbe7303aede8e9648d13b8bf88a429282aa6122a993f0ac800cb369"}, + {file = "jinja2-3.1.5-py3-none-any.whl", hash = "sha256:aba0f4dc9ed8013c424088f68a5c226f7d6097ed89b246d7749c2ec4175c6adb"}, + {file = "jinja2-3.1.5.tar.gz", hash = "sha256:8fefff8dc3034e27bb80d67c671eb8a9bc424c0ef4c0826edbff304cceff43bb"}, ] [package.dependencies] @@ -2638,15 +2631,18 @@ i18n = ["Babel (>=2.7)"] [[package]] name = "json5" -version = "0.9.25" +version = "0.10.0" description = "A Python implementation of the JSON5 data format." 
optional = true -python-versions = ">=3.8" +python-versions = ">=3.8.0" files = [ - {file = "json5-0.9.25-py3-none-any.whl", hash = "sha256:34ed7d834b1341a86987ed52f3f76cd8ee184394906b6e22a1e0deb9ab294e8f"}, - {file = "json5-0.9.25.tar.gz", hash = "sha256:548e41b9be043f9426776f05df8635a00fe06104ea51ed24b67f908856e151ae"}, + {file = "json5-0.10.0-py3-none-any.whl", hash = "sha256:19b23410220a7271e8377f81ba8aacba2fdd56947fbb137ee5977cbe1f5e8dfa"}, + {file = "json5-0.10.0.tar.gz", hash = "sha256:e66941c8f0a02026943c52c2eb34ebeb2a6f819a0be05920a6f5243cd30fd559"}, ] +[package.extras] +dev = ["build (==1.2.2.post1)", "coverage (==7.5.3)", "mypy (==1.13.0)", "pip (==24.3.1)", "pylint (==3.2.3)", "ruff (==0.7.3)", "twine (==5.1.1)", "uv (==0.5.1)"] + [[package]] name = "jsonlines" version = "4.0.0" @@ -2801,13 +2797,13 @@ test = ["ipykernel", "pre-commit", "pytest (<8)", "pytest-cov", "pytest-timeout" [[package]] name = "jupyter-events" -version = "0.10.0" +version = "0.11.0" description = "Jupyter Event System library" optional = true -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "jupyter_events-0.10.0-py3-none-any.whl", hash = "sha256:4b72130875e59d57716d327ea70d3ebc3af1944d3717e5a498b8a06c6c159960"}, - {file = "jupyter_events-0.10.0.tar.gz", hash = "sha256:670b8229d3cc882ec782144ed22e0d29e1c2d639263f92ca8383e66682845e22"}, + {file = "jupyter_events-0.11.0-py3-none-any.whl", hash = "sha256:36399b41ce1ca45fe8b8271067d6a140ffa54cec4028e95491c93b78a855cacf"}, + {file = "jupyter_events-0.11.0.tar.gz", hash = "sha256:c0bc56a37aac29c1fbc3bcfbddb8c8c49533f9cf11f1c4e6adadba936574ab90"}, ] [package.dependencies] @@ -2821,7 +2817,7 @@ traitlets = ">=5.3" [package.extras] cli = ["click", "rich"] -docs = ["jupyterlite-sphinx", "myst-parser", "pydata-sphinx-theme", "sphinxcontrib-spelling"] +docs = ["jupyterlite-sphinx", "myst-parser", "pydata-sphinx-theme (>=0.16)", "sphinx (>=8)", "sphinxcontrib-spelling"] test = ["click", "pre-commit", "pytest (>=7.0)", "pytest-asyncio (>=0.19.0)", "pytest-console-scripts", "rich"] [[package]] @@ -2840,13 +2836,13 @@ jupyter-server = ">=1.1.2" [[package]] name = "jupyter-server" -version = "2.14.2" +version = "2.15.0" description = "The backend—i.e. core services, APIs, and REST endpoints—to Jupyter web applications." 
optional = true -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "jupyter_server-2.14.2-py3-none-any.whl", hash = "sha256:47ff506127c2f7851a17bf4713434208fc490955d0e8632e95014a9a9afbeefd"}, - {file = "jupyter_server-2.14.2.tar.gz", hash = "sha256:66095021aa9638ced276c248b1d81862e4c50f292d575920bbe960de1c56b12b"}, + {file = "jupyter_server-2.15.0-py3-none-any.whl", hash = "sha256:872d989becf83517012ee669f09604aa4a28097c0bd90b2f424310156c2cdae3"}, + {file = "jupyter_server-2.15.0.tar.gz", hash = "sha256:9d446b8697b4f7337a1b7cdcac40778babdd93ba614b6d68ab1c0c918f1c4084"}, ] [package.dependencies] @@ -2855,7 +2851,7 @@ argon2-cffi = ">=21.1" jinja2 = ">=3.0.3" jupyter-client = ">=7.4.4" jupyter-core = ">=4.12,<5.0.dev0 || >=5.1.dev0" -jupyter-events = ">=0.9.0" +jupyter-events = ">=0.11.0" jupyter-server-terminals = ">=0.4.4" nbconvert = ">=6.4.4" nbformat = ">=5.3.0" @@ -2895,13 +2891,13 @@ test = ["jupyter-server (>=2.0.0)", "pytest (>=7.0)", "pytest-jupyter[server] (> [[package]] name = "jupyterlab" -version = "4.2.5" +version = "4.3.4" description = "JupyterLab computational environment" optional = true python-versions = ">=3.8" files = [ - {file = "jupyterlab-4.2.5-py3-none-any.whl", hash = "sha256:73b6e0775d41a9fee7ee756c80f58a6bed4040869ccc21411dc559818874d321"}, - {file = "jupyterlab-4.2.5.tar.gz", hash = "sha256:ae7f3a1b8cb88b4f55009ce79fa7c06f99d70cd63601ee4aa91815d054f46f75"}, + {file = "jupyterlab-4.3.4-py3-none-any.whl", hash = "sha256:b754c2601c5be6adf87cb5a1d8495d653ffb945f021939f77776acaa94dae952"}, + {file = "jupyterlab-4.3.4.tar.gz", hash = "sha256:f0bb9b09a04766e3423cccc2fc23169aa2ffedcdf8713e9e0fb33cac0b6859d0"}, ] [package.dependencies] @@ -2915,15 +2911,15 @@ jupyter-server = ">=2.4.0,<3" jupyterlab-server = ">=2.27.1,<3" notebook-shim = ">=0.2" packaging = "*" -setuptools = ">=40.1.0" +setuptools = ">=40.8.0" tomli = {version = ">=1.2.2", markers = "python_version < \"3.11\""} tornado = ">=6.2.0" traitlets = "*" [package.extras] -dev = ["build", "bump2version", "coverage", "hatch", "pre-commit", "pytest-cov", "ruff (==0.3.5)"] -docs = ["jsx-lexer", "myst-parser", "pydata-sphinx-theme (>=0.13.0)", "pytest", "pytest-check-links", "pytest-jupyter", "sphinx (>=1.8,<7.3.0)", "sphinx-copybutton"] -docs-screenshots = ["altair (==5.3.0)", "ipython (==8.16.1)", "ipywidgets (==8.1.2)", "jupyterlab-geojson (==3.4.0)", "jupyterlab-language-pack-zh-cn (==4.1.post2)", "matplotlib (==3.8.3)", "nbconvert (>=7.0.0)", "pandas (==2.2.1)", "scipy (==1.12.0)", "vega-datasets (==0.9.0)"] +dev = ["build", "bump2version", "coverage", "hatch", "pre-commit", "pytest-cov", "ruff (==0.6.9)"] +docs = ["jsx-lexer", "myst-parser", "pydata-sphinx-theme (>=0.13.0)", "pytest", "pytest-check-links", "pytest-jupyter", "sphinx (>=1.8,<8.1.0)", "sphinx-copybutton"] +docs-screenshots = ["altair (==5.4.1)", "ipython (==8.16.1)", "ipywidgets (==8.1.5)", "jupyterlab-geojson (==3.4.0)", "jupyterlab-language-pack-zh-cn (==4.2.post3)", "matplotlib (==3.9.2)", "nbconvert (>=7.0.0)", "pandas (==2.2.3)", "scipy (==1.14.1)", "vega-datasets (==0.9.0)"] test = ["coverage", "pytest (>=7.0)", "pytest-check-links (>=0.7)", "pytest-console-scripts", "pytest-cov", "pytest-jupyter (>=0.5.3)", "pytest-timeout", "pytest-tornasync", "requests", "requests-cache", "virtualenv"] upgrade-extension = ["copier (>=9,<10)", "jinja2-time (<0.3)", "pydantic (<3.0)", "pyyaml-include (<3.0)", "tomli-w (<2.0)"] @@ -2976,125 +2972,91 @@ files = [ [[package]] name = "kiwisolver" -version = "1.4.7" +version = "1.4.8" 
description = "A fast implementation of the Cassowary constraint solver" optional = true -python-versions = ">=3.8" +python-versions = ">=3.10" files = [ - {file = "kiwisolver-1.4.7-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:8a9c83f75223d5e48b0bc9cb1bf2776cf01563e00ade8775ffe13b0b6e1af3a6"}, - {file = "kiwisolver-1.4.7-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:58370b1ffbd35407444d57057b57da5d6549d2d854fa30249771775c63b5fe17"}, - {file = "kiwisolver-1.4.7-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:aa0abdf853e09aff551db11fce173e2177d00786c688203f52c87ad7fcd91ef9"}, - {file = "kiwisolver-1.4.7-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:8d53103597a252fb3ab8b5845af04c7a26d5e7ea8122303dd7a021176a87e8b9"}, - {file = "kiwisolver-1.4.7-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:88f17c5ffa8e9462fb79f62746428dd57b46eb931698e42e990ad63103f35e6c"}, - {file = "kiwisolver-1.4.7-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:88a9ca9c710d598fd75ee5de59d5bda2684d9db36a9f50b6125eaea3969c2599"}, - {file = "kiwisolver-1.4.7-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f4d742cb7af1c28303a51b7a27aaee540e71bb8e24f68c736f6f2ffc82f2bf05"}, - {file = "kiwisolver-1.4.7-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e28c7fea2196bf4c2f8d46a0415c77a1c480cc0724722f23d7410ffe9842c407"}, - {file = "kiwisolver-1.4.7-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:e968b84db54f9d42046cf154e02911e39c0435c9801681e3fc9ce8a3c4130278"}, - {file = "kiwisolver-1.4.7-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:0c18ec74c0472de033e1bebb2911c3c310eef5649133dd0bedf2a169a1b269e5"}, - {file = "kiwisolver-1.4.7-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:8f0ea6da6d393d8b2e187e6a5e3fb81f5862010a40c3945e2c6d12ae45cfb2ad"}, - {file = "kiwisolver-1.4.7-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:f106407dda69ae456dd1227966bf445b157ccc80ba0dff3802bb63f30b74e895"}, - {file = "kiwisolver-1.4.7-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:84ec80df401cfee1457063732d90022f93951944b5b58975d34ab56bb150dfb3"}, - {file = "kiwisolver-1.4.7-cp310-cp310-win32.whl", hash = "sha256:71bb308552200fb2c195e35ef05de12f0c878c07fc91c270eb3d6e41698c3bcc"}, - {file = "kiwisolver-1.4.7-cp310-cp310-win_amd64.whl", hash = "sha256:44756f9fd339de0fb6ee4f8c1696cfd19b2422e0d70b4cefc1cc7f1f64045a8c"}, - {file = "kiwisolver-1.4.7-cp310-cp310-win_arm64.whl", hash = "sha256:78a42513018c41c2ffd262eb676442315cbfe3c44eed82385c2ed043bc63210a"}, - {file = "kiwisolver-1.4.7-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:d2b0e12a42fb4e72d509fc994713d099cbb15ebf1103545e8a45f14da2dfca54"}, - {file = "kiwisolver-1.4.7-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2a8781ac3edc42ea4b90bc23e7d37b665d89423818e26eb6df90698aa2287c95"}, - {file = "kiwisolver-1.4.7-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:46707a10836894b559e04b0fd143e343945c97fd170d69a2d26d640b4e297935"}, - {file = "kiwisolver-1.4.7-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ef97b8df011141c9b0f6caf23b29379f87dd13183c978a30a3c546d2c47314cb"}, - {file = "kiwisolver-1.4.7-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3ab58c12a2cd0fc769089e6d38466c46d7f76aced0a1f54c77652446733d2d02"}, - {file = "kiwisolver-1.4.7-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:803b8e1459341c1bb56d1c5c010406d5edec8a0713a0945851290a7930679b51"}, - {file = "kiwisolver-1.4.7-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f9a9e8a507420fe35992ee9ecb302dab68550dedc0da9e2880dd88071c5fb052"}, - {file = "kiwisolver-1.4.7-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:18077b53dc3bb490e330669a99920c5e6a496889ae8c63b58fbc57c3d7f33a18"}, - {file = "kiwisolver-1.4.7-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:6af936f79086a89b3680a280c47ea90b4df7047b5bdf3aa5c524bbedddb9e545"}, - {file = "kiwisolver-1.4.7-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:3abc5b19d24af4b77d1598a585b8a719beb8569a71568b66f4ebe1fb0449460b"}, - {file = "kiwisolver-1.4.7-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:933d4de052939d90afbe6e9d5273ae05fb836cc86c15b686edd4b3560cc0ee36"}, - {file = "kiwisolver-1.4.7-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:65e720d2ab2b53f1f72fb5da5fb477455905ce2c88aaa671ff0a447c2c80e8e3"}, - {file = "kiwisolver-1.4.7-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:3bf1ed55088f214ba6427484c59553123fdd9b218a42bbc8c6496d6754b1e523"}, - {file = "kiwisolver-1.4.7-cp311-cp311-win32.whl", hash = "sha256:4c00336b9dd5ad96d0a558fd18a8b6f711b7449acce4c157e7343ba92dd0cf3d"}, - {file = "kiwisolver-1.4.7-cp311-cp311-win_amd64.whl", hash = "sha256:929e294c1ac1e9f615c62a4e4313ca1823ba37326c164ec720a803287c4c499b"}, - {file = "kiwisolver-1.4.7-cp311-cp311-win_arm64.whl", hash = "sha256:e33e8fbd440c917106b237ef1a2f1449dfbb9b6f6e1ce17c94cd6a1e0d438376"}, - {file = "kiwisolver-1.4.7-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:5360cc32706dab3931f738d3079652d20982511f7c0ac5711483e6eab08efff2"}, - {file = "kiwisolver-1.4.7-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:942216596dc64ddb25adb215c3c783215b23626f8d84e8eff8d6d45c3f29f75a"}, - {file = "kiwisolver-1.4.7-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:48b571ecd8bae15702e4f22d3ff6a0f13e54d3d00cd25216d5e7f658242065ee"}, - {file = "kiwisolver-1.4.7-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ad42ba922c67c5f219097b28fae965e10045ddf145d2928bfac2eb2e17673640"}, - {file = "kiwisolver-1.4.7-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:612a10bdae23404a72941a0fc8fa2660c6ea1217c4ce0dbcab8a8f6543ea9e7f"}, - {file = "kiwisolver-1.4.7-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9e838bba3a3bac0fe06d849d29772eb1afb9745a59710762e4ba3f4cb8424483"}, - {file = "kiwisolver-1.4.7-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:22f499f6157236c19f4bbbd472fa55b063db77a16cd74d49afe28992dff8c258"}, - {file = "kiwisolver-1.4.7-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:693902d433cf585133699972b6d7c42a8b9f8f826ebcaf0132ff55200afc599e"}, - {file = "kiwisolver-1.4.7-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:4e77f2126c3e0b0d055f44513ed349038ac180371ed9b52fe96a32aa071a5107"}, - {file = "kiwisolver-1.4.7-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:657a05857bda581c3656bfc3b20e353c232e9193eb167766ad2dc58b56504948"}, - {file = "kiwisolver-1.4.7-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:4bfa75a048c056a411f9705856abfc872558e33c055d80af6a380e3658766038"}, - {file = "kiwisolver-1.4.7-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:34ea1de54beef1c104422d210c47c7d2a4999bdecf42c7b5718fbe59a4cac383"}, - {file = 
"kiwisolver-1.4.7-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:90da3b5f694b85231cf93586dad5e90e2d71b9428f9aad96952c99055582f520"}, - {file = "kiwisolver-1.4.7-cp312-cp312-win32.whl", hash = "sha256:18e0cca3e008e17fe9b164b55735a325140a5a35faad8de92dd80265cd5eb80b"}, - {file = "kiwisolver-1.4.7-cp312-cp312-win_amd64.whl", hash = "sha256:58cb20602b18f86f83a5c87d3ee1c766a79c0d452f8def86d925e6c60fbf7bfb"}, - {file = "kiwisolver-1.4.7-cp312-cp312-win_arm64.whl", hash = "sha256:f5a8b53bdc0b3961f8b6125e198617c40aeed638b387913bf1ce78afb1b0be2a"}, - {file = "kiwisolver-1.4.7-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:2e6039dcbe79a8e0f044f1c39db1986a1b8071051efba3ee4d74f5b365f5226e"}, - {file = "kiwisolver-1.4.7-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a1ecf0ac1c518487d9d23b1cd7139a6a65bc460cd101ab01f1be82ecf09794b6"}, - {file = "kiwisolver-1.4.7-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:7ab9ccab2b5bd5702ab0803676a580fffa2aa178c2badc5557a84cc943fcf750"}, - {file = "kiwisolver-1.4.7-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f816dd2277f8d63d79f9c8473a79fe54047bc0467754962840782c575522224d"}, - {file = "kiwisolver-1.4.7-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cf8bcc23ceb5a1b624572a1623b9f79d2c3b337c8c455405ef231933a10da379"}, - {file = "kiwisolver-1.4.7-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dea0bf229319828467d7fca8c7c189780aa9ff679c94539eed7532ebe33ed37c"}, - {file = "kiwisolver-1.4.7-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c06a4c7cf15ec739ce0e5971b26c93638730090add60e183530d70848ebdd34"}, - {file = "kiwisolver-1.4.7-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:913983ad2deb14e66d83c28b632fd35ba2b825031f2fa4ca29675e665dfecbe1"}, - {file = "kiwisolver-1.4.7-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:5337ec7809bcd0f424c6b705ecf97941c46279cf5ed92311782c7c9c2026f07f"}, - {file = "kiwisolver-1.4.7-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:4c26ed10c4f6fa6ddb329a5120ba3b6db349ca192ae211e882970bfc9d91420b"}, - {file = "kiwisolver-1.4.7-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:c619b101e6de2222c1fcb0531e1b17bbffbe54294bfba43ea0d411d428618c27"}, - {file = "kiwisolver-1.4.7-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:073a36c8273647592ea332e816e75ef8da5c303236ec0167196793eb1e34657a"}, - {file = "kiwisolver-1.4.7-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:3ce6b2b0231bda412463e152fc18335ba32faf4e8c23a754ad50ffa70e4091ee"}, - {file = "kiwisolver-1.4.7-cp313-cp313-win32.whl", hash = "sha256:f4c9aee212bc89d4e13f58be11a56cc8036cabad119259d12ace14b34476fd07"}, - {file = "kiwisolver-1.4.7-cp313-cp313-win_amd64.whl", hash = "sha256:8a3ec5aa8e38fc4c8af308917ce12c536f1c88452ce554027e55b22cbbfbff76"}, - {file = "kiwisolver-1.4.7-cp313-cp313-win_arm64.whl", hash = "sha256:76c8094ac20ec259471ac53e774623eb62e6e1f56cd8690c67ce6ce4fcb05650"}, - {file = "kiwisolver-1.4.7-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:5d5abf8f8ec1f4e22882273c423e16cae834c36856cac348cfbfa68e01c40f3a"}, - {file = "kiwisolver-1.4.7-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:aeb3531b196ef6f11776c21674dba836aeea9d5bd1cf630f869e3d90b16cfade"}, - {file = "kiwisolver-1.4.7-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b7d755065e4e866a8086c9bdada157133ff466476a2ad7861828e17b6026e22c"}, - {file = 
"kiwisolver-1.4.7-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:08471d4d86cbaec61f86b217dd938a83d85e03785f51121e791a6e6689a3be95"}, - {file = "kiwisolver-1.4.7-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7bbfcb7165ce3d54a3dfbe731e470f65739c4c1f85bb1018ee912bae139e263b"}, - {file = "kiwisolver-1.4.7-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5d34eb8494bea691a1a450141ebb5385e4b69d38bb8403b5146ad279f4b30fa3"}, - {file = "kiwisolver-1.4.7-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9242795d174daa40105c1d86aba618e8eab7bf96ba8c3ee614da8302a9f95503"}, - {file = "kiwisolver-1.4.7-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:a0f64a48bb81af7450e641e3fe0b0394d7381e342805479178b3d335d60ca7cf"}, - {file = "kiwisolver-1.4.7-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:8e045731a5416357638d1700927529e2b8ab304811671f665b225f8bf8d8f933"}, - {file = "kiwisolver-1.4.7-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:4322872d5772cae7369f8351da1edf255a604ea7087fe295411397d0cfd9655e"}, - {file = "kiwisolver-1.4.7-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:e1631290ee9271dffe3062d2634c3ecac02c83890ada077d225e081aca8aab89"}, - {file = "kiwisolver-1.4.7-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:edcfc407e4eb17e037bca59be0e85a2031a2ac87e4fed26d3e9df88b4165f92d"}, - {file = "kiwisolver-1.4.7-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:4d05d81ecb47d11e7f8932bd8b61b720bf0b41199358f3f5e36d38e28f0532c5"}, - {file = "kiwisolver-1.4.7-cp38-cp38-win32.whl", hash = "sha256:b38ac83d5f04b15e515fd86f312479d950d05ce2368d5413d46c088dda7de90a"}, - {file = "kiwisolver-1.4.7-cp38-cp38-win_amd64.whl", hash = "sha256:d83db7cde68459fc803052a55ace60bea2bae361fc3b7a6d5da07e11954e4b09"}, - {file = "kiwisolver-1.4.7-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:3f9362ecfca44c863569d3d3c033dbe8ba452ff8eed6f6b5806382741a1334bd"}, - {file = "kiwisolver-1.4.7-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:e8df2eb9b2bac43ef8b082e06f750350fbbaf2887534a5be97f6cf07b19d9583"}, - {file = "kiwisolver-1.4.7-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f32d6edbc638cde7652bd690c3e728b25332acbadd7cad670cc4a02558d9c417"}, - {file = "kiwisolver-1.4.7-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:e2e6c39bd7b9372b0be21456caab138e8e69cc0fc1190a9dfa92bd45a1e6e904"}, - {file = "kiwisolver-1.4.7-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:dda56c24d869b1193fcc763f1284b9126550eaf84b88bbc7256e15028f19188a"}, - {file = "kiwisolver-1.4.7-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:79849239c39b5e1fd906556c474d9b0439ea6792b637511f3fe3a41158d89ca8"}, - {file = "kiwisolver-1.4.7-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5e3bc157fed2a4c02ec468de4ecd12a6e22818d4f09cde2c31ee3226ffbefab2"}, - {file = "kiwisolver-1.4.7-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3da53da805b71e41053dc670f9a820d1157aae77b6b944e08024d17bcd51ef88"}, - {file = "kiwisolver-1.4.7-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:8705f17dfeb43139a692298cb6637ee2e59c0194538153e83e9ee0c75c2eddde"}, - {file = "kiwisolver-1.4.7-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:82a5c2f4b87c26bb1a0ef3d16b5c4753434633b83d365cc0ddf2770c93829e3c"}, - {file = "kiwisolver-1.4.7-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = 
"sha256:ce8be0466f4c0d585cdb6c1e2ed07232221df101a4c6f28821d2aa754ca2d9e2"}, - {file = "kiwisolver-1.4.7-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:409afdfe1e2e90e6ee7fc896f3df9a7fec8e793e58bfa0d052c8a82f99c37abb"}, - {file = "kiwisolver-1.4.7-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:5b9c3f4ee0b9a439d2415012bd1b1cc2df59e4d6a9939f4d669241d30b414327"}, - {file = "kiwisolver-1.4.7-cp39-cp39-win32.whl", hash = "sha256:a79ae34384df2b615eefca647a2873842ac3b596418032bef9a7283675962644"}, - {file = "kiwisolver-1.4.7-cp39-cp39-win_amd64.whl", hash = "sha256:cf0438b42121a66a3a667de17e779330fc0f20b0d97d59d2f2121e182b0505e4"}, - {file = "kiwisolver-1.4.7-cp39-cp39-win_arm64.whl", hash = "sha256:764202cc7e70f767dab49e8df52c7455e8de0df5d858fa801a11aa0d882ccf3f"}, - {file = "kiwisolver-1.4.7-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:94252291e3fe68001b1dd747b4c0b3be12582839b95ad4d1b641924d68fd4643"}, - {file = "kiwisolver-1.4.7-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:5b7dfa3b546da08a9f622bb6becdb14b3e24aaa30adba66749d38f3cc7ea9706"}, - {file = "kiwisolver-1.4.7-pp310-pypy310_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bd3de6481f4ed8b734da5df134cd5a6a64fe32124fe83dde1e5b5f29fe30b1e6"}, - {file = "kiwisolver-1.4.7-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a91b5f9f1205845d488c928e8570dcb62b893372f63b8b6e98b863ebd2368ff2"}, - {file = "kiwisolver-1.4.7-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:40fa14dbd66b8b8f470d5fc79c089a66185619d31645f9b0773b88b19f7223c4"}, - {file = "kiwisolver-1.4.7-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:eb542fe7933aa09d8d8f9d9097ef37532a7df6497819d16efe4359890a2f417a"}, - {file = "kiwisolver-1.4.7-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:bfa1acfa0c54932d5607e19a2c24646fb4c1ae2694437789129cf099789a3b00"}, - {file = "kiwisolver-1.4.7-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:eee3ea935c3d227d49b4eb85660ff631556841f6e567f0f7bda972df6c2c9935"}, - {file = "kiwisolver-1.4.7-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:f3160309af4396e0ed04db259c3ccbfdc3621b5559b5453075e5de555e1f3a1b"}, - {file = "kiwisolver-1.4.7-pp38-pypy38_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:a17f6a29cf8935e587cc8a4dbfc8368c55edc645283db0ce9801016f83526c2d"}, - {file = "kiwisolver-1.4.7-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:10849fb2c1ecbfae45a693c070e0320a91b35dd4bcf58172c023b994283a124d"}, - {file = "kiwisolver-1.4.7-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:ac542bf38a8a4be2dc6b15248d36315ccc65f0743f7b1a76688ffb6b5129a5c2"}, - {file = "kiwisolver-1.4.7-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:8b01aac285f91ca889c800042c35ad3b239e704b150cfd3382adfc9dcc780e39"}, - {file = "kiwisolver-1.4.7-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:48be928f59a1f5c8207154f935334d374e79f2b5d212826307d072595ad76a2e"}, - {file = "kiwisolver-1.4.7-pp39-pypy39_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f37cfe618a117e50d8c240555331160d73d0411422b59b5ee217843d7b693608"}, - {file = "kiwisolver-1.4.7-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:599b5c873c63a1f6ed7eead644a8a380cfbdf5db91dcb6f85707aaab213b1674"}, - {file = 
"kiwisolver-1.4.7-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:801fa7802e5cfabe3ab0c81a34c323a319b097dfb5004be950482d882f3d7225"}, - {file = "kiwisolver-1.4.7-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:0c6c43471bc764fad4bc99c5c2d6d16a676b1abf844ca7c8702bdae92df01ee0"}, - {file = "kiwisolver-1.4.7.tar.gz", hash = "sha256:9893ff81bd7107f7b685d3017cc6583daadb4fc26e4a888350df530e41980a60"}, + {file = "kiwisolver-1.4.8-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:88c6f252f6816a73b1f8c904f7bbe02fd67c09a69f7cb8a0eecdbf5ce78e63db"}, + {file = "kiwisolver-1.4.8-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c72941acb7b67138f35b879bbe85be0f6c6a70cab78fe3ef6db9c024d9223e5b"}, + {file = "kiwisolver-1.4.8-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ce2cf1e5688edcb727fdf7cd1bbd0b6416758996826a8be1d958f91880d0809d"}, + {file = "kiwisolver-1.4.8-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:c8bf637892dc6e6aad2bc6d4d69d08764166e5e3f69d469e55427b6ac001b19d"}, + {file = "kiwisolver-1.4.8-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:034d2c891f76bd3edbdb3ea11140d8510dca675443da7304205a2eaa45d8334c"}, + {file = "kiwisolver-1.4.8-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d47b28d1dfe0793d5e96bce90835e17edf9a499b53969b03c6c47ea5985844c3"}, + {file = "kiwisolver-1.4.8-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:eb158fe28ca0c29f2260cca8c43005329ad58452c36f0edf298204de32a9a3ed"}, + {file = "kiwisolver-1.4.8-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d5536185fce131780ebd809f8e623bf4030ce1b161353166c49a3c74c287897f"}, + {file = "kiwisolver-1.4.8-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:369b75d40abedc1da2c1f4de13f3482cb99e3237b38726710f4a793432b1c5ff"}, + {file = "kiwisolver-1.4.8-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:641f2ddf9358c80faa22e22eb4c9f54bd3f0e442e038728f500e3b978d00aa7d"}, + {file = "kiwisolver-1.4.8-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:d561d2d8883e0819445cfe58d7ddd673e4015c3c57261d7bdcd3710d0d14005c"}, + {file = "kiwisolver-1.4.8-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:1732e065704b47c9afca7ffa272f845300a4eb959276bf6970dc07265e73b605"}, + {file = "kiwisolver-1.4.8-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:bcb1ebc3547619c3b58a39e2448af089ea2ef44b37988caf432447374941574e"}, + {file = "kiwisolver-1.4.8-cp310-cp310-win_amd64.whl", hash = "sha256:89c107041f7b27844179ea9c85d6da275aa55ecf28413e87624d033cf1f6b751"}, + {file = "kiwisolver-1.4.8-cp310-cp310-win_arm64.whl", hash = "sha256:b5773efa2be9eb9fcf5415ea3ab70fc785d598729fd6057bea38d539ead28271"}, + {file = "kiwisolver-1.4.8-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:a4d3601908c560bdf880f07d94f31d734afd1bb71e96585cace0e38ef44c6d84"}, + {file = "kiwisolver-1.4.8-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:856b269c4d28a5c0d5e6c1955ec36ebfd1651ac00e1ce0afa3e28da95293b561"}, + {file = "kiwisolver-1.4.8-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c2b9a96e0f326205af81a15718a9073328df1173a2619a68553decb7097fd5d7"}, + {file = "kiwisolver-1.4.8-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c5020c83e8553f770cb3b5fc13faac40f17e0b205bd237aebd21d53d733adb03"}, + {file = "kiwisolver-1.4.8-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:dace81d28c787956bfbfbbfd72fdcef014f37d9b48830829e488fdb32b49d954"}, + {file = "kiwisolver-1.4.8-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:11e1022b524bd48ae56c9b4f9296bce77e15a2e42a502cceba602f804b32bb79"}, + {file = "kiwisolver-1.4.8-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3b9b4d2892fefc886f30301cdd80debd8bb01ecdf165a449eb6e78f79f0fabd6"}, + {file = "kiwisolver-1.4.8-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a96c0e790ee875d65e340ab383700e2b4891677b7fcd30a699146f9384a2bb0"}, + {file = "kiwisolver-1.4.8-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:23454ff084b07ac54ca8be535f4174170c1094a4cff78fbae4f73a4bcc0d4dab"}, + {file = "kiwisolver-1.4.8-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:87b287251ad6488e95b4f0b4a79a6d04d3ea35fde6340eb38fbd1ca9cd35bbbc"}, + {file = "kiwisolver-1.4.8-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:b21dbe165081142b1232a240fc6383fd32cdd877ca6cc89eab93e5f5883e1c25"}, + {file = "kiwisolver-1.4.8-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:768cade2c2df13db52475bd28d3a3fac8c9eff04b0e9e2fda0f3760f20b3f7fc"}, + {file = "kiwisolver-1.4.8-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:d47cfb2650f0e103d4bf68b0b5804c68da97272c84bb12850d877a95c056bd67"}, + {file = "kiwisolver-1.4.8-cp311-cp311-win_amd64.whl", hash = "sha256:ed33ca2002a779a2e20eeb06aea7721b6e47f2d4b8a8ece979d8ba9e2a167e34"}, + {file = "kiwisolver-1.4.8-cp311-cp311-win_arm64.whl", hash = "sha256:16523b40aab60426ffdebe33ac374457cf62863e330a90a0383639ce14bf44b2"}, + {file = "kiwisolver-1.4.8-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:d6af5e8815fd02997cb6ad9bbed0ee1e60014438ee1a5c2444c96f87b8843502"}, + {file = "kiwisolver-1.4.8-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:bade438f86e21d91e0cf5dd7c0ed00cda0f77c8c1616bd83f9fc157fa6760d31"}, + {file = "kiwisolver-1.4.8-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b83dc6769ddbc57613280118fb4ce3cd08899cc3369f7d0e0fab518a7cf37fdb"}, + {file = "kiwisolver-1.4.8-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:111793b232842991be367ed828076b03d96202c19221b5ebab421ce8bcad016f"}, + {file = "kiwisolver-1.4.8-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:257af1622860e51b1a9d0ce387bf5c2c4f36a90594cb9514f55b074bcc787cfc"}, + {file = "kiwisolver-1.4.8-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:69b5637c3f316cab1ec1c9a12b8c5f4750a4c4b71af9157645bf32830e39c03a"}, + {file = "kiwisolver-1.4.8-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:782bb86f245ec18009890e7cb8d13a5ef54dcf2ebe18ed65f795e635a96a1c6a"}, + {file = "kiwisolver-1.4.8-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cc978a80a0db3a66d25767b03688f1147a69e6237175c0f4ffffaaedf744055a"}, + {file = "kiwisolver-1.4.8-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:36dbbfd34838500a31f52c9786990d00150860e46cd5041386f217101350f0d3"}, + {file = "kiwisolver-1.4.8-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:eaa973f1e05131de5ff3569bbba7f5fd07ea0595d3870ed4a526d486fe57fa1b"}, + {file = "kiwisolver-1.4.8-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:a66f60f8d0c87ab7f59b6fb80e642ebb29fec354a4dfad687ca4092ae69d04f4"}, + {file = "kiwisolver-1.4.8-cp312-cp312-musllinux_1_2_s390x.whl", hash = 
"sha256:858416b7fb777a53f0c59ca08190ce24e9abbd3cffa18886a5781b8e3e26f65d"}, + {file = "kiwisolver-1.4.8-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:085940635c62697391baafaaeabdf3dd7a6c3643577dde337f4d66eba021b2b8"}, + {file = "kiwisolver-1.4.8-cp312-cp312-win_amd64.whl", hash = "sha256:01c3d31902c7db5fb6182832713d3b4122ad9317c2c5877d0539227d96bb2e50"}, + {file = "kiwisolver-1.4.8-cp312-cp312-win_arm64.whl", hash = "sha256:a3c44cb68861de93f0c4a8175fbaa691f0aa22550c331fefef02b618a9dcb476"}, + {file = "kiwisolver-1.4.8-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:1c8ceb754339793c24aee1c9fb2485b5b1f5bb1c2c214ff13368431e51fc9a09"}, + {file = "kiwisolver-1.4.8-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:54a62808ac74b5e55a04a408cda6156f986cefbcf0ada13572696b507cc92fa1"}, + {file = "kiwisolver-1.4.8-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:68269e60ee4929893aad82666821aaacbd455284124817af45c11e50a4b42e3c"}, + {file = "kiwisolver-1.4.8-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:34d142fba9c464bc3bbfeff15c96eab0e7310343d6aefb62a79d51421fcc5f1b"}, + {file = "kiwisolver-1.4.8-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3ddc373e0eef45b59197de815b1b28ef89ae3955e7722cc9710fb91cd77b7f47"}, + {file = "kiwisolver-1.4.8-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:77e6f57a20b9bd4e1e2cedda4d0b986ebd0216236f0106e55c28aea3d3d69b16"}, + {file = "kiwisolver-1.4.8-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:08e77738ed7538f036cd1170cbed942ef749137b1311fa2bbe2a7fda2f6bf3cc"}, + {file = "kiwisolver-1.4.8-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a5ce1e481a74b44dd5e92ff03ea0cb371ae7a0268318e202be06c8f04f4f1246"}, + {file = "kiwisolver-1.4.8-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:fc2ace710ba7c1dfd1a3b42530b62b9ceed115f19a1656adefce7b1782a37794"}, + {file = "kiwisolver-1.4.8-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:3452046c37c7692bd52b0e752b87954ef86ee2224e624ef7ce6cb21e8c41cc1b"}, + {file = "kiwisolver-1.4.8-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:7e9a60b50fe8b2ec6f448fe8d81b07e40141bfced7f896309df271a0b92f80f3"}, + {file = "kiwisolver-1.4.8-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:918139571133f366e8362fa4a297aeba86c7816b7ecf0bc79168080e2bd79957"}, + {file = "kiwisolver-1.4.8-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e063ef9f89885a1d68dd8b2e18f5ead48653176d10a0e324e3b0030e3a69adeb"}, + {file = "kiwisolver-1.4.8-cp313-cp313-win_amd64.whl", hash = "sha256:a17b7c4f5b2c51bb68ed379defd608a03954a1845dfed7cc0117f1cc8a9b7fd2"}, + {file = "kiwisolver-1.4.8-cp313-cp313-win_arm64.whl", hash = "sha256:3cd3bc628b25f74aedc6d374d5babf0166a92ff1317f46267f12d2ed54bc1d30"}, + {file = "kiwisolver-1.4.8-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:370fd2df41660ed4e26b8c9d6bbcad668fbe2560462cba151a721d49e5b6628c"}, + {file = "kiwisolver-1.4.8-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:84a2f830d42707de1d191b9490ac186bf7997a9495d4e9072210a1296345f7dc"}, + {file = "kiwisolver-1.4.8-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:7a3ad337add5148cf51ce0b55642dc551c0b9d6248458a757f98796ca7348712"}, + {file = "kiwisolver-1.4.8-cp313-cp313t-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7506488470f41169b86d8c9aeff587293f530a23a23a49d6bc64dab66bedc71e"}, + {file = 
"kiwisolver-1.4.8-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2f0121b07b356a22fb0414cec4666bbe36fd6d0d759db3d37228f496ed67c880"}, + {file = "kiwisolver-1.4.8-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d6d6bd87df62c27d4185de7c511c6248040afae67028a8a22012b010bc7ad062"}, + {file = "kiwisolver-1.4.8-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:291331973c64bb9cce50bbe871fb2e675c4331dab4f31abe89f175ad7679a4d7"}, + {file = "kiwisolver-1.4.8-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:893f5525bb92d3d735878ec00f781b2de998333659507d29ea4466208df37bed"}, + {file = "kiwisolver-1.4.8-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:b47a465040146981dc9db8647981b8cb96366fbc8d452b031e4f8fdffec3f26d"}, + {file = "kiwisolver-1.4.8-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:99cea8b9dd34ff80c521aef46a1dddb0dcc0283cf18bde6d756f1e6f31772165"}, + {file = "kiwisolver-1.4.8-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:151dffc4865e5fe6dafce5480fab84f950d14566c480c08a53c663a0020504b6"}, + {file = "kiwisolver-1.4.8-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:577facaa411c10421314598b50413aa1ebcf5126f704f1e5d72d7e4e9f020d90"}, + {file = "kiwisolver-1.4.8-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:be4816dc51c8a471749d664161b434912eee82f2ea66bd7628bd14583a833e85"}, + {file = "kiwisolver-1.4.8-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:e7a019419b7b510f0f7c9dceff8c5eae2392037eae483a7f9162625233802b0a"}, + {file = "kiwisolver-1.4.8-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:286b18e86682fd2217a48fc6be6b0f20c1d0ed10958d8dc53453ad58d7be0bf8"}, + {file = "kiwisolver-1.4.8-pp310-pypy310_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4191ee8dfd0be1c3666ccbac178c5a05d5f8d689bbe3fc92f3c4abec817f8fe0"}, + {file = "kiwisolver-1.4.8-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7cd2785b9391f2873ad46088ed7599a6a71e762e1ea33e87514b1a441ed1da1c"}, + {file = "kiwisolver-1.4.8-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c07b29089b7ba090b6f1a669f1411f27221c3662b3a1b7010e67b59bb5a6f10b"}, + {file = "kiwisolver-1.4.8-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:65ea09a5a3faadd59c2ce96dc7bf0f364986a315949dc6374f04396b0d60e09b"}, + {file = "kiwisolver-1.4.8.tar.gz", hash = "sha256:23d5f023bdc8c7e54eb65f03ca5d5bb25b601eac4d7f1a042888a1f45237987e"}, ] [[package]] @@ -3162,32 +3124,32 @@ test = ["pytest (>=7.4)", "pytest-cov (>=4.1)"] [[package]] name = "llvmlite" -version = "0.43.0" +version = "0.44.0rc2" description = "lightweight wrapper around basic LLVM functionality" optional = false -python-versions = ">=3.9" +python-versions = ">=3.10" files = [ - {file = "llvmlite-0.43.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a289af9a1687c6cf463478f0fa8e8aa3b6fb813317b0d70bf1ed0759eab6f761"}, - {file = "llvmlite-0.43.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6d4fd101f571a31acb1559ae1af30f30b1dc4b3186669f92ad780e17c81e91bc"}, - {file = "llvmlite-0.43.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7d434ec7e2ce3cc8f452d1cd9a28591745de022f931d67be688a737320dfcead"}, - {file = "llvmlite-0.43.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6912a87782acdff6eb8bf01675ed01d60ca1f2551f8176a300a886f09e836a6a"}, - {file = 
"llvmlite-0.43.0-cp310-cp310-win_amd64.whl", hash = "sha256:14f0e4bf2fd2d9a75a3534111e8ebeb08eda2f33e9bdd6dfa13282afacdde0ed"}, - {file = "llvmlite-0.43.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3e8d0618cb9bfe40ac38a9633f2493d4d4e9fcc2f438d39a4e854f39cc0f5f98"}, - {file = "llvmlite-0.43.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e0a9a1a39d4bf3517f2af9d23d479b4175ead205c592ceeb8b89af48a327ea57"}, - {file = "llvmlite-0.43.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c1da416ab53e4f7f3bc8d4eeba36d801cc1894b9fbfbf2022b29b6bad34a7df2"}, - {file = "llvmlite-0.43.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:977525a1e5f4059316b183fb4fd34fa858c9eade31f165427a3977c95e3ee749"}, - {file = "llvmlite-0.43.0-cp311-cp311-win_amd64.whl", hash = "sha256:d5bd550001d26450bd90777736c69d68c487d17bf371438f975229b2b8241a91"}, - {file = "llvmlite-0.43.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:f99b600aa7f65235a5a05d0b9a9f31150c390f31261f2a0ba678e26823ec38f7"}, - {file = "llvmlite-0.43.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:35d80d61d0cda2d767f72de99450766250560399edc309da16937b93d3b676e7"}, - {file = "llvmlite-0.43.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eccce86bba940bae0d8d48ed925f21dbb813519169246e2ab292b5092aba121f"}, - {file = "llvmlite-0.43.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:df6509e1507ca0760787a199d19439cc887bfd82226f5af746d6977bd9f66844"}, - {file = "llvmlite-0.43.0-cp312-cp312-win_amd64.whl", hash = "sha256:7a2872ee80dcf6b5dbdc838763d26554c2a18aa833d31a2635bff16aafefb9c9"}, - {file = "llvmlite-0.43.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9cd2a7376f7b3367019b664c21f0c61766219faa3b03731113ead75107f3b66c"}, - {file = "llvmlite-0.43.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:18e9953c748b105668487b7c81a3e97b046d8abf95c4ddc0cd3c94f4e4651ae8"}, - {file = "llvmlite-0.43.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:74937acd22dc11b33946b67dca7680e6d103d6e90eeaaaf932603bec6fe7b03a"}, - {file = "llvmlite-0.43.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc9efc739cc6ed760f795806f67889923f7274276f0eb45092a1473e40d9b867"}, - {file = "llvmlite-0.43.0-cp39-cp39-win_amd64.whl", hash = "sha256:47e147cdda9037f94b399bf03bfd8a6b6b1f2f90be94a454e3386f006455a9b4"}, - {file = "llvmlite-0.43.0.tar.gz", hash = "sha256:ae2b5b5c3ef67354824fb75517c8db5fbe93bc02cd9671f3c62271626bc041d5"}, + {file = "llvmlite-0.44.0rc2-cp310-cp310-macosx_10_14_x86_64.whl", hash = "sha256:2361232623575929bdebfe863f7d8d3cdb9d11bc797f35471ff279066ac442de"}, + {file = "llvmlite-0.44.0rc2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:cdd695293102d0ffe71d4b82a9d62614fd85e98348ad0018cb66768992afa974"}, + {file = "llvmlite-0.44.0rc2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3060c556ca075066f1503542051b4dd17224bff0cc6eef882225275ed76106f6"}, + {file = "llvmlite-0.44.0rc2-cp310-cp310-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:36e25e70ffa24c7250caf8fb44fdca6cf999c94be411fffa391a131b8715aea3"}, + {file = "llvmlite-0.44.0rc2-cp310-cp310-win_amd64.whl", hash = "sha256:769b045c9214b1952be8973ba1b243b2d65f177e844e117dc0b7e2a82c76b741"}, + {file = "llvmlite-0.44.0rc2-cp311-cp311-macosx_10_14_x86_64.whl", hash = "sha256:19f432d8f2d399f735b783646d109553fdf4020d80f3d0dd7b01eb57a8084b3c"}, + {file = 
"llvmlite-0.44.0rc2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a18c621b625aef39b0be504c89c382464488af0a900fb34a4d7df4a0afd10d88"}, + {file = "llvmlite-0.44.0rc2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:784eecc2ae4cb7cafc95268fdaea766d0b97b42af0e69da1c21b2049b268b5ba"}, + {file = "llvmlite-0.44.0rc2-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4d7108d74c7ddeb87881c4fcfef55a0e413a9ddb3ee065b8d71b30c4b077bc10"}, + {file = "llvmlite-0.44.0rc2-cp311-cp311-win_amd64.whl", hash = "sha256:abc082dea48a999ad6af894d6b9a99869a8a43ccdfbc2d38d33b75a4f0d8dc56"}, + {file = "llvmlite-0.44.0rc2-cp312-cp312-macosx_10_14_x86_64.whl", hash = "sha256:4a3022e67e52e01c69145499295d726eb44f8ae14e46a2b87fd334fd5430b14c"}, + {file = "llvmlite-0.44.0rc2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b775e2c238574fc3b0c58d972db632a85523c8a39d9c45448b272c5c0ddd13b5"}, + {file = "llvmlite-0.44.0rc2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e1ff57aa5f7745a4a776c8e91511f4b27614cf942b34bda7739453df8aee8ba"}, + {file = "llvmlite-0.44.0rc2-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f2a09df18b9af81157cf889b7db9907708e47daf1e8b939d394a6e1f785c9d41"}, + {file = "llvmlite-0.44.0rc2-cp312-cp312-win_amd64.whl", hash = "sha256:e7fedaaa41427bb7f87fa7262e3d1b04d7f279f3f571f7370442f115bf29a5b8"}, + {file = "llvmlite-0.44.0rc2-cp313-cp313-macosx_10_14_x86_64.whl", hash = "sha256:90469b05004a04435377a3de6aee04e12a4f6d2c9a7d62e262681b34a28c7080"}, + {file = "llvmlite-0.44.0rc2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:cf3040e9e00e22fd62750e1e045393f0d28ee1c7a92adfc7486b6701c3212a49"}, + {file = "llvmlite-0.44.0rc2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:87e9fc737b1323b76a51f18620c41577cbcb5c66f8c83a546431ea562ae1b9d9"}, + {file = "llvmlite-0.44.0rc2-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4450f1086db328cdd3a4a1aa9960273730646ee6ff3d4cb99e6e5e17e7872e47"}, + {file = "llvmlite-0.44.0rc2-cp313-cp313-win_amd64.whl", hash = "sha256:04fde53f84591e13fcd3e89a38159017b3536902c62594418584c796d3d38fc0"}, + {file = "llvmlite-0.44.0rc2.tar.gz", hash = "sha256:7731d125071842c2465ecd9fd59736da7ae70d8c6d04566ede98296e8779a8ed"}, ] [[package]] @@ -3370,121 +3332,115 @@ testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions"] [[package]] name = "markupsafe" -version = "3.0.1" +version = "3.0.2" description = "Safely add untrusted strings to HTML/XML markup." 
optional = false python-versions = ">=3.9" files = [ - {file = "MarkupSafe-3.0.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:db842712984e91707437461930e6011e60b39136c7331e971952bb30465bc1a1"}, - {file = "MarkupSafe-3.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3ffb4a8e7d46ed96ae48805746755fadd0909fea2306f93d5d8233ba23dda12a"}, - {file = "MarkupSafe-3.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:67c519635a4f64e495c50e3107d9b4075aec33634272b5db1cde839e07367589"}, - {file = "MarkupSafe-3.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:48488d999ed50ba8d38c581d67e496f955821dc183883550a6fbc7f1aefdc170"}, - {file = "MarkupSafe-3.0.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f31ae06f1328595d762c9a2bf29dafd8621c7d3adc130cbb46278079758779ca"}, - {file = "MarkupSafe-3.0.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:80fcbf3add8790caddfab6764bde258b5d09aefbe9169c183f88a7410f0f6dea"}, - {file = "MarkupSafe-3.0.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:3341c043c37d78cc5ae6e3e305e988532b072329639007fd408a476642a89fd6"}, - {file = "MarkupSafe-3.0.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:cb53e2a99df28eee3b5f4fea166020d3ef9116fdc5764bc5117486e6d1211b25"}, - {file = "MarkupSafe-3.0.1-cp310-cp310-win32.whl", hash = "sha256:db15ce28e1e127a0013dfb8ac243a8e392db8c61eae113337536edb28bdc1f97"}, - {file = "MarkupSafe-3.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:4ffaaac913c3f7345579db4f33b0020db693f302ca5137f106060316761beea9"}, - {file = "MarkupSafe-3.0.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:26627785a54a947f6d7336ce5963569b5d75614619e75193bdb4e06e21d447ad"}, - {file = "MarkupSafe-3.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b954093679d5750495725ea6f88409946d69cfb25ea7b4c846eef5044194f583"}, - {file = "MarkupSafe-3.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:973a371a55ce9ed333a3a0f8e0bcfae9e0d637711534bcb11e130af2ab9334e7"}, - {file = "MarkupSafe-3.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:244dbe463d5fb6d7ce161301a03a6fe744dac9072328ba9fc82289238582697b"}, - {file = "MarkupSafe-3.0.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d98e66a24497637dd31ccab090b34392dddb1f2f811c4b4cd80c230205c074a3"}, - {file = "MarkupSafe-3.0.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:ad91738f14eb8da0ff82f2acd0098b6257621410dcbd4df20aaa5b4233d75a50"}, - {file = "MarkupSafe-3.0.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:7044312a928a66a4c2a22644147bc61a199c1709712069a344a3fb5cfcf16915"}, - {file = "MarkupSafe-3.0.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:a4792d3b3a6dfafefdf8e937f14906a51bd27025a36f4b188728a73382231d91"}, - {file = "MarkupSafe-3.0.1-cp311-cp311-win32.whl", hash = "sha256:fa7d686ed9883f3d664d39d5a8e74d3c5f63e603c2e3ff0abcba23eac6542635"}, - {file = "MarkupSafe-3.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:9ba25a71ebf05b9bb0e2ae99f8bc08a07ee8e98c612175087112656ca0f5c8bf"}, - {file = "MarkupSafe-3.0.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:8ae369e84466aa70f3154ee23c1451fda10a8ee1b63923ce76667e3077f2b0c4"}, - {file = "MarkupSafe-3.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40f1e10d51c92859765522cbd79c5c8989f40f0419614bcdc5015e7b6bf97fc5"}, - {file = 
"MarkupSafe-3.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5a4cb365cb49b750bdb60b846b0c0bc49ed62e59a76635095a179d440540c346"}, - {file = "MarkupSafe-3.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ee3941769bd2522fe39222206f6dd97ae83c442a94c90f2b7a25d847d40f4729"}, - {file = "MarkupSafe-3.0.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:62fada2c942702ef8952754abfc1a9f7658a4d5460fabe95ac7ec2cbe0d02abc"}, - {file = "MarkupSafe-3.0.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:4c2d64fdba74ad16138300815cfdc6ab2f4647e23ced81f59e940d7d4a1469d9"}, - {file = "MarkupSafe-3.0.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:fb532dd9900381d2e8f48172ddc5a59db4c445a11b9fab40b3b786da40d3b56b"}, - {file = "MarkupSafe-3.0.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:0f84af7e813784feb4d5e4ff7db633aba6c8ca64a833f61d8e4eade234ef0c38"}, - {file = "MarkupSafe-3.0.1-cp312-cp312-win32.whl", hash = "sha256:cbf445eb5628981a80f54087f9acdbf84f9b7d862756110d172993b9a5ae81aa"}, - {file = "MarkupSafe-3.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:a10860e00ded1dd0a65b83e717af28845bb7bd16d8ace40fe5531491de76b79f"}, - {file = "MarkupSafe-3.0.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:e81c52638315ff4ac1b533d427f50bc0afc746deb949210bc85f05d4f15fd772"}, - {file = "MarkupSafe-3.0.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:312387403cd40699ab91d50735ea7a507b788091c416dd007eac54434aee51da"}, - {file = "MarkupSafe-3.0.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2ae99f31f47d849758a687102afdd05bd3d3ff7dbab0a8f1587981b58a76152a"}, - {file = "MarkupSafe-3.0.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c97ff7fedf56d86bae92fa0a646ce1a0ec7509a7578e1ed238731ba13aabcd1c"}, - {file = "MarkupSafe-3.0.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a7420ceda262dbb4b8d839a4ec63d61c261e4e77677ed7c66c99f4e7cb5030dd"}, - {file = "MarkupSafe-3.0.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:45d42d132cff577c92bfba536aefcfea7e26efb975bd455db4e6602f5c9f45e7"}, - {file = "MarkupSafe-3.0.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:4c8817557d0de9349109acb38b9dd570b03cc5014e8aabf1cbddc6e81005becd"}, - {file = "MarkupSafe-3.0.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6a54c43d3ec4cf2a39f4387ad044221c66a376e58c0d0e971d47c475ba79c6b5"}, - {file = "MarkupSafe-3.0.1-cp313-cp313-win32.whl", hash = "sha256:c91b394f7601438ff79a4b93d16be92f216adb57d813a78be4446fe0f6bc2d8c"}, - {file = "MarkupSafe-3.0.1-cp313-cp313-win_amd64.whl", hash = "sha256:fe32482b37b4b00c7a52a07211b479653b7fe4f22b2e481b9a9b099d8a430f2f"}, - {file = "MarkupSafe-3.0.1-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:17b2aea42a7280db02ac644db1d634ad47dcc96faf38ab304fe26ba2680d359a"}, - {file = "MarkupSafe-3.0.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:852dc840f6d7c985603e60b5deaae1d89c56cb038b577f6b5b8c808c97580f1d"}, - {file = "MarkupSafe-3.0.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0778de17cff1acaeccc3ff30cd99a3fd5c50fc58ad3d6c0e0c4c58092b859396"}, - {file = "MarkupSafe-3.0.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:800100d45176652ded796134277ecb13640c1a537cad3b8b53da45aa96330453"}, - {file = 
"MarkupSafe-3.0.1-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d06b24c686a34c86c8c1fba923181eae6b10565e4d80bdd7bc1c8e2f11247aa4"}, - {file = "MarkupSafe-3.0.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:33d1c36b90e570ba7785dacd1faaf091203d9942bc036118fab8110a401eb1a8"}, - {file = "MarkupSafe-3.0.1-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:beeebf760a9c1f4c07ef6a53465e8cfa776ea6a2021eda0d0417ec41043fe984"}, - {file = "MarkupSafe-3.0.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:bbde71a705f8e9e4c3e9e33db69341d040c827c7afa6789b14c6e16776074f5a"}, - {file = "MarkupSafe-3.0.1-cp313-cp313t-win32.whl", hash = "sha256:82b5dba6eb1bcc29cc305a18a3c5365d2af06ee71b123216416f7e20d2a84e5b"}, - {file = "MarkupSafe-3.0.1-cp313-cp313t-win_amd64.whl", hash = "sha256:730d86af59e0e43ce277bb83970530dd223bf7f2a838e086b50affa6ec5f9295"}, - {file = "MarkupSafe-3.0.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:4935dd7883f1d50e2ffecca0aa33dc1946a94c8f3fdafb8df5c330e48f71b132"}, - {file = "MarkupSafe-3.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e9393357f19954248b00bed7c56f29a25c930593a77630c719653d51e7669c2a"}, - {file = "MarkupSafe-3.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40621d60d0e58aa573b68ac5e2d6b20d44392878e0bfc159012a5787c4e35bc8"}, - {file = "MarkupSafe-3.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f94190df587738280d544971500b9cafc9b950d32efcb1fba9ac10d84e6aa4e6"}, - {file = "MarkupSafe-3.0.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b6a387d61fe41cdf7ea95b38e9af11cfb1a63499af2759444b99185c4ab33f5b"}, - {file = "MarkupSafe-3.0.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:8ad4ad1429cd4f315f32ef263c1342166695fad76c100c5d979c45d5570ed58b"}, - {file = "MarkupSafe-3.0.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:e24bfe89c6ac4c31792793ad9f861b8f6dc4546ac6dc8f1c9083c7c4f2b335cd"}, - {file = "MarkupSafe-3.0.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:2a4b34a8d14649315c4bc26bbfa352663eb51d146e35eef231dd739d54a5430a"}, - {file = "MarkupSafe-3.0.1-cp39-cp39-win32.whl", hash = "sha256:242d6860f1fd9191aef5fae22b51c5c19767f93fb9ead4d21924e0bcb17619d8"}, - {file = "MarkupSafe-3.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:93e8248d650e7e9d49e8251f883eed60ecbc0e8ffd6349e18550925e31bd029b"}, - {file = "markupsafe-3.0.1.tar.gz", hash = "sha256:3e683ee4f5d0fa2dde4db77ed8dd8a876686e3fc417655c2ece9a90576905344"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7e94c425039cde14257288fd61dcfb01963e658efbc0ff54f5306b06054700f8"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9e2d922824181480953426608b81967de705c3cef4d1af983af849d7bd619158"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:38a9ef736c01fccdd6600705b09dc574584b89bea478200c5fbf112a6b0d5579"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bbcb445fa71794da8f178f0f6d66789a28d7319071af7a496d4d507ed566270d"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:57cb5a3cf367aeb1d316576250f65edec5bb3be939e9247ae594b4bcbc317dfb"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = 
"sha256:3809ede931876f5b2ec92eef964286840ed3540dadf803dd570c3b7e13141a3b"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e07c3764494e3776c602c1e78e298937c3315ccc9043ead7e685b7f2b8d47b3c"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:b424c77b206d63d500bcb69fa55ed8d0e6a3774056bdc4839fc9298a7edca171"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-win32.whl", hash = "sha256:fcabf5ff6eea076f859677f5f0b6b5c1a51e70a376b0579e0eadef8db48c6b50"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:6af100e168aa82a50e186c82875a5893c5597a0c1ccdb0d8b40240b1f28b969a"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9025b4018f3a1314059769c7bf15441064b2207cb3f065e6ea1e7359cb46db9d"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:93335ca3812df2f366e80509ae119189886b0f3c2b81325d39efdb84a1e2ae93"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2cb8438c3cbb25e220c2ab33bb226559e7afb3baec11c4f218ffa7308603c832"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a123e330ef0853c6e822384873bef7507557d8e4a082961e1defa947aa59ba84"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1e084f686b92e5b83186b07e8a17fc09e38fff551f3602b249881fec658d3eca"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d8213e09c917a951de9d09ecee036d5c7d36cb6cb7dbaece4c71a60d79fb9798"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:5b02fb34468b6aaa40dfc198d813a641e3a63b98c2b05a16b9f80b7ec314185e"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:0bff5e0ae4ef2e1ae4fdf2dfd5b76c75e5c2fa4132d05fc1b0dabcd20c7e28c4"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-win32.whl", hash = "sha256:6c89876f41da747c8d3677a2b540fb32ef5715f97b66eeb0c6b66f5e3ef6f59d"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:70a87b411535ccad5ef2f1df5136506a10775d267e197e4cf531ced10537bd6b"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:9778bd8ab0a994ebf6f84c2b949e65736d5575320a17ae8984a77fab08db94cf"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:846ade7b71e3536c4e56b386c2a47adf5741d2d8b94ec9dc3e92e5e1ee1e2225"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1c99d261bd2d5f6b59325c92c73df481e05e57f19837bdca8413b9eac4bd8028"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e17c96c14e19278594aa4841ec148115f9c7615a47382ecb6b82bd8fea3ab0c8"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:88416bd1e65dcea10bc7569faacb2c20ce071dd1f87539ca2ab364bf6231393c"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2181e67807fc2fa785d0592dc2d6206c019b9502410671cc905d132a92866557"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:52305740fe773d09cffb16f8ed0427942901f00adedac82ec8b67752f58a1b22"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ad10d3ded218f1039f11a75f8091880239651b52e9bb592ca27de44eed242a48"}, + {file = 
"MarkupSafe-3.0.2-cp312-cp312-win32.whl", hash = "sha256:0f4ca02bea9a23221c0182836703cbf8930c5e9454bacce27e767509fa286a30"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:8e06879fc22a25ca47312fbe7c8264eb0b662f6db27cb2d3bbbc74b1df4b9b87"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ba9527cdd4c926ed0760bc301f6728ef34d841f405abf9d4f959c478421e4efd"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f8b3d067f2e40fe93e1ccdd6b2e1d16c43140e76f02fb1319a05cf2b79d99430"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:569511d3b58c8791ab4c2e1285575265991e6d8f8700c7be0e88f86cb0672094"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:15ab75ef81add55874e7ab7055e9c397312385bd9ced94920f2802310c930396"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f3818cb119498c0678015754eba762e0d61e5b52d34c8b13d770f0719f7b1d79"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:cdb82a876c47801bb54a690c5ae105a46b392ac6099881cdfb9f6e95e4014c6a"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:cabc348d87e913db6ab4aa100f01b08f481097838bdddf7c7a84b7575b7309ca"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:444dcda765c8a838eaae23112db52f1efaf750daddb2d9ca300bcae1039adc5c"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-win32.whl", hash = "sha256:bcf3e58998965654fdaff38e58584d8937aa3096ab5354d493c77d1fdd66d7a1"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:e6a2a455bd412959b57a172ce6328d2dd1f01cb2135efda2e4576e8a23fa3b0f"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:b5a6b3ada725cea8a5e634536b1b01c30bcdcd7f9c6fff4151548d5bf6b3a36c"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:a904af0a6162c73e3edcb969eeeb53a63ceeb5d8cf642fade7d39e7963a22ddb"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4aa4e5faecf353ed117801a068ebab7b7e09ffb6e1d5e412dc852e0da018126c"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0ef13eaeee5b615fb07c9a7dadb38eac06a0608b41570d8ade51c56539e509d"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d16a81a06776313e817c951135cf7340a3e91e8c1ff2fac444cfd75fffa04afe"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:6381026f158fdb7c72a168278597a5e3a5222e83ea18f543112b2662a9b699c5"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:3d79d162e7be8f996986c064d1c7c817f6df3a77fe3d6859f6f9e7be4b8c213a"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:131a3c7689c85f5ad20f9f6fb1b866f402c445b220c19fe4308c0b147ccd2ad9"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-win32.whl", hash = "sha256:ba8062ed2cf21c07a9e295d5b8a2a5ce678b913b45fdf68c32d95d6c1291e0b6"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-win_amd64.whl", hash = "sha256:e444a31f8db13eb18ada366ab3cf45fd4b31e4db1236a4448f68778c1d1a5a2f"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-macosx_10_9_universal2.whl", hash = 
"sha256:eaa0a10b7f72326f1372a713e73c3f739b524b3af41feb43e4921cb529f5929a"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:48032821bbdf20f5799ff537c7ac3d1fba0ba032cfc06194faffa8cda8b560ff"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a9d3f5f0901fdec14d8d2f66ef7d035f2157240a433441719ac9a3fba440b13"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:88b49a3b9ff31e19998750c38e030fc7bb937398b1f78cfa599aaef92d693144"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cfad01eed2c2e0c01fd0ecd2ef42c492f7f93902e39a42fc9ee1692961443a29"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:1225beacc926f536dc82e45f8a4d68502949dc67eea90eab715dea3a21c1b5f0"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:3169b1eefae027567d1ce6ee7cae382c57fe26e82775f460f0b2778beaad66c0"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:eb7972a85c54febfb25b5c4b4f3af4dcc731994c7da0d8a0b4a6eb0640e1d178"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-win32.whl", hash = "sha256:8c4e8c3ce11e1f92f6536ff07154f9d49677ebaaafc32db9db4620bc11ed480f"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:6e296a513ca3d94054c2c881cc913116e90fd030ad1c656b3869762b754f5f8a"}, + {file = "markupsafe-3.0.2.tar.gz", hash = "sha256:ee55d3edf80167e48ea11a923c7386f4669df67d7994554387f84e7d8b0a2bf0"}, ] [[package]] name = "matplotlib" -version = "3.9.2" +version = "3.10.0" description = "Python plotting package" optional = true -python-versions = ">=3.9" +python-versions = ">=3.10" files = [ - {file = "matplotlib-3.9.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:9d78bbc0cbc891ad55b4f39a48c22182e9bdaea7fc0e5dbd364f49f729ca1bbb"}, - {file = "matplotlib-3.9.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c375cc72229614632c87355366bdf2570c2dac01ac66b8ad048d2dabadf2d0d4"}, - {file = "matplotlib-3.9.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1d94ff717eb2bd0b58fe66380bd8b14ac35f48a98e7c6765117fe67fb7684e64"}, - {file = "matplotlib-3.9.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ab68d50c06938ef28681073327795c5db99bb4666214d2d5f880ed11aeaded66"}, - {file = "matplotlib-3.9.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:65aacf95b62272d568044531e41de26285d54aec8cb859031f511f84bd8b495a"}, - {file = "matplotlib-3.9.2-cp310-cp310-win_amd64.whl", hash = "sha256:3fd595f34aa8a55b7fc8bf9ebea8aa665a84c82d275190a61118d33fbc82ccae"}, - {file = "matplotlib-3.9.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:d8dd059447824eec055e829258ab092b56bb0579fc3164fa09c64f3acd478772"}, - {file = "matplotlib-3.9.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c797dac8bb9c7a3fd3382b16fe8f215b4cf0f22adccea36f1545a6d7be310b41"}, - {file = "matplotlib-3.9.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d719465db13267bcef19ea8954a971db03b9f48b4647e3860e4bc8e6ed86610f"}, - {file = "matplotlib-3.9.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8912ef7c2362f7193b5819d17dae8629b34a95c58603d781329712ada83f9447"}, - {file = "matplotlib-3.9.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:7741f26a58a240f43bee74965c4882b6c93df3e7eb3de160126d8c8f53a6ae6e"}, - {file = 
"matplotlib-3.9.2-cp311-cp311-win_amd64.whl", hash = "sha256:ae82a14dab96fbfad7965403c643cafe6515e386de723e498cf3eeb1e0b70cc7"}, - {file = "matplotlib-3.9.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:ac43031375a65c3196bee99f6001e7fa5bdfb00ddf43379d3c0609bdca042df9"}, - {file = "matplotlib-3.9.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:be0fc24a5e4531ae4d8e858a1a548c1fe33b176bb13eff7f9d0d38ce5112a27d"}, - {file = "matplotlib-3.9.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bf81de2926c2db243c9b2cbc3917619a0fc85796c6ba4e58f541df814bbf83c7"}, - {file = "matplotlib-3.9.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f6ee45bc4245533111ced13f1f2cace1e7f89d1c793390392a80c139d6cf0e6c"}, - {file = "matplotlib-3.9.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:306c8dfc73239f0e72ac50e5a9cf19cc4e8e331dd0c54f5e69ca8758550f1e1e"}, - {file = "matplotlib-3.9.2-cp312-cp312-win_amd64.whl", hash = "sha256:5413401594cfaff0052f9d8b1aafc6d305b4bd7c4331dccd18f561ff7e1d3bd3"}, - {file = "matplotlib-3.9.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:18128cc08f0d3cfff10b76baa2f296fc28c4607368a8402de61bb3f2eb33c7d9"}, - {file = "matplotlib-3.9.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:4876d7d40219e8ae8bb70f9263bcbe5714415acfdf781086601211335e24f8aa"}, - {file = "matplotlib-3.9.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6d9f07a80deab4bb0b82858a9e9ad53d1382fd122be8cde11080f4e7dfedb38b"}, - {file = "matplotlib-3.9.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f7c0410f181a531ec4e93bbc27692f2c71a15c2da16766f5ba9761e7ae518413"}, - {file = "matplotlib-3.9.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:909645cce2dc28b735674ce0931a4ac94e12f5b13f6bb0b5a5e65e7cea2c192b"}, - {file = "matplotlib-3.9.2-cp313-cp313-win_amd64.whl", hash = "sha256:f32c7410c7f246838a77d6d1eff0c0f87f3cb0e7c4247aebea71a6d5a68cab49"}, - {file = "matplotlib-3.9.2-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:37e51dd1c2db16ede9cfd7b5cabdfc818b2c6397c83f8b10e0e797501c963a03"}, - {file = "matplotlib-3.9.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:b82c5045cebcecd8496a4d694d43f9cc84aeeb49fe2133e036b207abe73f4d30"}, - {file = "matplotlib-3.9.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f053c40f94bc51bc03832a41b4f153d83f2062d88c72b5e79997072594e97e51"}, - {file = "matplotlib-3.9.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dbe196377a8248972f5cede786d4c5508ed5f5ca4a1e09b44bda889958b33f8c"}, - {file = "matplotlib-3.9.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:5816b1e1fe8c192cbc013f8f3e3368ac56fbecf02fb41b8f8559303f24c5015e"}, - {file = "matplotlib-3.9.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:cef2a73d06601437be399908cf13aee74e86932a5ccc6ccdf173408ebc5f6bb2"}, - {file = "matplotlib-3.9.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e0830e188029c14e891fadd99702fd90d317df294c3298aad682739c5533721a"}, - {file = "matplotlib-3.9.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:03ba9c1299c920964e8d3857ba27173b4dbb51ca4bab47ffc2c2ba0eb5e2cbc5"}, - {file = "matplotlib-3.9.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1cd93b91ab47a3616b4d3c42b52f8363b88ca021e340804c6ab2536344fad9ca"}, - {file = "matplotlib-3.9.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = 
"sha256:6d1ce5ed2aefcdce11904fc5bbea7d9c21fff3d5f543841edf3dea84451a09ea"}, - {file = "matplotlib-3.9.2-cp39-cp39-win_amd64.whl", hash = "sha256:b2696efdc08648536efd4e1601b5fd491fd47f4db97a5fbfd175549a7365c1b2"}, - {file = "matplotlib-3.9.2-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:d52a3b618cb1cbb769ce2ee1dcdb333c3ab6e823944e9a2d36e37253815f9556"}, - {file = "matplotlib-3.9.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:039082812cacd6c6bec8e17a9c1e6baca230d4116d522e81e1f63a74d01d2e21"}, - {file = "matplotlib-3.9.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6758baae2ed64f2331d4fd19be38b7b4eae3ecec210049a26b6a4f3ae1c85dcc"}, - {file = "matplotlib-3.9.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:050598c2b29e0b9832cde72bcf97627bf00262adbc4a54e2b856426bb2ef0697"}, - {file = "matplotlib-3.9.2.tar.gz", hash = "sha256:96ab43906269ca64a6366934106fa01534454a69e471b7bf3d79083981aaab92"}, + {file = "matplotlib-3.10.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2c5829a5a1dd5a71f0e31e6e8bb449bc0ee9dbfb05ad28fc0c6b55101b3a4be6"}, + {file = "matplotlib-3.10.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a2a43cbefe22d653ab34bb55d42384ed30f611bcbdea1f8d7f431011a2e1c62e"}, + {file = "matplotlib-3.10.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:607b16c8a73943df110f99ee2e940b8a1cbf9714b65307c040d422558397dac5"}, + {file = "matplotlib-3.10.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:01d2b19f13aeec2e759414d3bfe19ddfb16b13a1250add08d46d5ff6f9be83c6"}, + {file = "matplotlib-3.10.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:5e6c6461e1fc63df30bf6f80f0b93f5b6784299f721bc28530477acd51bfc3d1"}, + {file = "matplotlib-3.10.0-cp310-cp310-win_amd64.whl", hash = "sha256:994c07b9d9fe8d25951e3202a68c17900679274dadfc1248738dcfa1bd40d7f3"}, + {file = "matplotlib-3.10.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:fd44fc75522f58612ec4a33958a7e5552562b7705b42ef1b4f8c0818e304a363"}, + {file = "matplotlib-3.10.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c58a9622d5dbeb668f407f35f4e6bfac34bb9ecdcc81680c04d0258169747997"}, + {file = "matplotlib-3.10.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:845d96568ec873be63f25fa80e9e7fae4be854a66a7e2f0c8ccc99e94a8bd4ef"}, + {file = "matplotlib-3.10.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5439f4c5a3e2e8eab18e2f8c3ef929772fd5641876db71f08127eed95ab64683"}, + {file = "matplotlib-3.10.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:4673ff67a36152c48ddeaf1135e74ce0d4bce1bbf836ae40ed39c29edf7e2765"}, + {file = "matplotlib-3.10.0-cp311-cp311-win_amd64.whl", hash = "sha256:7e8632baebb058555ac0cde75db885c61f1212e47723d63921879806b40bec6a"}, + {file = "matplotlib-3.10.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:4659665bc7c9b58f8c00317c3c2a299f7f258eeae5a5d56b4c64226fca2f7c59"}, + {file = "matplotlib-3.10.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d44cb942af1693cced2604c33a9abcef6205601c445f6d0dc531d813af8a2f5a"}, + {file = "matplotlib-3.10.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a994f29e968ca002b50982b27168addfd65f0105610b6be7fa515ca4b5307c95"}, + {file = "matplotlib-3.10.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9b0558bae37f154fffda54d779a592bc97ca8b4701f1c710055b609a3bac44c8"}, + {file = "matplotlib-3.10.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = 
"sha256:503feb23bd8c8acc75541548a1d709c059b7184cde26314896e10a9f14df5f12"}, + {file = "matplotlib-3.10.0-cp312-cp312-win_amd64.whl", hash = "sha256:c40ba2eb08b3f5de88152c2333c58cee7edcead0a2a0d60fcafa116b17117adc"}, + {file = "matplotlib-3.10.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:96f2886f5c1e466f21cc41b70c5a0cd47bfa0015eb2d5793c88ebce658600e25"}, + {file = "matplotlib-3.10.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:12eaf48463b472c3c0f8dbacdbf906e573013df81a0ab82f0616ea4b11281908"}, + {file = "matplotlib-3.10.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2fbbabc82fde51391c4da5006f965e36d86d95f6ee83fb594b279564a4c5d0d2"}, + {file = "matplotlib-3.10.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ad2e15300530c1a94c63cfa546e3b7864bd18ea2901317bae8bbf06a5ade6dcf"}, + {file = "matplotlib-3.10.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:3547d153d70233a8496859097ef0312212e2689cdf8d7ed764441c77604095ae"}, + {file = "matplotlib-3.10.0-cp313-cp313-win_amd64.whl", hash = "sha256:c55b20591ced744aa04e8c3e4b7543ea4d650b6c3c4b208c08a05b4010e8b442"}, + {file = "matplotlib-3.10.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:9ade1003376731a971e398cc4ef38bb83ee8caf0aee46ac6daa4b0506db1fd06"}, + {file = "matplotlib-3.10.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:95b710fea129c76d30be72c3b38f330269363fbc6e570a5dd43580487380b5ff"}, + {file = "matplotlib-3.10.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5cdbaf909887373c3e094b0318d7ff230b2ad9dcb64da7ade654182872ab2593"}, + {file = "matplotlib-3.10.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d907fddb39f923d011875452ff1eca29a9e7f21722b873e90db32e5d8ddff12e"}, + {file = "matplotlib-3.10.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:3b427392354d10975c1d0f4ee18aa5844640b512d5311ef32efd4dd7db106ede"}, + {file = "matplotlib-3.10.0-cp313-cp313t-win_amd64.whl", hash = "sha256:5fd41b0ec7ee45cd960a8e71aea7c946a28a0b8a4dcee47d2856b2af051f334c"}, + {file = "matplotlib-3.10.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:81713dd0d103b379de4516b861d964b1d789a144103277769238c732229d7f03"}, + {file = "matplotlib-3.10.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:359f87baedb1f836ce307f0e850d12bb5f1936f70d035561f90d41d305fdacea"}, + {file = "matplotlib-3.10.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ae80dc3a4add4665cf2faa90138384a7ffe2a4e37c58d83e115b54287c4f06ef"}, + {file = "matplotlib-3.10.0.tar.gz", hash = "sha256:b886d02a581b96704c9d1ffe55709e49b4d2d52709ccebc4be42db856e511278"}, ] [package.dependencies] @@ -3499,7 +3455,7 @@ pyparsing = ">=2.3.1" python-dateutil = ">=2.7" [package.extras] -dev = ["meson-python (>=0.13.1)", "numpy (>=1.25)", "pybind11 (>=2.6)", "setuptools (>=64)", "setuptools_scm (>=7)"] +dev = ["meson-python (>=0.13.1,<0.17.0)", "pybind11 (>=2.13.2,!=2.13.3)", "setuptools (>=64)", "setuptools_scm (>=7)"] [[package]] name = "matplotlib-inline" @@ -3557,15 +3513,18 @@ all = ["h5py", "netCDF4"] [[package]] name = "mistune" -version = "3.0.2" +version = "3.1.0" description = "A sane and fast Markdown parser with useful plugins and renderers" optional = true -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "mistune-3.0.2-py3-none-any.whl", hash = "sha256:71481854c30fdbc938963d3605b72501f5c10a9320ecd412c121c163a1c7d205"}, - {file = "mistune-3.0.2.tar.gz", hash = 
"sha256:fc7f93ded930c92394ef2cb6f04a8aabab4117a91449e72dcc8dfa646a508be8"}, + {file = "mistune-3.1.0-py3-none-any.whl", hash = "sha256:b05198cf6d671b3deba6c87ec6cf0d4eb7b72c524636eddb6dbf13823b52cee1"}, + {file = "mistune-3.1.0.tar.gz", hash = "sha256:dbcac2f78292b9dc066cd03b7a3a26b62d85f8159f2ea5fd28e55df79908d667"}, ] +[package.dependencies] +typing-extensions = {version = "*", markers = "python_version < \"3.11\""} + [[package]] name = "mpmath" version = "1.3.0" @@ -3764,13 +3723,13 @@ files = [ [[package]] name = "nbclient" -version = "0.10.0" +version = "0.10.2" description = "A client library for executing notebooks. Formerly nbconvert's ExecutePreprocessor." optional = true -python-versions = ">=3.8.0" +python-versions = ">=3.9.0" files = [ - {file = "nbclient-0.10.0-py3-none-any.whl", hash = "sha256:f13e3529332a1f1f81d82a53210322476a168bb7090a0289c795fe9cc11c9d3f"}, - {file = "nbclient-0.10.0.tar.gz", hash = "sha256:4b3f1b7dba531e498449c4db4f53da339c91d449dc11e9af3a43b4eb5c5abb09"}, + {file = "nbclient-0.10.2-py3-none-any.whl", hash = "sha256:4ffee11e788b4a27fabeb7955547e4318a5298f34342a4bfd01f2e1faaeadc3d"}, + {file = "nbclient-0.10.2.tar.gz", hash = "sha256:90b7fc6b810630db87a6d0c2250b1f0ab4cf4d3c27a299b0cde78a4ed3fd9193"}, ] [package.dependencies] @@ -3781,23 +3740,23 @@ traitlets = ">=5.4" [package.extras] dev = ["pre-commit"] -docs = ["autodoc-traits", "mock", "moto", "myst-parser", "nbclient[test]", "sphinx (>=1.7)", "sphinx-book-theme", "sphinxcontrib-spelling"] -test = ["flaky", "ipykernel (>=6.19.3)", "ipython", "ipywidgets", "nbconvert (>=7.0.0)", "pytest (>=7.0,<8)", "pytest-asyncio", "pytest-cov (>=4.0)", "testpath", "xmltodict"] +docs = ["autodoc-traits", "flaky", "ipykernel (>=6.19.3)", "ipython", "ipywidgets", "mock", "moto", "myst-parser", "nbconvert (>=7.1.0)", "pytest (>=7.0,<8)", "pytest-asyncio", "pytest-cov (>=4.0)", "sphinx (>=1.7)", "sphinx-book-theme", "sphinxcontrib-spelling", "testpath", "xmltodict"] +test = ["flaky", "ipykernel (>=6.19.3)", "ipython", "ipywidgets", "nbconvert (>=7.1.0)", "pytest (>=7.0,<8)", "pytest-asyncio", "pytest-cov (>=4.0)", "testpath", "xmltodict"] [[package]] name = "nbconvert" -version = "7.16.4" +version = "7.16.5" description = "Converting Jupyter Notebooks (.ipynb files) to other formats. Output formats include asciidoc, html, latex, markdown, pdf, py, rst, script. nbconvert can be used both as a Python library (`import nbconvert`) or as a command line tool (invoked as `jupyter nbconvert ...`)." 
optional = true python-versions = ">=3.8" files = [ - {file = "nbconvert-7.16.4-py3-none-any.whl", hash = "sha256:05873c620fe520b6322bf8a5ad562692343fe3452abda5765c7a34b7d1aa3eb3"}, - {file = "nbconvert-7.16.4.tar.gz", hash = "sha256:86ca91ba266b0a448dc96fa6c5b9d98affabde2867b363258703536807f9f7f4"}, + {file = "nbconvert-7.16.5-py3-none-any.whl", hash = "sha256:e12eac052d6fd03040af4166c563d76e7aeead2e9aadf5356db552a1784bd547"}, + {file = "nbconvert-7.16.5.tar.gz", hash = "sha256:c83467bb5777fdfaac5ebbb8e864f300b277f68692ecc04d6dab72f2d8442344"}, ] [package.dependencies] beautifulsoup4 = "*" -bleach = "!=5.0.0" +bleach = {version = "!=5.0.0", extras = ["css"]} defusedxml = "*" jinja2 = ">=3.0" jupyter-core = ">=4.7" @@ -3809,7 +3768,6 @@ nbformat = ">=5.7" packaging = "*" pandocfilters = ">=1.4.1" pygments = ">=2.4.1" -tinycss2 = "*" traitlets = ">=5.1" [package.extras] @@ -3855,13 +3813,13 @@ files = [ [[package]] name = "networkx" -version = "3.4" +version = "3.4.2" description = "Python package for creating and manipulating graphs and networks" optional = false python-versions = ">=3.10" files = [ - {file = "networkx-3.4-py3-none-any.whl", hash = "sha256:46dad0ec74a825a968e2b36c37ef5b91faa3868f017b2283d9cbff33112222ce"}, - {file = "networkx-3.4.tar.gz", hash = "sha256:1269b90f8f0d3a4095f016f49650f35ac169729f49b69d0572b2bb142748162b"}, + {file = "networkx-3.4.2-py3-none-any.whl", hash = "sha256:df5d4365b724cf81b8c6a7312509d0c22386097011ad1abe274afd5e9d3bbc5f"}, + {file = "networkx-3.4.2.tar.gz", hash = "sha256:307c3669428c5362aab27c8a1260aa8f47c4e91d3891f48be0141738d8d053e1"}, ] [package.extras] @@ -3897,18 +3855,18 @@ files = [ [[package]] name = "notebook" -version = "7.2.2" +version = "7.3.2" description = "Jupyter Notebook - A web-based notebook environment for interactive computing" optional = true python-versions = ">=3.8" files = [ - {file = "notebook-7.2.2-py3-none-any.whl", hash = "sha256:c89264081f671bc02eec0ed470a627ed791b9156cad9285226b31611d3e9fe1c"}, - {file = "notebook-7.2.2.tar.gz", hash = "sha256:2ef07d4220421623ad3fe88118d687bc0450055570cdd160814a59cf3a1c516e"}, + {file = "notebook-7.3.2-py3-none-any.whl", hash = "sha256:e5f85fc59b69d3618d73cf27544418193ff8e8058d5bf61d315ce4f473556288"}, + {file = "notebook-7.3.2.tar.gz", hash = "sha256:705e83a1785f45b383bf3ee13cb76680b92d24f56fb0c7d2136fe1d850cd3ca8"}, ] [package.dependencies] jupyter-server = ">=2.4.0,<3" -jupyterlab = ">=4.2.0,<4.3" +jupyterlab = ">=4.3.4,<4.4" jupyterlab-server = ">=2.27.1,<3" notebook-shim = ">=0.2,<0.3" tornado = ">=6.2.0" @@ -3937,37 +3895,37 @@ test = ["pytest", "pytest-console-scripts", "pytest-jupyter", "pytest-tornasync" [[package]] name = "numba" -version = "0.60.0" +version = "0.61.0rc2" description = "compiling Python code using LLVM" optional = false -python-versions = ">=3.9" +python-versions = ">=3.10" files = [ - {file = "numba-0.60.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:5d761de835cd38fb400d2c26bb103a2726f548dc30368853121d66201672e651"}, - {file = "numba-0.60.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:159e618ef213fba758837f9837fb402bbe65326e60ba0633dbe6c7f274d42c1b"}, - {file = "numba-0.60.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:1527dc578b95c7c4ff248792ec33d097ba6bef9eda466c948b68dfc995c25781"}, - {file = "numba-0.60.0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:fe0b28abb8d70f8160798f4de9d486143200f34458d34c4a214114e445d7124e"}, - {file = "numba-0.60.0-cp310-cp310-win_amd64.whl", hash = 
"sha256:19407ced081d7e2e4b8d8c36aa57b7452e0283871c296e12d798852bc7d7f198"}, - {file = "numba-0.60.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a17b70fc9e380ee29c42717e8cc0bfaa5556c416d94f9aa96ba13acb41bdece8"}, - {file = "numba-0.60.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3fb02b344a2a80efa6f677aa5c40cd5dd452e1b35f8d1c2af0dfd9ada9978e4b"}, - {file = "numba-0.60.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:5f4fde652ea604ea3c86508a3fb31556a6157b2c76c8b51b1d45eb40c8598703"}, - {file = "numba-0.60.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:4142d7ac0210cc86432b818338a2bc368dc773a2f5cf1e32ff7c5b378bd63ee8"}, - {file = "numba-0.60.0-cp311-cp311-win_amd64.whl", hash = "sha256:cac02c041e9b5bc8cf8f2034ff6f0dbafccd1ae9590dc146b3a02a45e53af4e2"}, - {file = "numba-0.60.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:d7da4098db31182fc5ffe4bc42c6f24cd7d1cb8a14b59fd755bfee32e34b8404"}, - {file = "numba-0.60.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:38d6ea4c1f56417076ecf8fc327c831ae793282e0ff51080c5094cb726507b1c"}, - {file = "numba-0.60.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:62908d29fb6a3229c242e981ca27e32a6e606cc253fc9e8faeb0e48760de241e"}, - {file = "numba-0.60.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:0ebaa91538e996f708f1ab30ef4d3ddc344b64b5227b67a57aa74f401bb68b9d"}, - {file = "numba-0.60.0-cp312-cp312-win_amd64.whl", hash = "sha256:f75262e8fe7fa96db1dca93d53a194a38c46da28b112b8a4aca168f0df860347"}, - {file = "numba-0.60.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:01ef4cd7d83abe087d644eaa3d95831b777aa21d441a23703d649e06b8e06b74"}, - {file = "numba-0.60.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:819a3dfd4630d95fd574036f99e47212a1af41cbcb019bf8afac63ff56834449"}, - {file = "numba-0.60.0-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:0b983bd6ad82fe868493012487f34eae8bf7dd94654951404114f23c3466d34b"}, - {file = "numba-0.60.0-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c151748cd269ddeab66334bd754817ffc0cabd9433acb0f551697e5151917d25"}, - {file = "numba-0.60.0-cp39-cp39-win_amd64.whl", hash = "sha256:3031547a015710140e8c87226b4cfe927cac199835e5bf7d4fe5cb64e814e3ab"}, - {file = "numba-0.60.0.tar.gz", hash = "sha256:5df6158e5584eece5fc83294b949fd30b9f1125df7708862205217e068aabf16"}, -] - -[package.dependencies] -llvmlite = "==0.43.*" -numpy = ">=1.22,<2.1" + {file = "numba-0.61.0rc2-cp310-cp310-macosx_10_14_x86_64.whl", hash = "sha256:77dc4fec42aad29b3a0f1cbfee87c21c1586aad47e27c68271ce6ebdf809be0f"}, + {file = "numba-0.61.0rc2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:7587feed3509b959d2227a40ac784e36e4bdc35c0d213c71fd1239412cad73df"}, + {file = "numba-0.61.0rc2-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:2497241349335b0f6fc0c75fa64adce6e97ae114f49f1cc1024dae8da8ba14a6"}, + {file = "numba-0.61.0rc2-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:d2ed9cf79ecceb2ef730dbb0ea5f40a9d1304ef75d16b2608cc6f658c3922780"}, + {file = "numba-0.61.0rc2-cp310-cp310-win_amd64.whl", hash = "sha256:b70dbb249bff1daeb2a69cfaf430745575b44812a3a2f5724e596a141e6be27a"}, + {file = "numba-0.61.0rc2-cp311-cp311-macosx_10_14_x86_64.whl", hash = "sha256:89b73b8d10d3b7a446958f242dca482fc264dd6426409bac668a4ee90879a2a1"}, + {file = "numba-0.61.0rc2-cp311-cp311-macosx_11_0_arm64.whl", hash = 
"sha256:7aba5dd383ef99d1ee107e9c3fd53d1fcdb211eae6e4d2be229ed9e4e11ce249"}, + {file = "numba-0.61.0rc2-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:e19d28a8e2f8736b2f9b1cebab75e8231f298519df6b97190e199d3e93af5311"}, + {file = "numba-0.61.0rc2-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:c023478d11ba54eb1af570858286a5d4dcd05bc6ef656333d08b76d4c2dde243"}, + {file = "numba-0.61.0rc2-cp311-cp311-win_amd64.whl", hash = "sha256:3c765bd90d1c3fc70e688514441657dd739dfad785d7834fe514b071cded03fc"}, + {file = "numba-0.61.0rc2-cp312-cp312-macosx_10_14_x86_64.whl", hash = "sha256:e97d244a9b241ad63d2a29ce3d2e71345668ba05a981bd650c1802da133dc56b"}, + {file = "numba-0.61.0rc2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:45ecbda0ce90883077075aa904870ffbfdaa78f289adbc6941f4a89e1ca1b155"}, + {file = "numba-0.61.0rc2-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:1a80d9fbe948c7d0b9005dfb7eef5d3f21414b5dcd41280526602e9255808a5b"}, + {file = "numba-0.61.0rc2-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:49addd490c3d08d7b72f8589b6ab25fdd84a288c236dc4060c70697f8a804986"}, + {file = "numba-0.61.0rc2-cp312-cp312-win_amd64.whl", hash = "sha256:6ef4ea1d5cdbfda89c12f1645153bf5044571fcc0452d0769bed1f02121004e2"}, + {file = "numba-0.61.0rc2-cp313-cp313-macosx_10_14_x86_64.whl", hash = "sha256:44eca1801402510aa9c7b9867a1a3bcc8db2257be0fb3aaf3f4d85ba9417bb02"}, + {file = "numba-0.61.0rc2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:9703e1f2a2c7b6a5c6a7799c2693d24c545813619c3d2868db9b0663e2dfd627"}, + {file = "numba-0.61.0rc2-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:38dbf5701734ae86f4526d1ac2e890596e4ae543ce5d3221a8d903e4c6240eea"}, + {file = "numba-0.61.0rc2-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:7498f69aae607a7628711b7df3fb4ab850628ed9c101e4aa3bfabf94a9fc09be"}, + {file = "numba-0.61.0rc2-cp313-cp313-win_amd64.whl", hash = "sha256:f93baaf90b12ee2ef01adeef5298bd29780a6655134af8c91f76e1c7790ea96a"}, + {file = "numba-0.61.0rc2.tar.gz", hash = "sha256:630b8ff1ddd567b488cb09c439cebcf750b95f960562f43e234c94a334fee14b"}, +] + +[package.dependencies] +llvmlite = "==0.44.*" +numpy = ">=1.24,<2.2" [[package]] name = "numcodecs" @@ -4008,64 +3966,87 @@ zfpy = ["numpy (<2.0.0)", "zfpy (>=1.0.0)"] [[package]] name = "numpy" -version = "1.26.4" +version = "2.1.3" description = "Fundamental package for array computing in Python" optional = false -python-versions = ">=3.9" +python-versions = ">=3.10" files = [ - {file = "numpy-1.26.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:9ff0f4f29c51e2803569d7a51c2304de5554655a60c5d776e35b4a41413830d0"}, - {file = "numpy-1.26.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2e4ee3380d6de9c9ec04745830fd9e2eccb3e6cf790d39d7b98ffd19b0dd754a"}, - {file = "numpy-1.26.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d209d8969599b27ad20994c8e41936ee0964e6da07478d6c35016bc386b66ad4"}, - {file = "numpy-1.26.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ffa75af20b44f8dba823498024771d5ac50620e6915abac414251bd971b4529f"}, - {file = "numpy-1.26.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:62b8e4b1e28009ef2846b4c7852046736bab361f7aeadeb6a5b89ebec3c7055a"}, - {file = "numpy-1.26.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a4abb4f9001ad2858e7ac189089c42178fcce737e4169dc61321660f1a96c7d2"}, - {file = "numpy-1.26.4-cp310-cp310-win32.whl", hash = 
"sha256:bfe25acf8b437eb2a8b2d49d443800a5f18508cd811fea3181723922a8a82b07"}, - {file = "numpy-1.26.4-cp310-cp310-win_amd64.whl", hash = "sha256:b97fe8060236edf3662adfc2c633f56a08ae30560c56310562cb4f95500022d5"}, - {file = "numpy-1.26.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4c66707fabe114439db9068ee468c26bbdf909cac0fb58686a42a24de1760c71"}, - {file = "numpy-1.26.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:edd8b5fe47dab091176d21bb6de568acdd906d1887a4584a15a9a96a1dca06ef"}, - {file = "numpy-1.26.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7ab55401287bfec946ced39700c053796e7cc0e3acbef09993a9ad2adba6ca6e"}, - {file = "numpy-1.26.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:666dbfb6ec68962c033a450943ded891bed2d54e6755e35e5835d63f4f6931d5"}, - {file = "numpy-1.26.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:96ff0b2ad353d8f990b63294c8986f1ec3cb19d749234014f4e7eb0112ceba5a"}, - {file = "numpy-1.26.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:60dedbb91afcbfdc9bc0b1f3f402804070deed7392c23eb7a7f07fa857868e8a"}, - {file = "numpy-1.26.4-cp311-cp311-win32.whl", hash = "sha256:1af303d6b2210eb850fcf03064d364652b7120803a0b872f5211f5234b399f20"}, - {file = "numpy-1.26.4-cp311-cp311-win_amd64.whl", hash = "sha256:cd25bcecc4974d09257ffcd1f098ee778f7834c3ad767fe5db785be9a4aa9cb2"}, - {file = "numpy-1.26.4-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b3ce300f3644fb06443ee2222c2201dd3a89ea6040541412b8fa189341847218"}, - {file = "numpy-1.26.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:03a8c78d01d9781b28a6989f6fa1bb2c4f2d51201cf99d3dd875df6fbd96b23b"}, - {file = "numpy-1.26.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9fad7dcb1aac3c7f0584a5a8133e3a43eeb2fe127f47e3632d43d677c66c102b"}, - {file = "numpy-1.26.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:675d61ffbfa78604709862923189bad94014bef562cc35cf61d3a07bba02a7ed"}, - {file = "numpy-1.26.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:ab47dbe5cc8210f55aa58e4805fe224dac469cde56b9f731a4c098b91917159a"}, - {file = "numpy-1.26.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:1dda2e7b4ec9dd512f84935c5f126c8bd8b9f2fc001e9f54af255e8c5f16b0e0"}, - {file = "numpy-1.26.4-cp312-cp312-win32.whl", hash = "sha256:50193e430acfc1346175fcbdaa28ffec49947a06918b7b92130744e81e640110"}, - {file = "numpy-1.26.4-cp312-cp312-win_amd64.whl", hash = "sha256:08beddf13648eb95f8d867350f6a018a4be2e5ad54c8d8caed89ebca558b2818"}, - {file = "numpy-1.26.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7349ab0fa0c429c82442a27a9673fc802ffdb7c7775fad780226cb234965e53c"}, - {file = "numpy-1.26.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:52b8b60467cd7dd1e9ed082188b4e6bb35aa5cdd01777621a1658910745b90be"}, - {file = "numpy-1.26.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d5241e0a80d808d70546c697135da2c613f30e28251ff8307eb72ba696945764"}, - {file = "numpy-1.26.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f870204a840a60da0b12273ef34f7051e98c3b5961b61b0c2c1be6dfd64fbcd3"}, - {file = "numpy-1.26.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:679b0076f67ecc0138fd2ede3a8fd196dddc2ad3254069bcb9faf9a79b1cebcd"}, - {file = "numpy-1.26.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:47711010ad8555514b434df65f7d7b076bb8261df1ca9bb78f53d3b2db02e95c"}, - {file = "numpy-1.26.4-cp39-cp39-win32.whl", hash = 
"sha256:a354325ee03388678242a4d7ebcd08b5c727033fcff3b2f536aea978e15ee9e6"}, - {file = "numpy-1.26.4-cp39-cp39-win_amd64.whl", hash = "sha256:3373d5d70a5fe74a2c1bb6d2cfd9609ecf686d47a2d7b1d37a8f3b6bf6003aea"}, - {file = "numpy-1.26.4-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:afedb719a9dcfc7eaf2287b839d8198e06dcd4cb5d276a3df279231138e83d30"}, - {file = "numpy-1.26.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95a7476c59002f2f6c590b9b7b998306fba6a5aa646b1e22ddfeaf8f78c3a29c"}, - {file = "numpy-1.26.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7e50d0a0cc3189f9cb0aeb3a6a6af18c16f59f004b866cd2be1c14b36134a4a0"}, - {file = "numpy-1.26.4.tar.gz", hash = "sha256:2a02aba9ed12e4ac4eb3ea9421c420301a0c6460d9830d74a9df87efa4912010"}, + {file = "numpy-2.1.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c894b4305373b9c5576d7a12b473702afdf48ce5369c074ba304cc5ad8730dff"}, + {file = "numpy-2.1.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:b47fbb433d3260adcd51eb54f92a2ffbc90a4595f8970ee00e064c644ac788f5"}, + {file = "numpy-2.1.3-cp310-cp310-macosx_14_0_arm64.whl", hash = "sha256:825656d0743699c529c5943554d223c021ff0494ff1442152ce887ef4f7561a1"}, + {file = "numpy-2.1.3-cp310-cp310-macosx_14_0_x86_64.whl", hash = "sha256:6a4825252fcc430a182ac4dee5a505053d262c807f8a924603d411f6718b88fd"}, + {file = "numpy-2.1.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e711e02f49e176a01d0349d82cb5f05ba4db7d5e7e0defd026328e5cfb3226d3"}, + {file = "numpy-2.1.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:78574ac2d1a4a02421f25da9559850d59457bac82f2b8d7a44fe83a64f770098"}, + {file = "numpy-2.1.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c7662f0e3673fe4e832fe07b65c50342ea27d989f92c80355658c7f888fcc83c"}, + {file = "numpy-2.1.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:fa2d1337dc61c8dc417fbccf20f6d1e139896a30721b7f1e832b2bb6ef4eb6c4"}, + {file = "numpy-2.1.3-cp310-cp310-win32.whl", hash = "sha256:72dcc4a35a8515d83e76b58fdf8113a5c969ccd505c8a946759b24e3182d1f23"}, + {file = "numpy-2.1.3-cp310-cp310-win_amd64.whl", hash = "sha256:ecc76a9ba2911d8d37ac01de72834d8849e55473457558e12995f4cd53e778e0"}, + {file = "numpy-2.1.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4d1167c53b93f1f5d8a139a742b3c6f4d429b54e74e6b57d0eff40045187b15d"}, + {file = "numpy-2.1.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c80e4a09b3d95b4e1cac08643f1152fa71a0a821a2d4277334c88d54b2219a41"}, + {file = "numpy-2.1.3-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:576a1c1d25e9e02ed7fa5477f30a127fe56debd53b8d2c89d5578f9857d03ca9"}, + {file = "numpy-2.1.3-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:973faafebaae4c0aaa1a1ca1ce02434554d67e628b8d805e61f874b84e136b09"}, + {file = "numpy-2.1.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:762479be47a4863e261a840e8e01608d124ee1361e48b96916f38b119cfda04a"}, + {file = "numpy-2.1.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc6f24b3d1ecc1eebfbf5d6051faa49af40b03be1aaa781ebdadcbc090b4539b"}, + {file = "numpy-2.1.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:17ee83a1f4fef3c94d16dc1802b998668b5419362c8a4f4e8a491de1b41cc3ee"}, + {file = "numpy-2.1.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:15cb89f39fa6d0bdfb600ea24b250e5f1a3df23f901f51c8debaa6a5d122b2f0"}, + {file = "numpy-2.1.3-cp311-cp311-win32.whl", hash = 
"sha256:d9beb777a78c331580705326d2367488d5bc473b49a9bc3036c154832520aca9"}, + {file = "numpy-2.1.3-cp311-cp311-win_amd64.whl", hash = "sha256:d89dd2b6da69c4fff5e39c28a382199ddedc3a5be5390115608345dec660b9e2"}, + {file = "numpy-2.1.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:f55ba01150f52b1027829b50d70ef1dafd9821ea82905b63936668403c3b471e"}, + {file = "numpy-2.1.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:13138eadd4f4da03074851a698ffa7e405f41a0845a6b1ad135b81596e4e9958"}, + {file = "numpy-2.1.3-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:a6b46587b14b888e95e4a24d7b13ae91fa22386c199ee7b418f449032b2fa3b8"}, + {file = "numpy-2.1.3-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:0fa14563cc46422e99daef53d725d0c326e99e468a9320a240affffe87852564"}, + {file = "numpy-2.1.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8637dcd2caa676e475503d1f8fdb327bc495554e10838019651b76d17b98e512"}, + {file = "numpy-2.1.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2312b2aa89e1f43ecea6da6ea9a810d06aae08321609d8dc0d0eda6d946a541b"}, + {file = "numpy-2.1.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a38c19106902bb19351b83802531fea19dee18e5b37b36454f27f11ff956f7fc"}, + {file = "numpy-2.1.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:02135ade8b8a84011cbb67dc44e07c58f28575cf9ecf8ab304e51c05528c19f0"}, + {file = "numpy-2.1.3-cp312-cp312-win32.whl", hash = "sha256:e6988e90fcf617da2b5c78902fe8e668361b43b4fe26dbf2d7b0f8034d4cafb9"}, + {file = "numpy-2.1.3-cp312-cp312-win_amd64.whl", hash = "sha256:0d30c543f02e84e92c4b1f415b7c6b5326cbe45ee7882b6b77db7195fb971e3a"}, + {file = "numpy-2.1.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:96fe52fcdb9345b7cd82ecd34547fca4321f7656d500eca497eb7ea5a926692f"}, + {file = "numpy-2.1.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f653490b33e9c3a4c1c01d41bc2aef08f9475af51146e4a7710c450cf9761598"}, + {file = "numpy-2.1.3-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:dc258a761a16daa791081d026f0ed4399b582712e6fc887a95af09df10c5ca57"}, + {file = "numpy-2.1.3-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:016d0f6f5e77b0f0d45d77387ffa4bb89816b57c835580c3ce8e099ef830befe"}, + {file = "numpy-2.1.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c181ba05ce8299c7aa3125c27b9c2167bca4a4445b7ce73d5febc411ca692e43"}, + {file = "numpy-2.1.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5641516794ca9e5f8a4d17bb45446998c6554704d888f86df9b200e66bdcce56"}, + {file = "numpy-2.1.3-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ea4dedd6e394a9c180b33c2c872b92f7ce0f8e7ad93e9585312b0c5a04777a4a"}, + {file = "numpy-2.1.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:b0df3635b9c8ef48bd3be5f862cf71b0a4716fa0e702155c45067c6b711ddcef"}, + {file = "numpy-2.1.3-cp313-cp313-win32.whl", hash = "sha256:50ca6aba6e163363f132b5c101ba078b8cbd3fa92c7865fd7d4d62d9779ac29f"}, + {file = "numpy-2.1.3-cp313-cp313-win_amd64.whl", hash = "sha256:747641635d3d44bcb380d950679462fae44f54b131be347d5ec2bce47d3df9ed"}, + {file = "numpy-2.1.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:996bb9399059c5b82f76b53ff8bb686069c05acc94656bb259b1d63d04a9506f"}, + {file = "numpy-2.1.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:45966d859916ad02b779706bb43b954281db43e185015df6eb3323120188f9e4"}, + {file = "numpy-2.1.3-cp313-cp313t-macosx_14_0_arm64.whl", hash = 
"sha256:baed7e8d7481bfe0874b566850cb0b85243e982388b7b23348c6db2ee2b2ae8e"}, + {file = "numpy-2.1.3-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:a9f7f672a3388133335589cfca93ed468509cb7b93ba3105fce780d04a6576a0"}, + {file = "numpy-2.1.3-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7aac50327da5d208db2eec22eb11e491e3fe13d22653dce51b0f4109101b408"}, + {file = "numpy-2.1.3-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4394bc0dbd074b7f9b52024832d16e019decebf86caf909d94f6b3f77a8ee3b6"}, + {file = "numpy-2.1.3-cp313-cp313t-musllinux_1_1_x86_64.whl", hash = "sha256:50d18c4358a0a8a53f12a8ba9d772ab2d460321e6a93d6064fc22443d189853f"}, + {file = "numpy-2.1.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:14e253bd43fc6b37af4921b10f6add6925878a42a0c5fe83daee390bca80bc17"}, + {file = "numpy-2.1.3-cp313-cp313t-win32.whl", hash = "sha256:08788d27a5fd867a663f6fc753fd7c3ad7e92747efc73c53bca2f19f8bc06f48"}, + {file = "numpy-2.1.3-cp313-cp313t-win_amd64.whl", hash = "sha256:2564fbdf2b99b3f815f2107c1bbc93e2de8ee655a69c261363a1172a79a257d4"}, + {file = "numpy-2.1.3-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:4f2015dfe437dfebbfce7c85c7b53d81ba49e71ba7eadbf1df40c915af75979f"}, + {file = "numpy-2.1.3-pp310-pypy310_pp73-macosx_14_0_x86_64.whl", hash = "sha256:3522b0dfe983a575e6a9ab3a4a4dfe156c3e428468ff08ce582b9bb6bd1d71d4"}, + {file = "numpy-2.1.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c006b607a865b07cd981ccb218a04fc86b600411d83d6fc261357f1c0966755d"}, + {file = "numpy-2.1.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:e14e26956e6f1696070788252dcdff11b4aca4c3e8bd166e0df1bb8f315a67cb"}, + {file = "numpy-2.1.3.tar.gz", hash = "sha256:aa08e04e08aaf974d4458def539dece0d28146d866a39da5639596f4921fd761"}, ] [[package]] name = "numpy-stl" -version = "3.1.2" +version = "3.2.0" description = "Library to make reading, writing and modifying both binary and ascii STL files easy." 
optional = true -python-versions = ">3.6.0" +python-versions = ">3.9.0" files = [ - {file = "numpy_stl-3.1.2-py3-none-any.whl", hash = "sha256:a55288340c837378bf44753a1c595c6823312995acda97f27ed04db4ff1d25f3"}, - {file = "numpy_stl-3.1.2.tar.gz", hash = "sha256:72b46950dfa3642df1c7b873cfa78a548533724b907478c567db42fdf57ee3d2"}, + {file = "numpy_stl-3.2.0-py3-none-any.whl", hash = "sha256:697c81b107231362460aaedf4647e81ba54f54f59c896772fea7904c9c439da5"}, + {file = "numpy_stl-3.2.0.tar.gz", hash = "sha256:5a20c3f79cddaa0abc6a4b99f5486aceed4f88152f29b19a57acc844e183fd4d"}, ] [package.dependencies] numpy = "*" python-utils = ">=3.4.5" +[package.extras] +docs = ["mock", "python-utils", "sphinx"] +tests = ["Sphinx", "cov-core", "coverage", "cython", "docutils", "execnet", "flake8", "numpy", "pep8", "py", "pyflakes", "pytest", "pytest-cache", "pytest-cov", "python-utils", "wheel"] + [[package]] name = "nvidia-cublas-cu12" version = "12.1.3.1" @@ -4189,14 +4170,14 @@ files = [ [[package]] name = "nvidia-nvjitlink-cu12" -version = "12.6.77" +version = "12.6.85" description = "Nvidia JIT LTO Library" optional = false python-versions = ">=3" files = [ - {file = "nvidia_nvjitlink_cu12-12.6.77-py3-none-manylinux2014_aarch64.whl", hash = "sha256:3bf10d85bb1801e9c894c6e197e44dd137d2a0a9e43f8450e9ad13f2df0dd52d"}, - {file = "nvidia_nvjitlink_cu12-12.6.77-py3-none-manylinux2014_x86_64.whl", hash = "sha256:9ae346d16203ae4ea513be416495167a0101d33d2d14935aa9c1829a3fb45142"}, - {file = "nvidia_nvjitlink_cu12-12.6.77-py3-none-win_amd64.whl", hash = "sha256:410718cd44962bed862a31dd0318620f6f9a8b28a6291967bcfcb446a6516771"}, + {file = "nvidia_nvjitlink_cu12-12.6.85-py3-none-manylinux2010_x86_64.manylinux_2_12_x86_64.whl", hash = "sha256:eedc36df9e88b682efe4309aa16b5b4e78c2407eac59e8c10a6a47535164369a"}, + {file = "nvidia_nvjitlink_cu12-12.6.85-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:cf4eaa7d4b6b543ffd69d6abfb11efdeb2db48270d94dfd3a452c24150829e41"}, + {file = "nvidia_nvjitlink_cu12-12.6.85-py3-none-win_amd64.whl", hash = "sha256:e61120e52ed675747825cdd16febc6a0730537451d867ee58bee3853b1b13d1c"}, ] [[package]] @@ -4279,10 +4260,10 @@ files = [ [package.dependencies] numpy = [ + {version = ">=1.26.0", markers = "python_version >= \"3.12\""}, + {version = ">=1.23.5", markers = "python_version >= \"3.11\" and python_version < \"3.12\""}, {version = ">=1.21.4", markers = "python_version >= \"3.10\" and platform_system == \"Darwin\" and python_version < \"3.11\""}, {version = ">=1.21.2", markers = "platform_system != \"Darwin\" and python_version >= \"3.10\" and python_version < \"3.11\""}, - {version = ">=1.23.5", markers = "python_version >= \"3.11\" and python_version < \"3.12\""}, - {version = ">=1.26.0", markers = "python_version >= \"3.12\""}, ] [[package]] @@ -4303,21 +4284,21 @@ files = [ [package.dependencies] numpy = [ + {version = ">=1.26.0", markers = "python_version >= \"3.12\""}, + {version = ">=1.23.5", markers = "python_version >= \"3.11\" and python_version < \"3.12\""}, {version = ">=1.21.4", markers = "python_version >= \"3.10\" and platform_system == \"Darwin\" and python_version < \"3.11\""}, {version = ">=1.21.2", markers = "platform_system != \"Darwin\" and python_version >= \"3.10\" and python_version < \"3.11\""}, - {version = ">=1.23.5", markers = "python_version >= \"3.11\" and python_version < \"3.12\""}, - {version = ">=1.26.0", markers = "python_version >= \"3.12\""}, ] [[package]] name = "orderly-set" -version = "5.2.2" +version = "5.2.3" description = 
"Orderly set" optional = false python-versions = ">=3.8" files = [ - {file = "orderly_set-5.2.2-py3-none-any.whl", hash = "sha256:f7a37c95a38c01cdfe41c3ffb62925a318a2286ea0a41790c057fc802aec54da"}, - {file = "orderly_set-5.2.2.tar.gz", hash = "sha256:52a18b86aaf3f5d5a498bbdb27bf3253a4e5c57ab38e5b7a56fa00115cd28448"}, + {file = "orderly_set-5.2.3-py3-none-any.whl", hash = "sha256:d357cedcf67f4ebff0d4cbd5b0997e98eeb65dd24fdf5c990a501ae9e82c7d34"}, + {file = "orderly_set-5.2.3.tar.gz", hash = "sha256:571ed97c5a5fca7ddeb6b2d26c19aca896b0ed91f334d9c109edd2f265fb3017"}, ] [[package]] @@ -4333,13 +4314,13 @@ files = [ [[package]] name = "packaging" -version = "24.1" +version = "24.2" description = "Core utilities for Python packages" optional = false python-versions = ">=3.8" files = [ - {file = "packaging-24.1-py3-none-any.whl", hash = "sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124"}, - {file = "packaging-24.1.tar.gz", hash = "sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002"}, + {file = "packaging-24.2-py3-none-any.whl", hash = "sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759"}, + {file = "packaging-24.2.tar.gz", hash = "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f"}, ] [[package]] @@ -4395,9 +4376,9 @@ files = [ [package.dependencies] numpy = [ - {version = ">=1.22.4", markers = "python_version < \"3.11\""}, - {version = ">=1.23.2", markers = "python_version == \"3.11\""}, {version = ">=1.26.0", markers = "python_version >= \"3.12\""}, + {version = ">=1.23.2", markers = "python_version == \"3.11\""}, + {version = ">=1.22.4", markers = "python_version < \"3.11\""}, ] python-dateutil = ">=2.8.2" pytz = ">=2020.1" @@ -4520,98 +4501,89 @@ docs = ["Sphinx (>=4.1.2,<5.0.0)", "furo (>=2021.8.17-beta.43,<2022.0.0)", "myst [[package]] name = "pillow" -version = "10.4.0" +version = "11.1.0" description = "Python Imaging Library (Fork)" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "pillow-10.4.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:4d9667937cfa347525b319ae34375c37b9ee6b525440f3ef48542fcf66f2731e"}, - {file = "pillow-10.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:543f3dc61c18dafb755773efc89aae60d06b6596a63914107f75459cf984164d"}, - {file = "pillow-10.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7928ecbf1ece13956b95d9cbcfc77137652b02763ba384d9ab508099a2eca856"}, - {file = "pillow-10.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e4d49b85c4348ea0b31ea63bc75a9f3857869174e2bf17e7aba02945cd218e6f"}, - {file = "pillow-10.4.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:6c762a5b0997f5659a5ef2266abc1d8851ad7749ad9a6a5506eb23d314e4f46b"}, - {file = "pillow-10.4.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:a985e028fc183bf12a77a8bbf36318db4238a3ded7fa9df1b9a133f1cb79f8fc"}, - {file = "pillow-10.4.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:812f7342b0eee081eaec84d91423d1b4650bb9828eb53d8511bcef8ce5aecf1e"}, - {file = "pillow-10.4.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:ac1452d2fbe4978c2eec89fb5a23b8387aba707ac72810d9490118817d9c0b46"}, - {file = "pillow-10.4.0-cp310-cp310-win32.whl", hash = "sha256:bcd5e41a859bf2e84fdc42f4edb7d9aba0a13d29a2abadccafad99de3feff984"}, - {file = "pillow-10.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:ecd85a8d3e79cd7158dec1c9e5808e821feea088e2f69a974db5edf84dc53141"}, - {file = 
"pillow-10.4.0-cp310-cp310-win_arm64.whl", hash = "sha256:ff337c552345e95702c5fde3158acb0625111017d0e5f24bf3acdb9cc16b90d1"}, - {file = "pillow-10.4.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:0a9ec697746f268507404647e531e92889890a087e03681a3606d9b920fbee3c"}, - {file = "pillow-10.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:dfe91cb65544a1321e631e696759491ae04a2ea11d36715eca01ce07284738be"}, - {file = "pillow-10.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5dc6761a6efc781e6a1544206f22c80c3af4c8cf461206d46a1e6006e4429ff3"}, - {file = "pillow-10.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5e84b6cc6a4a3d76c153a6b19270b3526a5a8ed6b09501d3af891daa2a9de7d6"}, - {file = "pillow-10.4.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:bbc527b519bd3aa9d7f429d152fea69f9ad37c95f0b02aebddff592688998abe"}, - {file = "pillow-10.4.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:76a911dfe51a36041f2e756b00f96ed84677cdeb75d25c767f296c1c1eda1319"}, - {file = "pillow-10.4.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:59291fb29317122398786c2d44427bbd1a6d7ff54017075b22be9d21aa59bd8d"}, - {file = "pillow-10.4.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:416d3a5d0e8cfe4f27f574362435bc9bae57f679a7158e0096ad2beb427b8696"}, - {file = "pillow-10.4.0-cp311-cp311-win32.whl", hash = "sha256:7086cc1d5eebb91ad24ded9f58bec6c688e9f0ed7eb3dbbf1e4800280a896496"}, - {file = "pillow-10.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:cbed61494057c0f83b83eb3a310f0bf774b09513307c434d4366ed64f4128a91"}, - {file = "pillow-10.4.0-cp311-cp311-win_arm64.whl", hash = "sha256:f5f0c3e969c8f12dd2bb7e0b15d5c468b51e5017e01e2e867335c81903046a22"}, - {file = "pillow-10.4.0-cp312-cp312-macosx_10_10_x86_64.whl", hash = "sha256:673655af3eadf4df6b5457033f086e90299fdd7a47983a13827acf7459c15d94"}, - {file = "pillow-10.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:866b6942a92f56300012f5fbac71f2d610312ee65e22f1aa2609e491284e5597"}, - {file = "pillow-10.4.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:29dbdc4207642ea6aad70fbde1a9338753d33fb23ed6956e706936706f52dd80"}, - {file = "pillow-10.4.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf2342ac639c4cf38799a44950bbc2dfcb685f052b9e262f446482afaf4bffca"}, - {file = "pillow-10.4.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:f5b92f4d70791b4a67157321c4e8225d60b119c5cc9aee8ecf153aace4aad4ef"}, - {file = "pillow-10.4.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:86dcb5a1eb778d8b25659d5e4341269e8590ad6b4e8b44d9f4b07f8d136c414a"}, - {file = "pillow-10.4.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:780c072c2e11c9b2c7ca37f9a2ee8ba66f44367ac3e5c7832afcfe5104fd6d1b"}, - {file = "pillow-10.4.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:37fb69d905be665f68f28a8bba3c6d3223c8efe1edf14cc4cfa06c241f8c81d9"}, - {file = "pillow-10.4.0-cp312-cp312-win32.whl", hash = "sha256:7dfecdbad5c301d7b5bde160150b4db4c659cee2b69589705b6f8a0c509d9f42"}, - {file = "pillow-10.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:1d846aea995ad352d4bdcc847535bd56e0fd88d36829d2c90be880ef1ee4668a"}, - {file = "pillow-10.4.0-cp312-cp312-win_arm64.whl", hash = "sha256:e553cad5179a66ba15bb18b353a19020e73a7921296a7979c4a2b7f6a5cd57f9"}, - {file = "pillow-10.4.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:8bc1a764ed8c957a2e9cacf97c8b2b053b70307cf2996aafd70e91a082e70df3"}, - {file = 
"pillow-10.4.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:6209bb41dc692ddfee4942517c19ee81b86c864b626dbfca272ec0f7cff5d9fb"}, - {file = "pillow-10.4.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bee197b30783295d2eb680b311af15a20a8b24024a19c3a26431ff83eb8d1f70"}, - {file = "pillow-10.4.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1ef61f5dd14c300786318482456481463b9d6b91ebe5ef12f405afbba77ed0be"}, - {file = "pillow-10.4.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:297e388da6e248c98bc4a02e018966af0c5f92dfacf5a5ca22fa01cb3179bca0"}, - {file = "pillow-10.4.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:e4db64794ccdf6cb83a59d73405f63adbe2a1887012e308828596100a0b2f6cc"}, - {file = "pillow-10.4.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:bd2880a07482090a3bcb01f4265f1936a903d70bc740bfcb1fd4e8a2ffe5cf5a"}, - {file = "pillow-10.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4b35b21b819ac1dbd1233317adeecd63495f6babf21b7b2512d244ff6c6ce309"}, - {file = "pillow-10.4.0-cp313-cp313-win32.whl", hash = "sha256:551d3fd6e9dc15e4c1eb6fc4ba2b39c0c7933fa113b220057a34f4bb3268a060"}, - {file = "pillow-10.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:030abdbe43ee02e0de642aee345efa443740aa4d828bfe8e2eb11922ea6a21ea"}, - {file = "pillow-10.4.0-cp313-cp313-win_arm64.whl", hash = "sha256:5b001114dd152cfd6b23befeb28d7aee43553e2402c9f159807bf55f33af8a8d"}, - {file = "pillow-10.4.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:8d4d5063501b6dd4024b8ac2f04962d661222d120381272deea52e3fc52d3736"}, - {file = "pillow-10.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7c1ee6f42250df403c5f103cbd2768a28fe1a0ea1f0f03fe151c8741e1469c8b"}, - {file = "pillow-10.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b15e02e9bb4c21e39876698abf233c8c579127986f8207200bc8a8f6bb27acf2"}, - {file = "pillow-10.4.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7a8d4bade9952ea9a77d0c3e49cbd8b2890a399422258a77f357b9cc9be8d680"}, - {file = "pillow-10.4.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:43efea75eb06b95d1631cb784aa40156177bf9dd5b4b03ff38979e048258bc6b"}, - {file = "pillow-10.4.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:950be4d8ba92aca4b2bb0741285a46bfae3ca699ef913ec8416c1b78eadd64cd"}, - {file = "pillow-10.4.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:d7480af14364494365e89d6fddc510a13e5a2c3584cb19ef65415ca57252fb84"}, - {file = "pillow-10.4.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:73664fe514b34c8f02452ffb73b7a92c6774e39a647087f83d67f010eb9a0cf0"}, - {file = "pillow-10.4.0-cp38-cp38-win32.whl", hash = "sha256:e88d5e6ad0d026fba7bdab8c3f225a69f063f116462c49892b0149e21b6c0a0e"}, - {file = "pillow-10.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:5161eef006d335e46895297f642341111945e2c1c899eb406882a6c61a4357ab"}, - {file = "pillow-10.4.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:0ae24a547e8b711ccaaf99c9ae3cd975470e1a30caa80a6aaee9a2f19c05701d"}, - {file = "pillow-10.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:298478fe4f77a4408895605f3482b6cc6222c018b2ce565c2b6b9c354ac3229b"}, - {file = "pillow-10.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:134ace6dc392116566980ee7436477d844520a26a4b1bd4053f6f47d096997fd"}, - {file = "pillow-10.4.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:930044bb7679ab003b14023138b50181899da3f25de50e9dbee23b61b4de2126"}, - {file = "pillow-10.4.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:c76e5786951e72ed3686e122d14c5d7012f16c8303a674d18cdcd6d89557fc5b"}, - {file = "pillow-10.4.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:b2724fdb354a868ddf9a880cb84d102da914e99119211ef7ecbdc613b8c96b3c"}, - {file = "pillow-10.4.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:dbc6ae66518ab3c5847659e9988c3b60dc94ffb48ef9168656e0019a93dbf8a1"}, - {file = "pillow-10.4.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:06b2f7898047ae93fad74467ec3d28fe84f7831370e3c258afa533f81ef7f3df"}, - {file = "pillow-10.4.0-cp39-cp39-win32.whl", hash = "sha256:7970285ab628a3779aecc35823296a7869f889b8329c16ad5a71e4901a3dc4ef"}, - {file = "pillow-10.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:961a7293b2457b405967af9c77dcaa43cc1a8cd50d23c532e62d48ab6cdd56f5"}, - {file = "pillow-10.4.0-cp39-cp39-win_arm64.whl", hash = "sha256:32cda9e3d601a52baccb2856b8ea1fc213c90b340c542dcef77140dfa3278a9e"}, - {file = "pillow-10.4.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:5b4815f2e65b30f5fbae9dfffa8636d992d49705723fe86a3661806e069352d4"}, - {file = "pillow-10.4.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:8f0aef4ef59694b12cadee839e2ba6afeab89c0f39a3adc02ed51d109117b8da"}, - {file = "pillow-10.4.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9f4727572e2918acaa9077c919cbbeb73bd2b3ebcfe033b72f858fc9fbef0026"}, - {file = "pillow-10.4.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ff25afb18123cea58a591ea0244b92eb1e61a1fd497bf6d6384f09bc3262ec3e"}, - {file = "pillow-10.4.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:dc3e2db6ba09ffd7d02ae9141cfa0ae23393ee7687248d46a7507b75d610f4f5"}, - {file = "pillow-10.4.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:02a2be69f9c9b8c1e97cf2713e789d4e398c751ecfd9967c18d0ce304efbf885"}, - {file = "pillow-10.4.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:0755ffd4a0c6f267cccbae2e9903d95477ca2f77c4fcf3a3a09570001856c8a5"}, - {file = "pillow-10.4.0-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:a02364621fe369e06200d4a16558e056fe2805d3468350df3aef21e00d26214b"}, - {file = "pillow-10.4.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:1b5dea9831a90e9d0721ec417a80d4cbd7022093ac38a568db2dd78363b00908"}, - {file = "pillow-10.4.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9b885f89040bb8c4a1573566bbb2f44f5c505ef6e74cec7ab9068c900047f04b"}, - {file = "pillow-10.4.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:87dd88ded2e6d74d31e1e0a99a726a6765cda32d00ba72dc37f0651f306daaa8"}, - {file = "pillow-10.4.0-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:2db98790afc70118bd0255c2eeb465e9767ecf1f3c25f9a1abb8ffc8cfd1fe0a"}, - {file = "pillow-10.4.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:f7baece4ce06bade126fb84b8af1c33439a76d8a6fd818970215e0560ca28c27"}, - {file = "pillow-10.4.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:cfdd747216947628af7b259d274771d84db2268ca062dd5faf373639d00113a3"}, - {file = "pillow-10.4.0.tar.gz", hash = "sha256:166c1cd4d24309b30d61f79f4a9114b7b2313d7450912277855ff5dfd7cd4a06"}, -] - -[package.extras] -docs = ["furo", "olefile", "sphinx (>=7.3)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinxext-opengraph"] + {file = 
"pillow-11.1.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:e1abe69aca89514737465752b4bcaf8016de61b3be1397a8fc260ba33321b3a8"}, + {file = "pillow-11.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c640e5a06869c75994624551f45e5506e4256562ead981cce820d5ab39ae2192"}, + {file = "pillow-11.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a07dba04c5e22824816b2615ad7a7484432d7f540e6fa86af60d2de57b0fcee2"}, + {file = "pillow-11.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e267b0ed063341f3e60acd25c05200df4193e15a4a5807075cd71225a2386e26"}, + {file = "pillow-11.1.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:bd165131fd51697e22421d0e467997ad31621b74bfc0b75956608cb2906dda07"}, + {file = "pillow-11.1.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:abc56501c3fd148d60659aae0af6ddc149660469082859fa7b066a298bde9482"}, + {file = "pillow-11.1.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:54ce1c9a16a9561b6d6d8cb30089ab1e5eb66918cb47d457bd996ef34182922e"}, + {file = "pillow-11.1.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:73ddde795ee9b06257dac5ad42fcb07f3b9b813f8c1f7f870f402f4dc54b5269"}, + {file = "pillow-11.1.0-cp310-cp310-win32.whl", hash = "sha256:3a5fe20a7b66e8135d7fd617b13272626a28278d0e578c98720d9ba4b2439d49"}, + {file = "pillow-11.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:b6123aa4a59d75f06e9dd3dac5bf8bc9aa383121bb3dd9a7a612e05eabc9961a"}, + {file = "pillow-11.1.0-cp310-cp310-win_arm64.whl", hash = "sha256:a76da0a31da6fcae4210aa94fd779c65c75786bc9af06289cd1c184451ef7a65"}, + {file = "pillow-11.1.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:e06695e0326d05b06833b40b7ef477e475d0b1ba3a6d27da1bb48c23209bf457"}, + {file = "pillow-11.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:96f82000e12f23e4f29346e42702b6ed9a2f2fea34a740dd5ffffcc8c539eb35"}, + {file = "pillow-11.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a3cd561ded2cf2bbae44d4605837221b987c216cff94f49dfeed63488bb228d2"}, + {file = "pillow-11.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f189805c8be5ca5add39e6f899e6ce2ed824e65fb45f3c28cb2841911da19070"}, + {file = "pillow-11.1.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:dd0052e9db3474df30433f83a71b9b23bd9e4ef1de13d92df21a52c0303b8ab6"}, + {file = "pillow-11.1.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:837060a8599b8f5d402e97197d4924f05a2e0d68756998345c829c33186217b1"}, + {file = "pillow-11.1.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:aa8dd43daa836b9a8128dbe7d923423e5ad86f50a7a14dc688194b7be5c0dea2"}, + {file = "pillow-11.1.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:0a2f91f8a8b367e7a57c6e91cd25af510168091fb89ec5146003e424e1558a96"}, + {file = "pillow-11.1.0-cp311-cp311-win32.whl", hash = "sha256:c12fc111ef090845de2bb15009372175d76ac99969bdf31e2ce9b42e4b8cd88f"}, + {file = "pillow-11.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:fbd43429d0d7ed6533b25fc993861b8fd512c42d04514a0dd6337fb3ccf22761"}, + {file = "pillow-11.1.0-cp311-cp311-win_arm64.whl", hash = "sha256:f7955ecf5609dee9442cbface754f2c6e541d9e6eda87fad7f7a989b0bdb9d71"}, + {file = "pillow-11.1.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:2062ffb1d36544d42fcaa277b069c88b01bb7298f4efa06731a7fd6cc290b81a"}, + {file = "pillow-11.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a85b653980faad27e88b141348707ceeef8a1186f75ecc600c395dcac19f385b"}, + {file = 
"pillow-11.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9409c080586d1f683df3f184f20e36fb647f2e0bc3988094d4fd8c9f4eb1b3b3"}, + {file = "pillow-11.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7fdadc077553621911f27ce206ffcbec7d3f8d7b50e0da39f10997e8e2bb7f6a"}, + {file = "pillow-11.1.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:93a18841d09bcdd774dcdc308e4537e1f867b3dec059c131fde0327899734aa1"}, + {file = "pillow-11.1.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:9aa9aeddeed452b2f616ff5507459e7bab436916ccb10961c4a382cd3e03f47f"}, + {file = "pillow-11.1.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3cdcdb0b896e981678eee140d882b70092dac83ac1cdf6b3a60e2216a73f2b91"}, + {file = "pillow-11.1.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:36ba10b9cb413e7c7dfa3e189aba252deee0602c86c309799da5a74009ac7a1c"}, + {file = "pillow-11.1.0-cp312-cp312-win32.whl", hash = "sha256:cfd5cd998c2e36a862d0e27b2df63237e67273f2fc78f47445b14e73a810e7e6"}, + {file = "pillow-11.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:a697cd8ba0383bba3d2d3ada02b34ed268cb548b369943cd349007730c92bddf"}, + {file = "pillow-11.1.0-cp312-cp312-win_arm64.whl", hash = "sha256:4dd43a78897793f60766563969442020e90eb7847463eca901e41ba186a7d4a5"}, + {file = "pillow-11.1.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:ae98e14432d458fc3de11a77ccb3ae65ddce70f730e7c76140653048c71bfcbc"}, + {file = "pillow-11.1.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:cc1331b6d5a6e144aeb5e626f4375f5b7ae9934ba620c0ac6b3e43d5e683a0f0"}, + {file = "pillow-11.1.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:758e9d4ef15d3560214cddbc97b8ef3ef86ce04d62ddac17ad39ba87e89bd3b1"}, + {file = "pillow-11.1.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b523466b1a31d0dcef7c5be1f20b942919b62fd6e9a9be199d035509cbefc0ec"}, + {file = "pillow-11.1.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:9044b5e4f7083f209c4e35aa5dd54b1dd5b112b108648f5c902ad586d4f945c5"}, + {file = "pillow-11.1.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:3764d53e09cdedd91bee65c2527815d315c6b90d7b8b79759cc48d7bf5d4f114"}, + {file = "pillow-11.1.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:31eba6bbdd27dde97b0174ddf0297d7a9c3a507a8a1480e1e60ef914fe23d352"}, + {file = "pillow-11.1.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:b5d658fbd9f0d6eea113aea286b21d3cd4d3fd978157cbf2447a6035916506d3"}, + {file = "pillow-11.1.0-cp313-cp313-win32.whl", hash = "sha256:f86d3a7a9af5d826744fabf4afd15b9dfef44fe69a98541f666f66fbb8d3fef9"}, + {file = "pillow-11.1.0-cp313-cp313-win_amd64.whl", hash = "sha256:593c5fd6be85da83656b93ffcccc2312d2d149d251e98588b14fbc288fd8909c"}, + {file = "pillow-11.1.0-cp313-cp313-win_arm64.whl", hash = "sha256:11633d58b6ee5733bde153a8dafd25e505ea3d32e261accd388827ee987baf65"}, + {file = "pillow-11.1.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:70ca5ef3b3b1c4a0812b5c63c57c23b63e53bc38e758b37a951e5bc466449861"}, + {file = "pillow-11.1.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:8000376f139d4d38d6851eb149b321a52bb8893a88dae8ee7d95840431977081"}, + {file = "pillow-11.1.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9ee85f0696a17dd28fbcfceb59f9510aa71934b483d1f5601d1030c3c8304f3c"}, + {file = "pillow-11.1.0-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = 
"sha256:dd0e081319328928531df7a0e63621caf67652c8464303fd102141b785ef9547"}, + {file = "pillow-11.1.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:e63e4e5081de46517099dc30abe418122f54531a6ae2ebc8680bcd7096860eab"}, + {file = "pillow-11.1.0-cp313-cp313t-win32.whl", hash = "sha256:dda60aa465b861324e65a78c9f5cf0f4bc713e4309f83bc387be158b077963d9"}, + {file = "pillow-11.1.0-cp313-cp313t-win_amd64.whl", hash = "sha256:ad5db5781c774ab9a9b2c4302bbf0c1014960a0a7be63278d13ae6fdf88126fe"}, + {file = "pillow-11.1.0-cp313-cp313t-win_arm64.whl", hash = "sha256:67cd427c68926108778a9005f2a04adbd5e67c442ed21d95389fe1d595458756"}, + {file = "pillow-11.1.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:bf902d7413c82a1bfa08b06a070876132a5ae6b2388e2712aab3a7cbc02205c6"}, + {file = "pillow-11.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c1eec9d950b6fe688edee07138993e54ee4ae634c51443cfb7c1e7613322718e"}, + {file = "pillow-11.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8e275ee4cb11c262bd108ab2081f750db2a1c0b8c12c1897f27b160c8bd57bbc"}, + {file = "pillow-11.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4db853948ce4e718f2fc775b75c37ba2efb6aaea41a1a5fc57f0af59eee774b2"}, + {file = "pillow-11.1.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:ab8a209b8485d3db694fa97a896d96dd6533d63c22829043fd9de627060beade"}, + {file = "pillow-11.1.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:54251ef02a2309b5eec99d151ebf5c9904b77976c8abdcbce7891ed22df53884"}, + {file = "pillow-11.1.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:5bb94705aea800051a743aa4874bb1397d4695fb0583ba5e425ee0328757f196"}, + {file = "pillow-11.1.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:89dbdb3e6e9594d512780a5a1c42801879628b38e3efc7038094430844e271d8"}, + {file = "pillow-11.1.0-cp39-cp39-win32.whl", hash = "sha256:e5449ca63da169a2e6068dd0e2fcc8d91f9558aba89ff6d02121ca8ab11e79e5"}, + {file = "pillow-11.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:3362c6ca227e65c54bf71a5f88b3d4565ff1bcbc63ae72c34b07bbb1cc59a43f"}, + {file = "pillow-11.1.0-cp39-cp39-win_arm64.whl", hash = "sha256:b20be51b37a75cc54c2c55def3fa2c65bb94ba859dde241cd0a4fd302de5ae0a"}, + {file = "pillow-11.1.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:8c730dc3a83e5ac137fbc92dfcfe1511ce3b2b5d7578315b63dbbb76f7f51d90"}, + {file = "pillow-11.1.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:7d33d2fae0e8b170b6a6c57400e077412240f6f5bb2a342cf1ee512a787942bb"}, + {file = "pillow-11.1.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a8d65b38173085f24bc07f8b6c505cbb7418009fa1a1fcb111b1f4961814a442"}, + {file = "pillow-11.1.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:015c6e863faa4779251436db398ae75051469f7c903b043a48f078e437656f83"}, + {file = "pillow-11.1.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:d44ff19eea13ae4acdaaab0179fa68c0c6f2f45d66a4d8ec1eda7d6cecbcc15f"}, + {file = "pillow-11.1.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:d3d8da4a631471dfaf94c10c85f5277b1f8e42ac42bade1ac67da4b4a7359b73"}, + {file = "pillow-11.1.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:4637b88343166249fe8aa94e7c4a62a180c4b3898283bb5d3d2fd5fe10d8e4e0"}, + {file = "pillow-11.1.0.tar.gz", hash = "sha256:368da70808b36d73b4b390a8ffac11069f8a5c85f29eff1f1b01bcf3ef5b2a20"}, +] + +[package.extras] +docs = ["furo", "olefile", "sphinx (>=8.1)", "sphinx-copybutton", 
"sphinx-inline-tabs", "sphinxext-opengraph"] fpx = ["olefile"] mic = ["olefile"] -tests = ["check-manifest", "coverage", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout"] +tests = ["check-manifest", "coverage (>=7.4.2)", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout", "trove-classifiers (>=2024.10.12)"] typing = ["typing-extensions"] xmp = ["defusedxml"] @@ -4696,13 +4668,13 @@ virtualenv = ">=20.10.0" [[package]] name = "prometheus-client" -version = "0.21.0" +version = "0.21.1" description = "Python client for the Prometheus monitoring system." optional = true python-versions = ">=3.8" files = [ - {file = "prometheus_client-0.21.0-py3-none-any.whl", hash = "sha256:4fa6b4dd0ac16d58bb587c04b1caae65b8c5043e85f778f42f5f632f6af2e166"}, - {file = "prometheus_client-0.21.0.tar.gz", hash = "sha256:96c83c606b71ff2b0a433c98889d275f51ffec6c5e267de37c7a2b5c9aa9233e"}, + {file = "prometheus_client-0.21.1-py3-none-any.whl", hash = "sha256:594b45c410d6f4f8888940fe80b5cc2521b305a1fafe1c58609ef715a001f301"}, + {file = "prometheus_client-0.21.1.tar.gz", hash = "sha256:252505a722ac04b0456be05c05f75f45d760c2911ffc45f2a06bcaed9f3ae3fb"}, ] [package.extras] @@ -4724,159 +4696,144 @@ wcwidth = "*" [[package]] name = "propcache" -version = "0.2.0" +version = "0.2.1" description = "Accelerated property cache" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "propcache-0.2.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:c5869b8fd70b81835a6f187c5fdbe67917a04d7e52b6e7cc4e5fe39d55c39d58"}, - {file = "propcache-0.2.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:952e0d9d07609d9c5be361f33b0d6d650cd2bae393aabb11d9b719364521984b"}, - {file = "propcache-0.2.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:33ac8f098df0585c0b53009f039dfd913b38c1d2edafed0cedcc0c32a05aa110"}, - {file = "propcache-0.2.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:97e48e8875e6c13909c800fa344cd54cc4b2b0db1d5f911f840458a500fde2c2"}, - {file = "propcache-0.2.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:388f3217649d6d59292b722d940d4d2e1e6a7003259eb835724092a1cca0203a"}, - {file = "propcache-0.2.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f571aea50ba5623c308aa146eb650eebf7dbe0fd8c5d946e28343cb3b5aad577"}, - {file = "propcache-0.2.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3dfafb44f7bb35c0c06eda6b2ab4bfd58f02729e7c4045e179f9a861b07c9850"}, - {file = "propcache-0.2.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a3ebe9a75be7ab0b7da2464a77bb27febcb4fab46a34f9288f39d74833db7f61"}, - {file = "propcache-0.2.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:d2f0d0f976985f85dfb5f3d685697ef769faa6b71993b46b295cdbbd6be8cc37"}, - {file = "propcache-0.2.0-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:a3dc1a4b165283bd865e8f8cb5f0c64c05001e0718ed06250d8cac9bec115b48"}, - {file = "propcache-0.2.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:9e0f07b42d2a50c7dd2d8675d50f7343d998c64008f1da5fef888396b7f84630"}, - {file = "propcache-0.2.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:e63e3e1e0271f374ed489ff5ee73d4b6e7c60710e1f76af5f0e1a6117cd26394"}, - {file = "propcache-0.2.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = 
"sha256:56bb5c98f058a41bb58eead194b4db8c05b088c93d94d5161728515bd52b052b"}, - {file = "propcache-0.2.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:7665f04d0c7f26ff8bb534e1c65068409bf4687aa2534faf7104d7182debb336"}, - {file = "propcache-0.2.0-cp310-cp310-win32.whl", hash = "sha256:7cf18abf9764746b9c8704774d8b06714bcb0a63641518a3a89c7f85cc02c2ad"}, - {file = "propcache-0.2.0-cp310-cp310-win_amd64.whl", hash = "sha256:cfac69017ef97db2438efb854edf24f5a29fd09a536ff3a992b75990720cdc99"}, - {file = "propcache-0.2.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:63f13bf09cc3336eb04a837490b8f332e0db41da66995c9fd1ba04552e516354"}, - {file = "propcache-0.2.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:608cce1da6f2672a56b24a015b42db4ac612ee709f3d29f27a00c943d9e851de"}, - {file = "propcache-0.2.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:466c219deee4536fbc83c08d09115249db301550625c7fef1c5563a584c9bc87"}, - {file = "propcache-0.2.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fc2db02409338bf36590aa985a461b2c96fce91f8e7e0f14c50c5fcc4f229016"}, - {file = "propcache-0.2.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a6ed8db0a556343d566a5c124ee483ae113acc9a557a807d439bcecc44e7dfbb"}, - {file = "propcache-0.2.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:91997d9cb4a325b60d4e3f20967f8eb08dfcb32b22554d5ef78e6fd1dda743a2"}, - {file = "propcache-0.2.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4c7dde9e533c0a49d802b4f3f218fa9ad0a1ce21f2c2eb80d5216565202acab4"}, - {file = "propcache-0.2.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ffcad6c564fe6b9b8916c1aefbb37a362deebf9394bd2974e9d84232e3e08504"}, - {file = "propcache-0.2.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:97a58a28bcf63284e8b4d7b460cbee1edaab24634e82059c7b8c09e65284f178"}, - {file = "propcache-0.2.0-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:945db8ee295d3af9dbdbb698cce9bbc5c59b5c3fe328bbc4387f59a8a35f998d"}, - {file = "propcache-0.2.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:39e104da444a34830751715f45ef9fc537475ba21b7f1f5b0f4d71a3b60d7fe2"}, - {file = "propcache-0.2.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:c5ecca8f9bab618340c8e848d340baf68bcd8ad90a8ecd7a4524a81c1764b3db"}, - {file = "propcache-0.2.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:c436130cc779806bdf5d5fae0d848713105472b8566b75ff70048c47d3961c5b"}, - {file = "propcache-0.2.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:191db28dc6dcd29d1a3e063c3be0b40688ed76434622c53a284e5427565bbd9b"}, - {file = "propcache-0.2.0-cp311-cp311-win32.whl", hash = "sha256:5f2564ec89058ee7c7989a7b719115bdfe2a2fb8e7a4543b8d1c0cc4cf6478c1"}, - {file = "propcache-0.2.0-cp311-cp311-win_amd64.whl", hash = "sha256:6e2e54267980349b723cff366d1e29b138b9a60fa376664a157a342689553f71"}, - {file = "propcache-0.2.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:2ee7606193fb267be4b2e3b32714f2d58cad27217638db98a60f9efb5efeccc2"}, - {file = "propcache-0.2.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:91ee8fc02ca52e24bcb77b234f22afc03288e1dafbb1f88fe24db308910c4ac7"}, - {file = "propcache-0.2.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2e900bad2a8456d00a113cad8c13343f3b1f327534e3589acc2219729237a2e8"}, - {file = "propcache-0.2.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:f52a68c21363c45297aca15561812d542f8fc683c85201df0bebe209e349f793"}, - {file = "propcache-0.2.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1e41d67757ff4fbc8ef2af99b338bfb955010444b92929e9e55a6d4dcc3c4f09"}, - {file = "propcache-0.2.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a64e32f8bd94c105cc27f42d3b658902b5bcc947ece3c8fe7bc1b05982f60e89"}, - {file = "propcache-0.2.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:55346705687dbd7ef0d77883ab4f6fabc48232f587925bdaf95219bae072491e"}, - {file = "propcache-0.2.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:00181262b17e517df2cd85656fcd6b4e70946fe62cd625b9d74ac9977b64d8d9"}, - {file = "propcache-0.2.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6994984550eaf25dd7fc7bd1b700ff45c894149341725bb4edc67f0ffa94efa4"}, - {file = "propcache-0.2.0-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:56295eb1e5f3aecd516d91b00cfd8bf3a13991de5a479df9e27dd569ea23959c"}, - {file = "propcache-0.2.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:439e76255daa0f8151d3cb325f6dd4a3e93043e6403e6491813bcaaaa8733887"}, - {file = "propcache-0.2.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:f6475a1b2ecb310c98c28d271a30df74f9dd436ee46d09236a6b750a7599ce57"}, - {file = "propcache-0.2.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:3444cdba6628accf384e349014084b1cacd866fbb88433cd9d279d90a54e0b23"}, - {file = "propcache-0.2.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:4a9d9b4d0a9b38d1c391bb4ad24aa65f306c6f01b512e10a8a34a2dc5675d348"}, - {file = "propcache-0.2.0-cp312-cp312-win32.whl", hash = "sha256:69d3a98eebae99a420d4b28756c8ce6ea5a29291baf2dc9ff9414b42676f61d5"}, - {file = "propcache-0.2.0-cp312-cp312-win_amd64.whl", hash = "sha256:ad9c9b99b05f163109466638bd30ada1722abb01bbb85c739c50b6dc11f92dc3"}, - {file = "propcache-0.2.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ecddc221a077a8132cf7c747d5352a15ed763b674c0448d811f408bf803d9ad7"}, - {file = "propcache-0.2.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:0e53cb83fdd61cbd67202735e6a6687a7b491c8742dfc39c9e01e80354956763"}, - {file = "propcache-0.2.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:92fe151145a990c22cbccf9ae15cae8ae9eddabfc949a219c9f667877e40853d"}, - {file = "propcache-0.2.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d6a21ef516d36909931a2967621eecb256018aeb11fc48656e3257e73e2e247a"}, - {file = "propcache-0.2.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3f88a4095e913f98988f5b338c1d4d5d07dbb0b6bad19892fd447484e483ba6b"}, - {file = "propcache-0.2.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5a5b3bb545ead161be780ee85a2b54fdf7092815995661947812dde94a40f6fb"}, - {file = "propcache-0.2.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:67aeb72e0f482709991aa91345a831d0b707d16b0257e8ef88a2ad246a7280bf"}, - {file = "propcache-0.2.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3c997f8c44ec9b9b0bcbf2d422cc00a1d9b9c681f56efa6ca149a941e5560da2"}, - {file = "propcache-0.2.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:2a66df3d4992bc1d725b9aa803e8c5a66c010c65c741ad901e260ece77f58d2f"}, - {file = "propcache-0.2.0-cp313-cp313-musllinux_1_2_armv7l.whl", hash = 
"sha256:3ebbcf2a07621f29638799828b8d8668c421bfb94c6cb04269130d8de4fb7136"}, - {file = "propcache-0.2.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:1235c01ddaa80da8235741e80815ce381c5267f96cc49b1477fdcf8c047ef325"}, - {file = "propcache-0.2.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:3947483a381259c06921612550867b37d22e1df6d6d7e8361264b6d037595f44"}, - {file = "propcache-0.2.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:d5bed7f9805cc29c780f3aee05de3262ee7ce1f47083cfe9f77471e9d6777e83"}, - {file = "propcache-0.2.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e4a91d44379f45f5e540971d41e4626dacd7f01004826a18cb048e7da7e96544"}, - {file = "propcache-0.2.0-cp313-cp313-win32.whl", hash = "sha256:f902804113e032e2cdf8c71015651c97af6418363bea8d78dc0911d56c335032"}, - {file = "propcache-0.2.0-cp313-cp313-win_amd64.whl", hash = "sha256:8f188cfcc64fb1266f4684206c9de0e80f54622c3f22a910cbd200478aeae61e"}, - {file = "propcache-0.2.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:53d1bd3f979ed529f0805dd35ddaca330f80a9a6d90bc0121d2ff398f8ed8861"}, - {file = "propcache-0.2.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:83928404adf8fb3d26793665633ea79b7361efa0287dfbd372a7e74311d51ee6"}, - {file = "propcache-0.2.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:77a86c261679ea5f3896ec060be9dc8e365788248cc1e049632a1be682442063"}, - {file = "propcache-0.2.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:218db2a3c297a3768c11a34812e63b3ac1c3234c3a086def9c0fee50d35add1f"}, - {file = "propcache-0.2.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7735e82e3498c27bcb2d17cb65d62c14f1100b71723b68362872bca7d0913d90"}, - {file = "propcache-0.2.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:20a617c776f520c3875cf4511e0d1db847a076d720714ae35ffe0df3e440be68"}, - {file = "propcache-0.2.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:67b69535c870670c9f9b14a75d28baa32221d06f6b6fa6f77a0a13c5a7b0a5b9"}, - {file = "propcache-0.2.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4569158070180c3855e9c0791c56be3ceeb192defa2cdf6a3f39e54319e56b89"}, - {file = "propcache-0.2.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:db47514ffdbd91ccdc7e6f8407aac4ee94cc871b15b577c1c324236b013ddd04"}, - {file = "propcache-0.2.0-cp38-cp38-musllinux_1_2_armv7l.whl", hash = "sha256:2a60ad3e2553a74168d275a0ef35e8c0a965448ffbc3b300ab3a5bb9956c2162"}, - {file = "propcache-0.2.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:662dd62358bdeaca0aee5761de8727cfd6861432e3bb828dc2a693aa0471a563"}, - {file = "propcache-0.2.0-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:25a1f88b471b3bc911d18b935ecb7115dff3a192b6fef46f0bfaf71ff4f12418"}, - {file = "propcache-0.2.0-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:f60f0ac7005b9f5a6091009b09a419ace1610e163fa5deaba5ce3484341840e7"}, - {file = "propcache-0.2.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:74acd6e291f885678631b7ebc85d2d4aec458dd849b8c841b57ef04047833bed"}, - {file = "propcache-0.2.0-cp38-cp38-win32.whl", hash = "sha256:d9b6ddac6408194e934002a69bcaadbc88c10b5f38fb9307779d1c629181815d"}, - {file = "propcache-0.2.0-cp38-cp38-win_amd64.whl", hash = "sha256:676135dcf3262c9c5081cc8f19ad55c8a64e3f7282a21266d05544450bffc3a5"}, - {file = "propcache-0.2.0-cp39-cp39-macosx_10_9_universal2.whl", hash = 
"sha256:25c8d773a62ce0451b020c7b29a35cfbc05de8b291163a7a0f3b7904f27253e6"}, - {file = "propcache-0.2.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:375a12d7556d462dc64d70475a9ee5982465fbb3d2b364f16b86ba9135793638"}, - {file = "propcache-0.2.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1ec43d76b9677637a89d6ab86e1fef70d739217fefa208c65352ecf0282be957"}, - {file = "propcache-0.2.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f45eec587dafd4b2d41ac189c2156461ebd0c1082d2fe7013571598abb8505d1"}, - {file = "propcache-0.2.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bc092ba439d91df90aea38168e11f75c655880c12782facf5cf9c00f3d42b562"}, - {file = "propcache-0.2.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fa1076244f54bb76e65e22cb6910365779d5c3d71d1f18b275f1dfc7b0d71b4d"}, - {file = "propcache-0.2.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:682a7c79a2fbf40f5dbb1eb6bfe2cd865376deeac65acf9beb607505dced9e12"}, - {file = "propcache-0.2.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8e40876731f99b6f3c897b66b803c9e1c07a989b366c6b5b475fafd1f7ba3fb8"}, - {file = "propcache-0.2.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:363ea8cd3c5cb6679f1c2f5f1f9669587361c062e4899fce56758efa928728f8"}, - {file = "propcache-0.2.0-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:140fbf08ab3588b3468932974a9331aff43c0ab8a2ec2c608b6d7d1756dbb6cb"}, - {file = "propcache-0.2.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:e70fac33e8b4ac63dfc4c956fd7d85a0b1139adcfc0d964ce288b7c527537fea"}, - {file = "propcache-0.2.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:b33d7a286c0dc1a15f5fc864cc48ae92a846df287ceac2dd499926c3801054a6"}, - {file = "propcache-0.2.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:f6d5749fdd33d90e34c2efb174c7e236829147a2713334d708746e94c4bde40d"}, - {file = "propcache-0.2.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:22aa8f2272d81d9317ff5756bb108021a056805ce63dd3630e27d042c8092798"}, - {file = "propcache-0.2.0-cp39-cp39-win32.whl", hash = "sha256:73e4b40ea0eda421b115248d7e79b59214411109a5bc47d0d48e4c73e3b8fcf9"}, - {file = "propcache-0.2.0-cp39-cp39-win_amd64.whl", hash = "sha256:9517d5e9e0731957468c29dbfd0f976736a0e55afaea843726e887f36fe017df"}, - {file = "propcache-0.2.0-py3-none-any.whl", hash = "sha256:2ccc28197af5313706511fab3a8b66dcd6da067a1331372c82ea1cb74285e036"}, - {file = "propcache-0.2.0.tar.gz", hash = "sha256:df81779732feb9d01e5d513fad0122efb3d53bbc75f61b2a4f29a020bc985e70"}, + {file = "propcache-0.2.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:6b3f39a85d671436ee3d12c017f8fdea38509e4f25b28eb25877293c98c243f6"}, + {file = "propcache-0.2.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:39d51fbe4285d5db5d92a929e3e21536ea3dd43732c5b177c7ef03f918dff9f2"}, + {file = "propcache-0.2.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6445804cf4ec763dc70de65a3b0d9954e868609e83850a47ca4f0cb64bd79fea"}, + {file = "propcache-0.2.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f9479aa06a793c5aeba49ce5c5692ffb51fcd9a7016e017d555d5e2b0045d212"}, + {file = "propcache-0.2.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d9631c5e8b5b3a0fda99cb0d29c18133bca1e18aea9effe55adb3da1adef80d3"}, + {file = "propcache-0.2.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:3156628250f46a0895f1f36e1d4fbe062a1af8718ec3ebeb746f1d23f0c5dc4d"}, + {file = "propcache-0.2.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6b6fb63ae352e13748289f04f37868099e69dba4c2b3e271c46061e82c745634"}, + {file = "propcache-0.2.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:887d9b0a65404929641a9fabb6452b07fe4572b269d901d622d8a34a4e9043b2"}, + {file = "propcache-0.2.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:a96dc1fa45bd8c407a0af03b2d5218392729e1822b0c32e62c5bf7eeb5fb3958"}, + {file = "propcache-0.2.1-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:a7e65eb5c003a303b94aa2c3852ef130230ec79e349632d030e9571b87c4698c"}, + {file = "propcache-0.2.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:999779addc413181912e984b942fbcc951be1f5b3663cd80b2687758f434c583"}, + {file = "propcache-0.2.1-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:19a0f89a7bb9d8048d9c4370c9c543c396e894c76be5525f5e1ad287f1750ddf"}, + {file = "propcache-0.2.1-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:1ac2f5fe02fa75f56e1ad473f1175e11f475606ec9bd0be2e78e4734ad575034"}, + {file = "propcache-0.2.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:574faa3b79e8ebac7cb1d7930f51184ba1ccf69adfdec53a12f319a06030a68b"}, + {file = "propcache-0.2.1-cp310-cp310-win32.whl", hash = "sha256:03ff9d3f665769b2a85e6157ac8b439644f2d7fd17615a82fa55739bc97863f4"}, + {file = "propcache-0.2.1-cp310-cp310-win_amd64.whl", hash = "sha256:2d3af2e79991102678f53e0dbf4c35de99b6b8b58f29a27ca0325816364caaba"}, + {file = "propcache-0.2.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:1ffc3cca89bb438fb9c95c13fc874012f7b9466b89328c3c8b1aa93cdcfadd16"}, + {file = "propcache-0.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f174bbd484294ed9fdf09437f889f95807e5f229d5d93588d34e92106fbf6717"}, + {file = "propcache-0.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:70693319e0b8fd35dd863e3e29513875eb15c51945bf32519ef52927ca883bc3"}, + {file = "propcache-0.2.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b480c6a4e1138e1aa137c0079b9b6305ec6dcc1098a8ca5196283e8a49df95a9"}, + {file = "propcache-0.2.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d27b84d5880f6d8aa9ae3edb253c59d9f6642ffbb2c889b78b60361eed449787"}, + {file = "propcache-0.2.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:857112b22acd417c40fa4595db2fe28ab900c8c5fe4670c7989b1c0230955465"}, + {file = "propcache-0.2.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cf6c4150f8c0e32d241436526f3c3f9cbd34429492abddbada2ffcff506c51af"}, + {file = "propcache-0.2.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:66d4cfda1d8ed687daa4bc0274fcfd5267873db9a5bc0418c2da19273040eeb7"}, + {file = "propcache-0.2.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:c2f992c07c0fca81655066705beae35fc95a2fa7366467366db627d9f2ee097f"}, + {file = "propcache-0.2.1-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:4a571d97dbe66ef38e472703067021b1467025ec85707d57e78711c085984e54"}, + {file = "propcache-0.2.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:bb6178c241278d5fe853b3de743087be7f5f4c6f7d6d22a3b524d323eecec505"}, + {file = "propcache-0.2.1-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:ad1af54a62ffe39cf34db1aa6ed1a1873bd548f6401db39d8e7cd060b9211f82"}, + {file = 
"propcache-0.2.1-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:e7048abd75fe40712005bcfc06bb44b9dfcd8e101dda2ecf2f5aa46115ad07ca"}, + {file = "propcache-0.2.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:160291c60081f23ee43d44b08a7e5fb76681221a8e10b3139618c5a9a291b84e"}, + {file = "propcache-0.2.1-cp311-cp311-win32.whl", hash = "sha256:819ce3b883b7576ca28da3861c7e1a88afd08cc8c96908e08a3f4dd64a228034"}, + {file = "propcache-0.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:edc9fc7051e3350643ad929df55c451899bb9ae6d24998a949d2e4c87fb596d3"}, + {file = "propcache-0.2.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:081a430aa8d5e8876c6909b67bd2d937bfd531b0382d3fdedb82612c618bc41a"}, + {file = "propcache-0.2.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:d2ccec9ac47cf4e04897619c0e0c1a48c54a71bdf045117d3a26f80d38ab1fb0"}, + {file = "propcache-0.2.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:14d86fe14b7e04fa306e0c43cdbeebe6b2c2156a0c9ce56b815faacc193e320d"}, + {file = "propcache-0.2.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:049324ee97bb67285b49632132db351b41e77833678432be52bdd0289c0e05e4"}, + {file = "propcache-0.2.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1cd9a1d071158de1cc1c71a26014dcdfa7dd3d5f4f88c298c7f90ad6f27bb46d"}, + {file = "propcache-0.2.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:98110aa363f1bb4c073e8dcfaefd3a5cea0f0834c2aab23dda657e4dab2f53b5"}, + {file = "propcache-0.2.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:647894f5ae99c4cf6bb82a1bb3a796f6e06af3caa3d32e26d2350d0e3e3faf24"}, + {file = "propcache-0.2.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bfd3223c15bebe26518d58ccf9a39b93948d3dcb3e57a20480dfdd315356baff"}, + {file = "propcache-0.2.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d71264a80f3fcf512eb4f18f59423fe82d6e346ee97b90625f283df56aee103f"}, + {file = "propcache-0.2.1-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:e73091191e4280403bde6c9a52a6999d69cdfde498f1fdf629105247599b57ec"}, + {file = "propcache-0.2.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:3935bfa5fede35fb202c4b569bb9c042f337ca4ff7bd540a0aa5e37131659348"}, + {file = "propcache-0.2.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:f508b0491767bb1f2b87fdfacaba5f7eddc2f867740ec69ece6d1946d29029a6"}, + {file = "propcache-0.2.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:1672137af7c46662a1c2be1e8dc78cb6d224319aaa40271c9257d886be4363a6"}, + {file = "propcache-0.2.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b74c261802d3d2b85c9df2dfb2fa81b6f90deeef63c2db9f0e029a3cac50b518"}, + {file = "propcache-0.2.1-cp312-cp312-win32.whl", hash = "sha256:d09c333d36c1409d56a9d29b3a1b800a42c76a57a5a8907eacdbce3f18768246"}, + {file = "propcache-0.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:c214999039d4f2a5b2073ac506bba279945233da8c786e490d411dfc30f855c1"}, + {file = "propcache-0.2.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:aca405706e0b0a44cc6bfd41fbe89919a6a56999157f6de7e182a990c36e37bc"}, + {file = "propcache-0.2.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:12d1083f001ace206fe34b6bdc2cb94be66d57a850866f0b908972f90996b3e9"}, + {file = "propcache-0.2.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:d93f3307ad32a27bda2e88ec81134b823c240aa3abb55821a8da553eed8d9439"}, + {file = 
"propcache-0.2.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ba278acf14471d36316159c94a802933d10b6a1e117b8554fe0d0d9b75c9d536"}, + {file = "propcache-0.2.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4e6281aedfca15301c41f74d7005e6e3f4ca143584ba696ac69df4f02f40d629"}, + {file = "propcache-0.2.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5b750a8e5a1262434fb1517ddf64b5de58327f1adc3524a5e44c2ca43305eb0b"}, + {file = "propcache-0.2.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf72af5e0fb40e9babf594308911436c8efde3cb5e75b6f206c34ad18be5c052"}, + {file = "propcache-0.2.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b2d0a12018b04f4cb820781ec0dffb5f7c7c1d2a5cd22bff7fb055a2cb19ebce"}, + {file = "propcache-0.2.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:e800776a79a5aabdb17dcc2346a7d66d0777e942e4cd251defeb084762ecd17d"}, + {file = "propcache-0.2.1-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:4160d9283bd382fa6c0c2b5e017acc95bc183570cd70968b9202ad6d8fc48dce"}, + {file = "propcache-0.2.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:30b43e74f1359353341a7adb783c8f1b1c676367b011709f466f42fda2045e95"}, + {file = "propcache-0.2.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:58791550b27d5488b1bb52bc96328456095d96206a250d28d874fafe11b3dfaf"}, + {file = "propcache-0.2.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:0f022d381747f0dfe27e99d928e31bc51a18b65bb9e481ae0af1380a6725dd1f"}, + {file = "propcache-0.2.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:297878dc9d0a334358f9b608b56d02e72899f3b8499fc6044133f0d319e2ec30"}, + {file = "propcache-0.2.1-cp313-cp313-win32.whl", hash = "sha256:ddfab44e4489bd79bda09d84c430677fc7f0a4939a73d2bba3073036f487a0a6"}, + {file = "propcache-0.2.1-cp313-cp313-win_amd64.whl", hash = "sha256:556fc6c10989f19a179e4321e5d678db8eb2924131e64652a51fe83e4c3db0e1"}, + {file = "propcache-0.2.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:6a9a8c34fb7bb609419a211e59da8887eeca40d300b5ea8e56af98f6fbbb1541"}, + {file = "propcache-0.2.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ae1aa1cd222c6d205853b3013c69cd04515f9d6ab6de4b0603e2e1c33221303e"}, + {file = "propcache-0.2.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:accb6150ce61c9c4b7738d45550806aa2b71c7668c6942f17b0ac182b6142fd4"}, + {file = "propcache-0.2.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5eee736daafa7af6d0a2dc15cc75e05c64f37fc37bafef2e00d77c14171c2097"}, + {file = "propcache-0.2.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f7a31fc1e1bd362874863fdeed71aed92d348f5336fd84f2197ba40c59f061bd"}, + {file = "propcache-0.2.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cba4cfa1052819d16699e1d55d18c92b6e094d4517c41dd231a8b9f87b6fa681"}, + {file = "propcache-0.2.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f089118d584e859c62b3da0892b88a83d611c2033ac410e929cb6754eec0ed16"}, + {file = "propcache-0.2.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:781e65134efaf88feb447e8c97a51772aa75e48b794352f94cb7ea717dedda0d"}, + {file = "propcache-0.2.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:31f5af773530fd3c658b32b6bdc2d0838543de70eb9a2156c03e410f7b0d3aae"}, + {file = 
"propcache-0.2.1-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:a7a078f5d37bee6690959c813977da5291b24286e7b962e62a94cec31aa5188b"}, + {file = "propcache-0.2.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:cea7daf9fc7ae6687cf1e2c049752f19f146fdc37c2cc376e7d0032cf4f25347"}, + {file = "propcache-0.2.1-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:8b3489ff1ed1e8315674d0775dc7d2195fb13ca17b3808721b54dbe9fd020faf"}, + {file = "propcache-0.2.1-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:9403db39be1393618dd80c746cb22ccda168efce239c73af13c3763ef56ffc04"}, + {file = "propcache-0.2.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:5d97151bc92d2b2578ff7ce779cdb9174337390a535953cbb9452fb65164c587"}, + {file = "propcache-0.2.1-cp39-cp39-win32.whl", hash = "sha256:9caac6b54914bdf41bcc91e7eb9147d331d29235a7c967c150ef5df6464fd1bb"}, + {file = "propcache-0.2.1-cp39-cp39-win_amd64.whl", hash = "sha256:92fc4500fcb33899b05ba73276dfb684a20d31caa567b7cb5252d48f896a91b1"}, + {file = "propcache-0.2.1-py3-none-any.whl", hash = "sha256:52277518d6aae65536e9cea52d4e7fd2f7a66f4aa2d30ed3f2fcea620ace3c54"}, + {file = "propcache-0.2.1.tar.gz", hash = "sha256:3f77ce728b19cb537714499928fe800c3dda29e8d9428778fc7c186da4c09a64"}, ] [[package]] name = "protobuf" -version = "5.28.2" +version = "5.29.2" description = "" optional = false python-versions = ">=3.8" files = [ - {file = "protobuf-5.28.2-cp310-abi3-win32.whl", hash = "sha256:eeea10f3dc0ac7e6b4933d32db20662902b4ab81bf28df12218aa389e9c2102d"}, - {file = "protobuf-5.28.2-cp310-abi3-win_amd64.whl", hash = "sha256:2c69461a7fcc8e24be697624c09a839976d82ae75062b11a0972e41fd2cd9132"}, - {file = "protobuf-5.28.2-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:a8b9403fc70764b08d2f593ce44f1d2920c5077bf7d311fefec999f8c40f78b7"}, - {file = "protobuf-5.28.2-cp38-abi3-manylinux2014_aarch64.whl", hash = "sha256:35cfcb15f213449af7ff6198d6eb5f739c37d7e4f1c09b5d0641babf2cc0c68f"}, - {file = "protobuf-5.28.2-cp38-abi3-manylinux2014_x86_64.whl", hash = "sha256:5e8a95246d581eef20471b5d5ba010d55f66740942b95ba9b872d918c459452f"}, - {file = "protobuf-5.28.2-cp38-cp38-win32.whl", hash = "sha256:87317e9bcda04a32f2ee82089a204d3a2f0d3c8aeed16568c7daf4756e4f1fe0"}, - {file = "protobuf-5.28.2-cp38-cp38-win_amd64.whl", hash = "sha256:c0ea0123dac3399a2eeb1a1443d82b7afc9ff40241433296769f7da42d142ec3"}, - {file = "protobuf-5.28.2-cp39-cp39-win32.whl", hash = "sha256:ca53faf29896c526863366a52a8f4d88e69cd04ec9571ed6082fa117fac3ab36"}, - {file = "protobuf-5.28.2-cp39-cp39-win_amd64.whl", hash = "sha256:8ddc60bf374785fb7cb12510b267f59067fa10087325b8e1855b898a0d81d276"}, - {file = "protobuf-5.28.2-py3-none-any.whl", hash = "sha256:52235802093bd8a2811abbe8bf0ab9c5f54cca0a751fdd3f6ac2a21438bffece"}, - {file = "protobuf-5.28.2.tar.gz", hash = "sha256:59379674ff119717404f7454647913787034f03fe7049cbef1d74a97bb4593f0"}, + {file = "protobuf-5.29.2-cp310-abi3-win32.whl", hash = "sha256:c12ba8249f5624300cf51c3d0bfe5be71a60c63e4dcf51ffe9a68771d958c851"}, + {file = "protobuf-5.29.2-cp310-abi3-win_amd64.whl", hash = "sha256:842de6d9241134a973aab719ab42b008a18a90f9f07f06ba480df268f86432f9"}, + {file = "protobuf-5.29.2-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:a0c53d78383c851bfa97eb42e3703aefdc96d2036a41482ffd55dc5f529466eb"}, + {file = "protobuf-5.29.2-cp38-abi3-manylinux2014_aarch64.whl", hash = "sha256:494229ecd8c9009dd71eda5fd57528395d1eacdf307dbece6c12ad0dd09e912e"}, + {file = "protobuf-5.29.2-cp38-abi3-manylinux2014_x86_64.whl", hash = 
"sha256:b6b0d416bbbb9d4fbf9d0561dbfc4e324fd522f61f7af0fe0f282ab67b22477e"}, + {file = "protobuf-5.29.2-cp38-cp38-win32.whl", hash = "sha256:e621a98c0201a7c8afe89d9646859859be97cb22b8bf1d8eacfd90d5bda2eb19"}, + {file = "protobuf-5.29.2-cp38-cp38-win_amd64.whl", hash = "sha256:13d6d617a2a9e0e82a88113d7191a1baa1e42c2cc6f5f1398d3b054c8e7e714a"}, + {file = "protobuf-5.29.2-cp39-cp39-win32.whl", hash = "sha256:36000f97ea1e76e8398a3f02936aac2a5d2b111aae9920ec1b769fc4a222c4d9"}, + {file = "protobuf-5.29.2-cp39-cp39-win_amd64.whl", hash = "sha256:2d2e674c58a06311c8e99e74be43e7f3a8d1e2b2fdf845eaa347fbd866f23355"}, + {file = "protobuf-5.29.2-py3-none-any.whl", hash = "sha256:fde4554c0e578a5a0bcc9a276339594848d1e89f9ea47b4427c80e5d72f90181"}, + {file = "protobuf-5.29.2.tar.gz", hash = "sha256:b2cc8e8bb7c9326996f0e160137b0861f1a82162502658df2951209d0cb0309e"}, ] [[package]] name = "psutil" -version = "6.0.0" +version = "6.1.1" description = "Cross-platform lib for process and system monitoring in Python." optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" files = [ - {file = "psutil-6.0.0-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:a021da3e881cd935e64a3d0a20983bda0bb4cf80e4f74fa9bfcb1bc5785360c6"}, - {file = "psutil-6.0.0-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:1287c2b95f1c0a364d23bc6f2ea2365a8d4d9b726a3be7294296ff7ba97c17f0"}, - {file = "psutil-6.0.0-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:a9a3dbfb4de4f18174528d87cc352d1f788b7496991cca33c6996f40c9e3c92c"}, - {file = "psutil-6.0.0-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:6ec7588fb3ddaec7344a825afe298db83fe01bfaaab39155fa84cf1c0d6b13c3"}, - {file = "psutil-6.0.0-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:1e7c870afcb7d91fdea2b37c24aeb08f98b6d67257a5cb0a8bc3ac68d0f1a68c"}, - {file = "psutil-6.0.0-cp27-none-win32.whl", hash = "sha256:02b69001f44cc73c1c5279d02b30a817e339ceb258ad75997325e0e6169d8b35"}, - {file = "psutil-6.0.0-cp27-none-win_amd64.whl", hash = "sha256:21f1fb635deccd510f69f485b87433460a603919b45e2a324ad65b0cc74f8fb1"}, - {file = "psutil-6.0.0-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:c588a7e9b1173b6e866756dde596fd4cad94f9399daf99ad8c3258b3cb2b47a0"}, - {file = "psutil-6.0.0-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ed2440ada7ef7d0d608f20ad89a04ec47d2d3ab7190896cd62ca5fc4fe08bf0"}, - {file = "psutil-6.0.0-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5fd9a97c8e94059b0ef54a7d4baf13b405011176c3b6ff257c247cae0d560ecd"}, - {file = "psutil-6.0.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e2e8d0054fc88153ca0544f5c4d554d42e33df2e009c4ff42284ac9ebdef4132"}, - {file = "psutil-6.0.0-cp36-cp36m-win32.whl", hash = "sha256:fc8c9510cde0146432bbdb433322861ee8c3efbf8589865c8bf8d21cb30c4d14"}, - {file = "psutil-6.0.0-cp36-cp36m-win_amd64.whl", hash = "sha256:34859b8d8f423b86e4385ff3665d3f4d94be3cdf48221fbe476e883514fdb71c"}, - {file = "psutil-6.0.0-cp37-abi3-win32.whl", hash = "sha256:a495580d6bae27291324fe60cea0b5a7c23fa36a7cd35035a16d93bdcf076b9d"}, - {file = "psutil-6.0.0-cp37-abi3-win_amd64.whl", hash = "sha256:33ea5e1c975250a720b3a6609c490db40dae5d83a4eb315170c4fe0d8b1f34b3"}, - {file = "psutil-6.0.0-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:ffe7fc9b6b36beadc8c322f84e1caff51e8703b88eee1da46d1e3a6ae11b4fd0"}, - {file = "psutil-6.0.0.tar.gz", hash = 
"sha256:8faae4f310b6d969fa26ca0545338b21f73c6b15db7c4a8d934a5482faa818f2"}, + {file = "psutil-6.1.1-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:9ccc4316f24409159897799b83004cb1e24f9819b0dcf9c0b68bdcb6cefee6a8"}, + {file = "psutil-6.1.1-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:ca9609c77ea3b8481ab005da74ed894035936223422dc591d6772b147421f777"}, + {file = "psutil-6.1.1-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:8df0178ba8a9e5bc84fed9cfa61d54601b371fbec5c8eebad27575f1e105c0d4"}, + {file = "psutil-6.1.1-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:1924e659d6c19c647e763e78670a05dbb7feaf44a0e9c94bf9e14dfc6ba50468"}, + {file = "psutil-6.1.1-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:018aeae2af92d943fdf1da6b58665124897cfc94faa2ca92098838f83e1b1bca"}, + {file = "psutil-6.1.1-cp27-none-win32.whl", hash = "sha256:6d4281f5bbca041e2292be3380ec56a9413b790579b8e593b1784499d0005dac"}, + {file = "psutil-6.1.1-cp27-none-win_amd64.whl", hash = "sha256:c777eb75bb33c47377c9af68f30e9f11bc78e0f07fbf907be4a5d70b2fe5f030"}, + {file = "psutil-6.1.1-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:fc0ed7fe2231a444fc219b9c42d0376e0a9a1a72f16c5cfa0f68d19f1a0663e8"}, + {file = "psutil-6.1.1-cp36-abi3-macosx_11_0_arm64.whl", hash = "sha256:0bdd4eab935276290ad3cb718e9809412895ca6b5b334f5a9111ee6d9aff9377"}, + {file = "psutil-6.1.1-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b6e06c20c05fe95a3d7302d74e7097756d4ba1247975ad6905441ae1b5b66003"}, + {file = "psutil-6.1.1-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:97f7cb9921fbec4904f522d972f0c0e1f4fabbdd4e0287813b21215074a0f160"}, + {file = "psutil-6.1.1-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:33431e84fee02bc84ea36d9e2c4a6d395d479c9dd9bba2376c1f6ee8f3a4e0b3"}, + {file = "psutil-6.1.1-cp36-cp36m-win32.whl", hash = "sha256:384636b1a64b47814437d1173be1427a7c83681b17a450bfc309a1953e329603"}, + {file = "psutil-6.1.1-cp36-cp36m-win_amd64.whl", hash = "sha256:8be07491f6ebe1a693f17d4f11e69d0dc1811fa082736500f649f79df7735303"}, + {file = "psutil-6.1.1-cp37-abi3-win32.whl", hash = "sha256:eaa912e0b11848c4d9279a93d7e2783df352b082f40111e078388701fd479e53"}, + {file = "psutil-6.1.1-cp37-abi3-win_amd64.whl", hash = "sha256:f35cfccb065fff93529d2afb4a2e89e363fe63ca1e4a5da22b603a85833c2649"}, + {file = "psutil-6.1.1.tar.gz", hash = "sha256:cf8496728c18f2d0b45198f06895be52f36611711746b7f30c464b422b50e2f5"}, ] [package.extras] -test = ["enum34", "ipaddress", "mock", "pywin32", "wmi"] +dev = ["abi3audit", "black", "check-manifest", "coverage", "packaging", "pylint", "pyperf", "pypinfo", "pytest-cov", "requests", "rstcheck", "ruff", "sphinx", "sphinx_rtd_theme", "toml-sort", "twine", "virtualenv", "vulture", "wheel"] +test = ["pytest", "pytest-xdist", "setuptools"] [[package]] name = "ptyprocess" @@ -4905,51 +4862,54 @@ tests = ["pytest"] [[package]] name = "pyarrow" -version = "17.0.0" +version = "18.1.0" description = "Python library for Apache Arrow" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "pyarrow-17.0.0-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:a5c8b238d47e48812ee577ee20c9a2779e6a5904f1708ae240f53ecbee7c9f07"}, - {file = "pyarrow-17.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:db023dc4c6cae1015de9e198d41250688383c3f9af8f565370ab2b4cb5f62655"}, - {file = 
"pyarrow-17.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da1e060b3876faa11cee287839f9cc7cdc00649f475714b8680a05fd9071d545"}, - {file = "pyarrow-17.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75c06d4624c0ad6674364bb46ef38c3132768139ddec1c56582dbac54f2663e2"}, - {file = "pyarrow-17.0.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:fa3c246cc58cb5a4a5cb407a18f193354ea47dd0648194e6265bd24177982fe8"}, - {file = "pyarrow-17.0.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:f7ae2de664e0b158d1607699a16a488de3d008ba99b3a7aa5de1cbc13574d047"}, - {file = "pyarrow-17.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:5984f416552eea15fd9cee03da53542bf4cddaef5afecefb9aa8d1010c335087"}, - {file = "pyarrow-17.0.0-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:1c8856e2ef09eb87ecf937104aacfa0708f22dfeb039c363ec99735190ffb977"}, - {file = "pyarrow-17.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2e19f569567efcbbd42084e87f948778eb371d308e137a0f97afe19bb860ccb3"}, - {file = "pyarrow-17.0.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b244dc8e08a23b3e352899a006a26ae7b4d0da7bb636872fa8f5884e70acf15"}, - {file = "pyarrow-17.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0b72e87fe3e1db343995562f7fff8aee354b55ee83d13afba65400c178ab2597"}, - {file = "pyarrow-17.0.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:dc5c31c37409dfbc5d014047817cb4ccd8c1ea25d19576acf1a001fe07f5b420"}, - {file = "pyarrow-17.0.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:e3343cb1e88bc2ea605986d4b94948716edc7a8d14afd4e2c097232f729758b4"}, - {file = "pyarrow-17.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:a27532c38f3de9eb3e90ecab63dfda948a8ca859a66e3a47f5f42d1e403c4d03"}, - {file = "pyarrow-17.0.0-cp312-cp312-macosx_10_15_x86_64.whl", hash = "sha256:9b8a823cea605221e61f34859dcc03207e52e409ccf6354634143e23af7c8d22"}, - {file = "pyarrow-17.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f1e70de6cb5790a50b01d2b686d54aaf73da01266850b05e3af2a1bc89e16053"}, - {file = "pyarrow-17.0.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0071ce35788c6f9077ff9ecba4858108eebe2ea5a3f7cf2cf55ebc1dbc6ee24a"}, - {file = "pyarrow-17.0.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:757074882f844411fcca735e39aae74248a1531367a7c80799b4266390ae51cc"}, - {file = "pyarrow-17.0.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:9ba11c4f16976e89146781a83833df7f82077cdab7dc6232c897789343f7891a"}, - {file = "pyarrow-17.0.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:b0c6ac301093b42d34410b187bba560b17c0330f64907bfa4f7f7f2444b0cf9b"}, - {file = "pyarrow-17.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:392bc9feabc647338e6c89267635e111d71edad5fcffba204425a7c8d13610d7"}, - {file = "pyarrow-17.0.0-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:af5ff82a04b2171415f1410cff7ebb79861afc5dae50be73ce06d6e870615204"}, - {file = "pyarrow-17.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:edca18eaca89cd6382dfbcff3dd2d87633433043650c07375d095cd3517561d8"}, - {file = "pyarrow-17.0.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7c7916bff914ac5d4a8fe25b7a25e432ff921e72f6f2b7547d1e325c1ad9d155"}, - {file = "pyarrow-17.0.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f553ca691b9e94b202ff741bdd40f6ccb70cdd5fbf65c187af132f1317de6145"}, - {file = 
"pyarrow-17.0.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:0cdb0e627c86c373205a2f94a510ac4376fdc523f8bb36beab2e7f204416163c"}, - {file = "pyarrow-17.0.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:d7d192305d9d8bc9082d10f361fc70a73590a4c65cf31c3e6926cd72b76bc35c"}, - {file = "pyarrow-17.0.0-cp38-cp38-win_amd64.whl", hash = "sha256:02dae06ce212d8b3244dd3e7d12d9c4d3046945a5933d28026598e9dbbda1fca"}, - {file = "pyarrow-17.0.0-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:13d7a460b412f31e4c0efa1148e1d29bdf18ad1411eb6757d38f8fbdcc8645fb"}, - {file = "pyarrow-17.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9b564a51fbccfab5a04a80453e5ac6c9954a9c5ef2890d1bcf63741909c3f8df"}, - {file = "pyarrow-17.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:32503827abbc5aadedfa235f5ece8c4f8f8b0a3cf01066bc8d29de7539532687"}, - {file = "pyarrow-17.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a155acc7f154b9ffcc85497509bcd0d43efb80d6f733b0dc3bb14e281f131c8b"}, - {file = "pyarrow-17.0.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:dec8d129254d0188a49f8a1fc99e0560dc1b85f60af729f47de4046015f9b0a5"}, - {file = "pyarrow-17.0.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:a48ddf5c3c6a6c505904545c25a4ae13646ae1f8ba703c4df4a1bfe4f4006bda"}, - {file = "pyarrow-17.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:42bf93249a083aca230ba7e2786c5f673507fa97bbd9725a1e2754715151a204"}, - {file = "pyarrow-17.0.0.tar.gz", hash = "sha256:4beca9521ed2c0921c1023e68d097d0299b62c362639ea315572a58f3f50fd28"}, -] - -[package.dependencies] -numpy = ">=1.16.6" + {file = "pyarrow-18.1.0-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:e21488d5cfd3d8b500b3238a6c4b075efabc18f0f6d80b29239737ebd69caa6c"}, + {file = "pyarrow-18.1.0-cp310-cp310-macosx_12_0_x86_64.whl", hash = "sha256:b516dad76f258a702f7ca0250885fc93d1fa5ac13ad51258e39d402bd9e2e1e4"}, + {file = "pyarrow-18.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4f443122c8e31f4c9199cb23dca29ab9427cef990f283f80fe15b8e124bcc49b"}, + {file = "pyarrow-18.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0a03da7f2758645d17b7b4f83c8bffeae5bbb7f974523fe901f36288d2eab71"}, + {file = "pyarrow-18.1.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:ba17845efe3aa358ec266cf9cc2800fa73038211fb27968bfa88acd09261a470"}, + {file = "pyarrow-18.1.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:3c35813c11a059056a22a3bef520461310f2f7eea5c8a11ef9de7062a23f8d56"}, + {file = "pyarrow-18.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:9736ba3c85129d72aefa21b4f3bd715bc4190fe4426715abfff90481e7d00812"}, + {file = "pyarrow-18.1.0-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:eaeabf638408de2772ce3d7793b2668d4bb93807deed1725413b70e3156a7854"}, + {file = "pyarrow-18.1.0-cp311-cp311-macosx_12_0_x86_64.whl", hash = "sha256:3b2e2239339c538f3464308fd345113f886ad031ef8266c6f004d49769bb074c"}, + {file = "pyarrow-18.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f39a2e0ed32a0970e4e46c262753417a60c43a3246972cfc2d3eb85aedd01b21"}, + {file = "pyarrow-18.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e31e9417ba9c42627574bdbfeada7217ad8a4cbbe45b9d6bdd4b62abbca4c6f6"}, + {file = "pyarrow-18.1.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:01c034b576ce0eef554f7c3d8c341714954be9b3f5d5bc7117006b85fcf302fe"}, + {file = 
"pyarrow-18.1.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:f266a2c0fc31995a06ebd30bcfdb7f615d7278035ec5b1cd71c48d56daaf30b0"}, + {file = "pyarrow-18.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:d4f13eee18433f99adefaeb7e01d83b59f73360c231d4782d9ddfaf1c3fbde0a"}, + {file = "pyarrow-18.1.0-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:9f3a76670b263dc41d0ae877f09124ab96ce10e4e48f3e3e4257273cee61ad0d"}, + {file = "pyarrow-18.1.0-cp312-cp312-macosx_12_0_x86_64.whl", hash = "sha256:da31fbca07c435be88a0c321402c4e31a2ba61593ec7473630769de8346b54ee"}, + {file = "pyarrow-18.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:543ad8459bc438efc46d29a759e1079436290bd583141384c6f7a1068ed6f992"}, + {file = "pyarrow-18.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0743e503c55be0fdb5c08e7d44853da27f19dc854531c0570f9f394ec9671d54"}, + {file = "pyarrow-18.1.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:d4b3d2a34780645bed6414e22dda55a92e0fcd1b8a637fba86800ad737057e33"}, + {file = "pyarrow-18.1.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:c52f81aa6f6575058d8e2c782bf79d4f9fdc89887f16825ec3a66607a5dd8e30"}, + {file = "pyarrow-18.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:0ad4892617e1a6c7a551cfc827e072a633eaff758fa09f21c4ee548c30bcaf99"}, + {file = "pyarrow-18.1.0-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:84e314d22231357d473eabec709d0ba285fa706a72377f9cc8e1cb3c8013813b"}, + {file = "pyarrow-18.1.0-cp313-cp313-macosx_12_0_x86_64.whl", hash = "sha256:f591704ac05dfd0477bb8f8e0bd4b5dc52c1cadf50503858dce3a15db6e46ff2"}, + {file = "pyarrow-18.1.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:acb7564204d3c40babf93a05624fc6a8ec1ab1def295c363afc40b0c9e66c191"}, + {file = "pyarrow-18.1.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:74de649d1d2ccb778f7c3afff6085bd5092aed4c23df9feeb45dd6b16f3811aa"}, + {file = "pyarrow-18.1.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:f96bd502cb11abb08efea6dab09c003305161cb6c9eafd432e35e76e7fa9b90c"}, + {file = "pyarrow-18.1.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:36ac22d7782554754a3b50201b607d553a8d71b78cdf03b33c1125be4b52397c"}, + {file = "pyarrow-18.1.0-cp313-cp313-win_amd64.whl", hash = "sha256:25dbacab8c5952df0ca6ca0af28f50d45bd31c1ff6fcf79e2d120b4a65ee7181"}, + {file = "pyarrow-18.1.0-cp313-cp313t-macosx_12_0_arm64.whl", hash = "sha256:6a276190309aba7bc9d5bd2933230458b3521a4317acfefe69a354f2fe59f2bc"}, + {file = "pyarrow-18.1.0-cp313-cp313t-macosx_12_0_x86_64.whl", hash = "sha256:ad514dbfcffe30124ce655d72771ae070f30bf850b48bc4d9d3b25993ee0e386"}, + {file = "pyarrow-18.1.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aebc13a11ed3032d8dd6e7171eb6e86d40d67a5639d96c35142bd568b9299324"}, + {file = "pyarrow-18.1.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d6cf5c05f3cee251d80e98726b5c7cc9f21bab9e9783673bac58e6dfab57ecc8"}, + {file = "pyarrow-18.1.0-cp313-cp313t-manylinux_2_28_aarch64.whl", hash = "sha256:11b676cd410cf162d3f6a70b43fb9e1e40affbc542a1e9ed3681895f2962d3d9"}, + {file = "pyarrow-18.1.0-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:b76130d835261b38f14fc41fdfb39ad8d672afb84c447126b84d5472244cfaba"}, + {file = "pyarrow-18.1.0-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:0b331e477e40f07238adc7ba7469c36b908f07c89b95dd4bd3a0ec84a3d1e21e"}, + {file = 
"pyarrow-18.1.0-cp39-cp39-macosx_12_0_x86_64.whl", hash = "sha256:2c4dd0c9010a25ba03e198fe743b1cc03cd33c08190afff371749c52ccbbaf76"}, + {file = "pyarrow-18.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4f97b31b4c4e21ff58c6f330235ff893cc81e23da081b1a4b1c982075e0ed4e9"}, + {file = "pyarrow-18.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4a4813cb8ecf1809871fd2d64a8eff740a1bd3691bbe55f01a3cf6c5ec869754"}, + {file = "pyarrow-18.1.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:05a5636ec3eb5cc2a36c6edb534a38ef57b2ab127292a716d00eabb887835f1e"}, + {file = "pyarrow-18.1.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:73eeed32e724ea3568bb06161cad5fa7751e45bc2228e33dcb10c614044165c7"}, + {file = "pyarrow-18.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:a1880dd6772b685e803011a6b43a230c23b566859a6e0c9a276c1e0faf4f4052"}, + {file = "pyarrow-18.1.0.tar.gz", hash = "sha256:9386d3ca9c145b5539a1cfc75df07757dff870168c959b473a0bccbc3abc8c73"}, +] [package.extras] test = ["cffi", "hypothesis", "pandas", "pytest", "pytz"] @@ -4979,37 +4939,42 @@ test = ["numpy"] [[package]] name = "pyav" -version = "13.1.0" +version = "14.0.1" description = "Pythonic bindings for FFmpeg's libraries." optional = false python-versions = ">=3.10" files = [ - {file = "pyav-13.1.0-cp310-cp310-macosx_10_13_x86_64.whl", hash = "sha256:64a81022e60dfba7dee9767a6fd150f42293855ea127979b2f38a3fd86f908fd"}, - {file = "pyav-13.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3971089334cc91e331c5014c8ea5fcbca0ccc82eb14952c128ce50570010a3cf"}, - {file = "pyav-13.1.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:154394ba47b4b55d4abda3e66e2b0a79e7b046c983191cb6113ea14769eea53a"}, - {file = "pyav-13.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6b48efcde03b9952ece3c8a8d9d74c685ff84ab91b60ea0ae6960638e30f3f31"}, - {file = "pyav-13.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:8404d5a5eef975862a35f2338ab8e7ae5d7a7f9af1ac748edef2aca4543f44cd"}, - {file = "pyav-13.1.0-cp311-cp311-macosx_10_13_x86_64.whl", hash = "sha256:a75d67dc80ea87f3987fafa5699410047af818b20691046c76d12e18faf3da68"}, - {file = "pyav-13.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4571175c8511d36128e94955b8cc64b0452e16da42c81ceae745946f88abf477"}, - {file = "pyav-13.1.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d7e1bd1157b21ca116c71696be62cd12bcaefc32179fd99efad90e0a76d300d3"}, - {file = "pyav-13.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:126386f2f8a0c57487a3ad947ac573385d41326b5ff111783135cc56a8869261"}, - {file = "pyav-13.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:57d94282ffd445ab055c36e150fee1a4a066e0aee259260c82792dbd349ec08d"}, - {file = "pyav-13.1.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:b2daf24cbc01ee666c4893e69aac8fc65bab598ea0029382857930f652a5e5ff"}, - {file = "pyav-13.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:83e3a67e2038b8cfd1d5dd2d1a1756ac1143a4c223b1723e64ac8bdb2045fb6a"}, - {file = "pyav-13.1.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:24de515886366b2c952e3827e7fb6466ad06f40b5cb34595a3f922899727be2b"}, - {file = "pyav-13.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:66befb4172facfaaf7f3be94b1659051378b0741f087d5b46d2a25b6bce34b4f"}, - {file = "pyav-13.1.0-cp312-cp312-win_amd64.whl", hash = 
"sha256:a65d060fceee59e5a1dd70e64bf6ffca55fff2b596af906b206d8ba0057bbdc5"}, - {file = "pyav-13.1.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:8450899845220a2a4f3ecc3eba0d5f864c169d98a9892be75447e59480162a09"}, - {file = "pyav-13.1.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:6b21df5daadbb019c4612cc89923202ad7a4dd259be905eba56887a14a344861"}, - {file = "pyav-13.1.0-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:21668b5ea9c4f046f61193a555d3deb2ca633b2ffb27a22a3b0eb03e8da64992"}, - {file = "pyav-13.1.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5ae2413955b7d76826d214d3a5b719714f352de7de318e45275811fa07b9efe3"}, - {file = "pyav-13.1.0-cp313-cp313-win_amd64.whl", hash = "sha256:a3ba8764bbf958e6c94b0dc7b07f670b4a759a157547a69cddc58eabba8aea1d"}, - {file = "pyav-13.1.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:c92ef209e12660c6a75f81c9d228adc1e07294b875bf91d9b2a58c44a728b2d3"}, - {file = "pyav-13.1.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:2e1855824313c17367c5ba658cf99d8b3169e0c3e0bdef5aa87a4c472c46d72b"}, - {file = "pyav-13.1.0-pp310-pypy310_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7c7a2eb79af1d3414509e31631a1b837b011eba4a21e311ae1308eca95a9f4db"}, - {file = "pyav-13.1.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:69566d6b5438259e2e4adc2975591d513b7f1280fbf4ed3e0901be10a4567470"}, - {file = "pyav-13.1.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:2960397dd97d7462effe8e6696557a91f24c2841edf391b0355734db8e4b02cd"}, - {file = "pyav-13.1.0.tar.gz", hash = "sha256:7049f4df6f94b4b727c1339a094f29c4178f3e0c290a01b9fcf0190a9890704c"}, + {file = "pyav-14.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:34244e5be070952a92eb4a66e48e7f24a6c340c5418e331644021a2c965e33b8"}, + {file = "pyav-14.0.1-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:265706b203b18a38308bb4c5c8e982ae91d2d8865b7c3b889607447d061e98c8"}, + {file = "pyav-14.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f5b2aca6e579d18ba9a1b0dbe50b5c5484bc09a701b439ee57a2442ac4b702"}, + {file = "pyav-14.0.1-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fc602f98007de775187f31a377fe9f2df69cba9ed0ef2db8f696e7f199a1f40f"}, + {file = "pyav-14.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b284357cfd366ff9a04d70db4487d15dbe7f376f512da90897673d3dd3f299c9"}, + {file = "pyav-14.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:a5c1f9b0927734a308fc8c34e4c66b2ca3965b0dfe3a0d6885c6bcdc5ed87b35"}, + {file = "pyav-14.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b12ba1cd4ba633eec7b87dc892072f727a70ad2de032280471ed66d6026c6854"}, + {file = "pyav-14.0.1-cp311-cp311-macosx_11_0_x86_64.whl", hash = "sha256:60fd747cfda70ae7b6250bf70f48b0d1a873335dfbdf5b381640e2392e96c545"}, + {file = "pyav-14.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:449e9705dce0205d1119a86a15b252f31862b366a1b7314547fd4026784b98d6"}, + {file = "pyav-14.0.1-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:77f23c25bb2e4beb00c3359dbb2f155d826d84db73c1cae219631c766e31f83d"}, + {file = "pyav-14.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cf31c61329ba09bc2901edcaba9e20af20ac7a04402efbc5912f8985c8adcc88"}, + {file = "pyav-14.0.1-cp311-cp311-win_amd64.whl", hash = 
"sha256:c16cfd56e104ac8edb68720693b825568efafa87a5c1d1ecd65c393821b086a9"}, + {file = "pyav-14.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:67006c9fc7b10a75d41b0e0e7639eba362fc14bd79b23ad75c05f8187468ed1e"}, + {file = "pyav-14.0.1-cp312-cp312-macosx_11_0_x86_64.whl", hash = "sha256:14a014159723067e6b8ebeb0d9c52c8af7d48771cfa56c38a2665d3accfa9d05"}, + {file = "pyav-14.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d4ba2035f4c592dc60acada5dba535fcae81ee7bd10b55dc9dd69d0d302f30cf"}, + {file = "pyav-14.0.1-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:93e64ee2accb5357fdb3c3925cd50307c52ee419de021e0fc17a7a56a5346bdc"}, + {file = "pyav-14.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc2f93223b71ee004e363f4cbbe11e247b545bef9060287df3a8d04f9b8047c0"}, + {file = "pyav-14.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:999c023dd610fdb2254be586043550eb5a48cc2f63751acf20876da5bfdfd988"}, + {file = "pyav-14.0.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:b3f9d94f45f001a76d8ab1ead92d3d0b6e98e2718316bc2a69ff714af9dd8ac0"}, + {file = "pyav-14.0.1-cp313-cp313-macosx_11_0_x86_64.whl", hash = "sha256:ffeaad9a9aeb9fca2697fa2e7ea760da455d074522e4e03917d8ca5372a63d2a"}, + {file = "pyav-14.0.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:65cb32ae4aaa515f925fae0c85f98cab3acc82e20f3f5b85c3ce698b69da294d"}, + {file = "pyav-14.0.1-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:77523b8443c6551981d8fafee74ecf5ae930df89061f3cec615657c5acb83f28"}, + {file = "pyav-14.0.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ebe79868f02e994a50617d6332274f9d5c1bdce75f44310d004498b66413bbf7"}, + {file = "pyav-14.0.1-cp313-cp313-win_amd64.whl", hash = "sha256:87fdccea6028746484e8c4cc4db138aef61082dffd653f68380a13cccf3fbd39"}, + {file = "pyav-14.0.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:ce7959abf510c74756b5c8fbe9aab7965f895aea307f74ceaa08b5a4b4fe1d2d"}, + {file = "pyav-14.0.1-pp310-pypy310_pp73-macosx_11_0_x86_64.whl", hash = "sha256:80dba3f28f0a21d8e72227b0935ddec7a9fc0032455634d6b2ab0208badda5fb"}, + {file = "pyav-14.0.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ff6bd435746af0e870490c12919c9b533e8408fdbe92c3fa96f7090dbc623e64"}, + {file = "pyav-14.0.1-pp310-pypy310_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bed45cc31617199af08ebbe4a28e94b23542c9b0d295432f07d64a0a851279ad"}, + {file = "pyav-14.0.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1c0e3abca4bf79447429f067b13002ba45d964fd9369430b197c8dd4bdcab133"}, + {file = "pyav-14.0.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:abeace5e8a36c67dde42861d631175359ecdedaa2c5e7c8258caabf51de6a8b9"}, + {file = "pyav-14.0.1.tar.gz", hash = "sha256:6eda146e02a4bfcc9b5df5ee8097528f538bc6db94acbbec2a0004d90a9dc472"}, ] [[package]] @@ -5041,6 +5006,138 @@ files = [ {file = "pycparser-2.22.tar.gz", hash = "sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6"}, ] +[[package]] +name = "pydantic" +version = "2.10.4" +description = "Data validation using Python type hints" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pydantic-2.10.4-py3-none-any.whl", hash = "sha256:597e135ea68be3a37552fb524bc7d0d66dcf93d395acd93a00682f1efcb8ee3d"}, + {file = "pydantic-2.10.4.tar.gz", hash = 
"sha256:82f12e9723da6de4fe2ba888b5971157b3be7ad914267dea8f05f82b28254f06"}, +] + +[package.dependencies] +annotated-types = ">=0.6.0" +pydantic-core = "2.27.2" +typing-extensions = ">=4.12.2" + +[package.extras] +email = ["email-validator (>=2.0.0)"] +timezone = ["tzdata"] + +[[package]] +name = "pydantic-core" +version = "2.27.2" +description = "Core functionality for Pydantic validation and serialization" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pydantic_core-2.27.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2d367ca20b2f14095a8f4fa1210f5a7b78b8a20009ecced6b12818f455b1e9fa"}, + {file = "pydantic_core-2.27.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:491a2b73db93fab69731eaee494f320faa4e093dbed776be1a829c2eb222c34c"}, + {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7969e133a6f183be60e9f6f56bfae753585680f3b7307a8e555a948d443cc05a"}, + {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3de9961f2a346257caf0aa508a4da705467f53778e9ef6fe744c038119737ef5"}, + {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e2bb4d3e5873c37bb3dd58714d4cd0b0e6238cebc4177ac8fe878f8b3aa8e74c"}, + {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:280d219beebb0752699480fe8f1dc61ab6615c2046d76b7ab7ee38858de0a4e7"}, + {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:47956ae78b6422cbd46f772f1746799cbb862de838fd8d1fbd34a82e05b0983a"}, + {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:14d4a5c49d2f009d62a2a7140d3064f686d17a5d1a268bc641954ba181880236"}, + {file = "pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:337b443af21d488716f8d0b6164de833e788aa6bd7e3a39c005febc1284f4962"}, + {file = "pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:03d0f86ea3184a12f41a2d23f7ccb79cdb5a18e06993f8a45baa8dfec746f0e9"}, + {file = "pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:7041c36f5680c6e0f08d922aed302e98b3745d97fe1589db0a3eebf6624523af"}, + {file = "pydantic_core-2.27.2-cp310-cp310-win32.whl", hash = "sha256:50a68f3e3819077be2c98110c1f9dcb3817e93f267ba80a2c05bb4f8799e2ff4"}, + {file = "pydantic_core-2.27.2-cp310-cp310-win_amd64.whl", hash = "sha256:e0fd26b16394ead34a424eecf8a31a1f5137094cabe84a1bcb10fa6ba39d3d31"}, + {file = "pydantic_core-2.27.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:8e10c99ef58cfdf2a66fc15d66b16c4a04f62bca39db589ae8cba08bc55331bc"}, + {file = "pydantic_core-2.27.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:26f32e0adf166a84d0cb63be85c562ca8a6fa8de28e5f0d92250c6b7e9e2aff7"}, + {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c19d1ea0673cd13cc2f872f6c9ab42acc4e4f492a7ca9d3795ce2b112dd7e15"}, + {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5e68c4446fe0810e959cdff46ab0a41ce2f2c86d227d96dc3847af0ba7def306"}, + {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d9640b0059ff4f14d1f37321b94061c6db164fbe49b334b31643e0528d100d99"}, + {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:40d02e7d45c9f8af700f3452f329ead92da4c5f4317ca9b896de7ce7199ea459"}, + {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1c1fd185014191700554795c99b347d64f2bb637966c4cfc16998a0ca700d048"}, + {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d81d2068e1c1228a565af076598f9e7451712700b673de8f502f0334f281387d"}, + {file = "pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:1a4207639fb02ec2dbb76227d7c751a20b1a6b4bc52850568e52260cae64ca3b"}, + {file = "pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:3de3ce3c9ddc8bbd88f6e0e304dea0e66d843ec9de1b0042b0911c1663ffd474"}, + {file = "pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:30c5f68ded0c36466acede341551106821043e9afaad516adfb6e8fa80a4e6a6"}, + {file = "pydantic_core-2.27.2-cp311-cp311-win32.whl", hash = "sha256:c70c26d2c99f78b125a3459f8afe1aed4d9687c24fd677c6a4436bc042e50d6c"}, + {file = "pydantic_core-2.27.2-cp311-cp311-win_amd64.whl", hash = "sha256:08e125dbdc505fa69ca7d9c499639ab6407cfa909214d500897d02afb816e7cc"}, + {file = "pydantic_core-2.27.2-cp311-cp311-win_arm64.whl", hash = "sha256:26f0d68d4b235a2bae0c3fc585c585b4ecc51382db0e3ba402a22cbc440915e4"}, + {file = "pydantic_core-2.27.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:9e0c8cfefa0ef83b4da9588448b6d8d2a2bf1a53c3f1ae5fca39eb3061e2f0b0"}, + {file = "pydantic_core-2.27.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:83097677b8e3bd7eaa6775720ec8e0405f1575015a463285a92bfdfe254529ef"}, + {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:172fce187655fece0c90d90a678424b013f8fbb0ca8b036ac266749c09438cb7"}, + {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:519f29f5213271eeeeb3093f662ba2fd512b91c5f188f3bb7b27bc5973816934"}, + {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:05e3a55d124407fffba0dd6b0c0cd056d10e983ceb4e5dbd10dda135c31071d6"}, + {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9c3ed807c7b91de05e63930188f19e921d1fe90de6b4f5cd43ee7fcc3525cb8c"}, + {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6fb4aadc0b9a0c063206846d603b92030eb6f03069151a625667f982887153e2"}, + {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:28ccb213807e037460326424ceb8b5245acb88f32f3d2777427476e1b32c48c4"}, + {file = "pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:de3cd1899e2c279b140adde9357c4495ed9d47131b4a4eaff9052f23398076b3"}, + {file = "pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:220f892729375e2d736b97d0e51466252ad84c51857d4d15f5e9692f9ef12be4"}, + {file = "pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a0fcd29cd6b4e74fe8ddd2c90330fd8edf2e30cb52acda47f06dd615ae72da57"}, + {file = "pydantic_core-2.27.2-cp312-cp312-win32.whl", hash = "sha256:1e2cb691ed9834cd6a8be61228471d0a503731abfb42f82458ff27be7b2186fc"}, + {file = "pydantic_core-2.27.2-cp312-cp312-win_amd64.whl", hash = "sha256:cc3f1a99a4f4f9dd1de4fe0312c114e740b5ddead65bb4102884b384c15d8bc9"}, + {file = "pydantic_core-2.27.2-cp312-cp312-win_arm64.whl", hash = "sha256:3911ac9284cd8a1792d3cb26a2da18f3ca26c6908cc434a18f730dc0db7bfa3b"}, + 
{file = "pydantic_core-2.27.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:7d14bd329640e63852364c306f4d23eb744e0f8193148d4044dd3dacdaacbd8b"}, + {file = "pydantic_core-2.27.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:82f91663004eb8ed30ff478d77c4d1179b3563df6cdb15c0817cd1cdaf34d154"}, + {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:71b24c7d61131bb83df10cc7e687433609963a944ccf45190cfc21e0887b08c9"}, + {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fa8e459d4954f608fa26116118bb67f56b93b209c39b008277ace29937453dc9"}, + {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ce8918cbebc8da707ba805b7fd0b382816858728ae7fe19a942080c24e5b7cd1"}, + {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:eda3f5c2a021bbc5d976107bb302e0131351c2ba54343f8a496dc8783d3d3a6a"}, + {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bd8086fa684c4775c27f03f062cbb9eaa6e17f064307e86b21b9e0abc9c0f02e"}, + {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8d9b3388db186ba0c099a6d20f0604a44eabdeef1777ddd94786cdae158729e4"}, + {file = "pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:7a66efda2387de898c8f38c0cf7f14fca0b51a8ef0b24bfea5849f1b3c95af27"}, + {file = "pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:18a101c168e4e092ab40dbc2503bdc0f62010e95d292b27827871dc85450d7ee"}, + {file = "pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ba5dd002f88b78a4215ed2f8ddbdf85e8513382820ba15ad5ad8955ce0ca19a1"}, + {file = "pydantic_core-2.27.2-cp313-cp313-win32.whl", hash = "sha256:1ebaf1d0481914d004a573394f4be3a7616334be70261007e47c2a6fe7e50130"}, + {file = "pydantic_core-2.27.2-cp313-cp313-win_amd64.whl", hash = "sha256:953101387ecf2f5652883208769a79e48db18c6df442568a0b5ccd8c2723abee"}, + {file = "pydantic_core-2.27.2-cp313-cp313-win_arm64.whl", hash = "sha256:ac4dbfd1691affb8f48c2c13241a2e3b60ff23247cbcf981759c768b6633cf8b"}, + {file = "pydantic_core-2.27.2-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:d3e8d504bdd3f10835468f29008d72fc8359d95c9c415ce6e767203db6127506"}, + {file = "pydantic_core-2.27.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:521eb9b7f036c9b6187f0b47318ab0d7ca14bd87f776240b90b21c1f4f149320"}, + {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:85210c4d99a0114f5a9481b44560d7d1e35e32cc5634c656bc48e590b669b145"}, + {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d716e2e30c6f140d7560ef1538953a5cd1a87264c737643d481f2779fc247fe1"}, + {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f66d89ba397d92f840f8654756196d93804278457b5fbede59598a1f9f90b228"}, + {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:669e193c1c576a58f132e3158f9dfa9662969edb1a250c54d8fa52590045f046"}, + {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fdbe7629b996647b99c01b37f11170a57ae675375b14b8c13b8518b8320ced5"}, + {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = 
"sha256:d262606bf386a5ba0b0af3b97f37c83d7011439e3dc1a9298f21efb292e42f1a"}, + {file = "pydantic_core-2.27.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:cabb9bcb7e0d97f74df8646f34fc76fbf793b7f6dc2438517d7a9e50eee4f14d"}, + {file = "pydantic_core-2.27.2-cp38-cp38-musllinux_1_1_armv7l.whl", hash = "sha256:d2d63f1215638d28221f664596b1ccb3944f6e25dd18cd3b86b0a4c408d5ebb9"}, + {file = "pydantic_core-2.27.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:bca101c00bff0adb45a833f8451b9105d9df18accb8743b08107d7ada14bd7da"}, + {file = "pydantic_core-2.27.2-cp38-cp38-win32.whl", hash = "sha256:f6f8e111843bbb0dee4cb6594cdc73e79b3329b526037ec242a3e49012495b3b"}, + {file = "pydantic_core-2.27.2-cp38-cp38-win_amd64.whl", hash = "sha256:fd1aea04935a508f62e0d0ef1f5ae968774a32afc306fb8545e06f5ff5cdf3ad"}, + {file = "pydantic_core-2.27.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:c10eb4f1659290b523af58fa7cffb452a61ad6ae5613404519aee4bfbf1df993"}, + {file = "pydantic_core-2.27.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ef592d4bad47296fb11f96cd7dc898b92e795032b4894dfb4076cfccd43a9308"}, + {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c61709a844acc6bf0b7dce7daae75195a10aac96a596ea1b776996414791ede4"}, + {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:42c5f762659e47fdb7b16956c71598292f60a03aa92f8b6351504359dbdba6cf"}, + {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4c9775e339e42e79ec99c441d9730fccf07414af63eac2f0e48e08fd38a64d76"}, + {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:57762139821c31847cfb2df63c12f725788bd9f04bc2fb392790959b8f70f118"}, + {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0d1e85068e818c73e048fe28cfc769040bb1f475524f4745a5dc621f75ac7630"}, + {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:097830ed52fd9e427942ff3b9bc17fab52913b2f50f2880dc4a5611446606a54"}, + {file = "pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:044a50963a614ecfae59bb1eaf7ea7efc4bc62f49ed594e18fa1e5d953c40e9f"}, + {file = "pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:4e0b4220ba5b40d727c7f879eac379b822eee5d8fff418e9d3381ee45b3b0362"}, + {file = "pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5e4f4bb20d75e9325cc9696c6802657b58bc1dbbe3022f32cc2b2b632c3fbb96"}, + {file = "pydantic_core-2.27.2-cp39-cp39-win32.whl", hash = "sha256:cca63613e90d001b9f2f9a9ceb276c308bfa2a43fafb75c8031c4f66039e8c6e"}, + {file = "pydantic_core-2.27.2-cp39-cp39-win_amd64.whl", hash = "sha256:77d1bca19b0f7021b3a982e6f903dcd5b2b06076def36a652e3907f596e29f67"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:2bf14caea37e91198329b828eae1618c068dfb8ef17bb33287a7ad4b61ac314e"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:b0cb791f5b45307caae8810c2023a184c74605ec3bcbb67d13846c28ff731ff8"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:688d3fd9fcb71f41c4c015c023d12a79d1c4c0732ec9eb35d96e3388a120dcf3"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:3d591580c34f4d731592f0e9fe40f9cc1b430d297eecc70b962e93c5c668f15f"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:82f986faf4e644ffc189a7f1aafc86e46ef70372bb153e7001e8afccc6e54133"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:bec317a27290e2537f922639cafd54990551725fc844249e64c523301d0822fc"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:0296abcb83a797db256b773f45773da397da75a08f5fcaef41f2044adec05f50"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:0d75070718e369e452075a6017fbf187f788e17ed67a3abd47fa934d001863d9"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:7e17b560be3c98a8e3aa66ce828bdebb9e9ac6ad5466fba92eb74c4c95cb1151"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:c33939a82924da9ed65dab5a65d427205a73181d8098e79b6b426bdf8ad4e656"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:00bad2484fa6bda1e216e7345a798bd37c68fb2d97558edd584942aa41b7d278"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c817e2b40aba42bac6f457498dacabc568c3b7a986fc9ba7c8d9d260b71485fb"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:251136cdad0cb722e93732cb45ca5299fb56e1344a833640bf93b2803f8d1bfd"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d2088237af596f0a524d3afc39ab3b036e8adb054ee57cbb1dcf8e09da5b29cc"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:d4041c0b966a84b4ae7a09832eb691a35aec90910cd2dbe7a208de59be77965b"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:8083d4e875ebe0b864ffef72a4304827015cff328a1be6e22cc850753bfb122b"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f141ee28a0ad2123b6611b6ceff018039df17f32ada8b534e6aa039545a3efb2"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7d0c8399fcc1848491f00e0314bd59fb34a9c008761bcb422a057670c3f65e35"}, + {file = "pydantic_core-2.27.2.tar.gz", hash = "sha256:eb026e5a4c1fee05726072337ff51d1efb6f59090b7da90d30ea58625b1ffb39"}, +] + +[package.dependencies] +typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0" + [[package]] name = "pygame" version = "2.6.1" @@ -5112,24 +5209,24 @@ files = [ [[package]] name = "pyglet" -version = "2.0.18" +version = "2.0.20" description = "pyglet is a cross-platform games and multimedia package." optional = true python-versions = ">=3.8" files = [ - {file = "pyglet-2.0.18-py3-none-any.whl", hash = "sha256:e592952ae0297e456c587b6486ed8c3e5f9d0c3519d517bb92dde5fdf4c26b41"}, - {file = "pyglet-2.0.18.tar.gz", hash = "sha256:7cf9238d70082a2da282759679f8a011cc979753a32224a8ead8ed80e48f99dc"}, + {file = "pyglet-2.0.20-py3-none-any.whl", hash = "sha256:341cdc506fe97c4d8c4fb35aac89cefcb0ca6bf59eddcf2d1078c327dde1f02e"}, + {file = "pyglet-2.0.20.tar.gz", hash = "sha256:702ea52b1fc1b6447904d2edd579212b29f1b3475e098ac49b57647a064accb7"}, ] [[package]] name = "pygments" -version = "2.18.0" +version = "2.19.0" description = "Pygments is a syntax highlighting package written in Python." 
optional = true python-versions = ">=3.8" files = [ - {file = "pygments-2.18.0-py3-none-any.whl", hash = "sha256:b8e6aca0523f3ab76fee51799c488e38782ac06eafcf95e7ba832985c8e7b13a"}, - {file = "pygments-2.18.0.tar.gz", hash = "sha256:786ff802f32e91311bff3889f6e9a86e81505fe99f2735bb6d60ae0c5004f199"}, + {file = "pygments-2.19.0-py3-none-any.whl", hash = "sha256:4755e6e64d22161d5b61432c0600c923c5927214e7c956e31c23923c89251a9b"}, + {file = "pygments-2.19.0.tar.gz", hash = "sha256:afc4146269910d4bdfabcd27c24923137a74d562a23a320a41a55ad303e19783"}, ] [package.extras] @@ -5137,73 +5234,67 @@ windows-terminal = ["colorama (>=0.4.6)"] [[package]] name = "pymunk" -version = "6.8.1" +version = "6.10.0" description = "Pymunk is a easy-to-use pythonic 2D physics library" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "pymunk-6.8.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:4203cb73ab1ecffbe6ff2c903542987828eec204acb012eba41592303a63a85c"}, - {file = "pymunk-6.8.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:aff5d00f05f78ab98f3cb699ba417db1eca1fe07ac88cb0f70a850d1f06d94bb"}, - {file = "pymunk-6.8.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1c9357179aba3501d250ce8a8b62ad59968c0e9be4ea330a31aab70d4907b5fd"}, - {file = "pymunk-6.8.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3c689886456f53554e8961bd4d3addc0bbe25999afa55c2990d59543dd6ad1bc"}, - {file = "pymunk-6.8.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:94cc4322041fa6ba429dee897e9d269339cd6fa15ea5b46783b7f67ccf31c8f4"}, - {file = "pymunk-6.8.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:acd808e4c9596d521308816eacc077eff758255713b156dad6d7d666f98d71ac"}, - {file = "pymunk-6.8.1-cp310-cp310-win32.whl", hash = "sha256:c17fd57e40fc2cfd63bef693a8a90cc9a033665ecebbd0cd989482bb188857ed"}, - {file = "pymunk-6.8.1-cp310-cp310-win_amd64.whl", hash = "sha256:655722a1d907ab66c2a5aaffd469cd997aa79f02860dd974e7475783945bd1a0"}, - {file = "pymunk-6.8.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:8a6d1615c7665dabd33768e40b81eaf1bbce65d36d54f0cc17d415aa1d98f249"}, - {file = "pymunk-6.8.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a0980cc537ef149af688cd0dbf0e447f924eb05818e9cb92c7950342e5eba7ce"}, - {file = "pymunk-6.8.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4d353ffc0396115ebcd8a006fc97106d3b3f91adc842fa0f451c872cdbb21128"}, - {file = "pymunk-6.8.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5e6bd6600628ad87b56645ee0ffc24d6623f2c941b5c4aa5058722ab17335d80"}, - {file = "pymunk-6.8.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:6e66d22814cccada953d126dadc0578dca0e6eb39431523e533701d3ba4c3fac"}, - {file = "pymunk-6.8.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:84bc02b5512cf60c38cf9c9ecff24eb733437a28de20f4b362a1c27400e23ed3"}, - {file = "pymunk-6.8.1-cp311-cp311-win32.whl", hash = "sha256:64ea1acf1c2f29a03c2121db39107253d67c4f800e8c472c2a599808103b5e99"}, - {file = "pymunk-6.8.1-cp311-cp311-win_amd64.whl", hash = "sha256:2ef1a82fb68dd3f2e3351591cbf594fce4e49a80271ebb7af643d41a53d95a23"}, - {file = "pymunk-6.8.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c2961a239430f6326932f51274746fd5236d6b274d7b2b84fd44b9c2a73a888b"}, - {file = "pymunk-6.8.1-cp312-cp312-macosx_11_0_arm64.whl", hash = 
"sha256:cb1ae0194393165c57974c8edc2cfda144a9b20dd2906cb38ec22dfb65c7fa88"}, - {file = "pymunk-6.8.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2b8320917fc7ed750ccc22a8f67a5b5a375b31c5492d491ef3783e87537887a4"}, - {file = "pymunk-6.8.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:704da8ff41eb06d4987003a8a0f2ef603dde9bf224bf6f85356f6dace5e23ac5"}, - {file = "pymunk-6.8.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:6e5c4e596cf9d715cae26330a464335f39ca25a61a923f14eaac720b82b6a6be"}, - {file = "pymunk-6.8.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:9d4b9c31cea1dc75719cb3ae45505034ed51bde92799b598f8fb234b06dac33a"}, - {file = "pymunk-6.8.1-cp312-cp312-win32.whl", hash = "sha256:d7e12be02ed37e323598ccae914386f55eb20d5ee08c013df1b43141ef8a6b56"}, - {file = "pymunk-6.8.1-cp312-cp312-win_amd64.whl", hash = "sha256:e510666591d6cef0ab5f7752f2796d244c3131980a35106a6f3a0c03c34a378c"}, - {file = "pymunk-6.8.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:abbf77226e4e8a7e9140ae452b764954c071ba62031e3db9ea65549c9f88e495"}, - {file = "pymunk-6.8.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:250fdd54cc8dc3f0ebb67b41e8caf1400ce3ca882c513540a7925dac5cec6392"}, - {file = "pymunk-6.8.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:279b3896a79c8701a69a5cefdcb7c98c4bc50687208e488f90c50bd5014e9871"}, - {file = "pymunk-6.8.1-cp37-cp37m-musllinux_1_2_i686.whl", hash = "sha256:2a844c7be3cb39da5dc546e30158f6f971fc509e63b3937c34be5a54b9d6457d"}, - {file = "pymunk-6.8.1-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:e2a1480d9fa6db0ff3815a4b70a499be7992a648a4fcd3883677ec52b6e0d1cd"}, - {file = "pymunk-6.8.1-cp37-cp37m-win32.whl", hash = "sha256:da606cd607ea0bed9e98c7bf414f588feb5decf66530a60369e095ac7a9d0c14"}, - {file = "pymunk-6.8.1-cp37-cp37m-win_amd64.whl", hash = "sha256:add65195ebc7e9b94b1aaaca16064f4290094ead3971d0b53294a9531e39e96f"}, - {file = "pymunk-6.8.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:821eaa9b843e385315f1981fc97a352dc20bdc65c645e852bd11382764bad860"}, - {file = "pymunk-6.8.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:05ac390f59a0ae385af7912b97c5a5240694a3c3b2775e3975abbfdafdb7edc4"}, - {file = "pymunk-6.8.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:59f4b0b8af9ebdebbd2831452d1ebcd29cf0ae2a7968c24d40c90fdcef8746d9"}, - {file = "pymunk-6.8.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:afce20a8eea3da231a366329b513172b60009615dab4ebcc3c2a3f320ec33306"}, - {file = "pymunk-6.8.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:7b6d8bec17f4ce008eced7c46fdc80aa563ec5a9b383471031e7a94dece5eb8d"}, - {file = "pymunk-6.8.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:00f66f6232263e80432fa34a3cd05065f6a1c9c2a29b50a26938a11b8a2bf6eb"}, - {file = "pymunk-6.8.1-cp38-cp38-win32.whl", hash = "sha256:19c2589a53ad97ce08473b69b76c5af6fb24b90f1da734ccfaaae21f50616095"}, - {file = "pymunk-6.8.1-cp38-cp38-win_amd64.whl", hash = "sha256:6de1d49b6976ea46589caba9275165b85adbdcd93d744ae5e30ddce853f54502"}, - {file = "pymunk-6.8.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:59c333e03f1d772c6872b973629b1b780b117744e3ef74badbb02c2ecd4cd28d"}, - {file = "pymunk-6.8.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:923c53ec3306b3cea4d9f5bc9beefb668e4318187057d3a89c470fa88a912bc1"}, - {file = 
"pymunk-6.8.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dcb5fc5ae53a78aadc123750d9341ec3e99539a9f3ba7d2fca70ecfc690272f8"}, - {file = "pymunk-6.8.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4f18c715c837cfc1c76f2865a79cee72e934c6bb3197042d328e6af9cda2e85f"}, - {file = "pymunk-6.8.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:9226ac3008d47e55c8cef3464b0f257ff1e613baac46b8adebbf832421ba008d"}, - {file = "pymunk-6.8.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:1e8b00579859194250fe574828d5f83907fa71af3fa1f5eb6c9887343feb2e7f"}, - {file = "pymunk-6.8.1-cp39-cp39-win32.whl", hash = "sha256:96ac4537c23fe5310339ef8dec82a923a0540ab16668ece2ece26cb8986a4358"}, - {file = "pymunk-6.8.1-cp39-cp39-win_amd64.whl", hash = "sha256:e2b7f83b2d1fc8e6b2b01f8627e52bc84a10758c53a58df7a932958d9593de71"}, - {file = "pymunk-6.8.1-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:8aa4fc263155f9f515b371a8d428f1769cdcebe0e772a26990c8a2ba2e6240f2"}, - {file = "pymunk-6.8.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:67ba6be15d3d06b49f7693d3ad3f271638e4991791edf968a292de0185f3a25d"}, - {file = "pymunk-6.8.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:85624fab1f23a757a689d3c4404ba8629eeccbb0d37cd937d6d381e47f9e8bc3"}, - {file = "pymunk-6.8.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8c12c542e2ff8a953b390473dea02dc84e5d40d7b71f87dd36ce9ab242718444"}, - {file = "pymunk-6.8.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:4eadb3f96abcb36c570a0e560d81cdafd2bc4b5b89f1230e42ec7581405ab643"}, - {file = "pymunk-6.8.1-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:66c0a56c5c816519c917f996361f3fb673d3ebccaad8c5d4d14764629a14e906"}, - {file = "pymunk-6.8.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:90cd20090aab3001e400406aa782dcfc798adb949e98fcd84182d108da050c00"}, - {file = "pymunk-6.8.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c58fc4fcfc1ea988596bc1198270ccd255572a95d929b43a1fc40424cb7a7d64"}, - {file = "pymunk-6.8.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e94c41a41c1019bec97db6624ae793fb2dceb68f668a80c74c7def3d80286f3f"}, - {file = "pymunk-6.8.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:2900fed03f8a4cccbb94c640bdadb1230f0cf3f9667b775df7bd563bbd0c6231"}, - {file = "pymunk-6.8.1.tar.gz", hash = "sha256:e04061e9d05c6163b83bb15a39b595da199bb5f500b06acb5f2d50f3d61ef429"}, -] - -[package.dependencies] -cffi = ">=1.15.0" + {file = "pymunk-6.10.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d881a4fe98d512d7fd20e6af14f721e370faa7188906814dbf21330be1f8339a"}, + {file = "pymunk-6.10.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e8430bc96f6c8bfe60910c71e01e85b77ed0d173e1ba8c63015d2e2dbb335da5"}, + {file = "pymunk-6.10.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ac106eb41b32eabeee67a4253c0a4d220efb77667f4adb34c1bd9d034f7fef09"}, + {file = "pymunk-6.10.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b8865fde401c2890eda5f45633e9b6155b1d2fd13f2be3b141b51f955062b2c7"}, + {file = "pymunk-6.10.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:ad0ab5dad499731f65efa4220017e5a13ac16b0c0f2d1c322a6e5e415462785d"}, + {file = 
"pymunk-6.10.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:9a39c4625a25635d1a67b0a4daaf51260b699e65fce1ba0232795fd498066fa4"}, + {file = "pymunk-6.10.0-cp310-cp310-win32.whl", hash = "sha256:c9227346e2064e35b6ffff5c2066a7fa78d33de850b19400e960dd6a5399096c"}, + {file = "pymunk-6.10.0-cp310-cp310-win_amd64.whl", hash = "sha256:828ec1e7343848e06bcb483eda6451065d9b376dbd3befd604810ba28861392f"}, + {file = "pymunk-6.10.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ea8e2b3695fbcf7698a6591809d505caeeb291cbd98b08c0d5dc905508c244b8"}, + {file = "pymunk-6.10.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:62213baf7de3fe38e792f6cdadb72388298c05b8e463edc1b4d0991b41376eea"}, + {file = "pymunk-6.10.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:78da9d520348f2f2b33109534895b4aa371882ad917cec4912621cf2b5f74dcc"}, + {file = "pymunk-6.10.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:64578c2f31a27d75b6e6ad30bbd92a3ea58af926b0bdb3037638fd36d8d1c34d"}, + {file = "pymunk-6.10.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:d6d775acf2b1b1c5c70d4b1db96182f1edf5b6a9950893ad3d3b6ccaf9b83796"}, + {file = "pymunk-6.10.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:acf5a45e340fc43a4e03e72f6553c3c16c7e8d80ea36a5c24dc2569f12fd56ad"}, + {file = "pymunk-6.10.0-cp311-cp311-win32.whl", hash = "sha256:0c4052cd297ed1eeb640384a7ca27a7fd8db08ce4836e0192bf37d40216d5a26"}, + {file = "pymunk-6.10.0-cp311-cp311-win_amd64.whl", hash = "sha256:5897600699a60998740e9c9ffe23702379db26b3de61c104b7cfd5cdddbf42a5"}, + {file = "pymunk-6.10.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:979469dcd45cc3ee51bc14a02dafd0ff6c45eee62c3f12a320f46f640bf66243"}, + {file = "pymunk-6.10.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:75d7a820609d7ef175c838aee15a78be55379917e5d2adba72c404baaad758e8"}, + {file = "pymunk-6.10.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:601695b508576cf195055209d806f3d9b35338345503f13176b18f96ccfe2a9e"}, + {file = "pymunk-6.10.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:35e647f7a37ada409d6c2e5e10457f8123297bc7737a3f40167698c9229b81a4"}, + {file = "pymunk-6.10.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:a0c3df60e090913315e1e7f9943842b6e74aa90cb3fc6601a4d302c5bede8a1e"}, + {file = "pymunk-6.10.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1ecda33e4475844917648fc0e8a42e7231439d4bb163721dda932e0da3fd715b"}, + {file = "pymunk-6.10.0-cp312-cp312-win32.whl", hash = "sha256:cfabc14acb5bf63d3d42121aed45206e02ac5f0c11d2d5361a1721fcee918947"}, + {file = "pymunk-6.10.0-cp312-cp312-win_amd64.whl", hash = "sha256:e211e7995b8dd18f859cfee27c1700d1b2a11d6909b01d18f8e26b72759895f0"}, + {file = "pymunk-6.10.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:fc5961cc462f4604e18790e04a7c422ceb36487645a7fefded74d8b7bf55ad80"}, + {file = "pymunk-6.10.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:adf6f78a6c00d27f8de02cecd34dcdb4571039149c98f072ee0c33c519fcfda1"}, + {file = "pymunk-6.10.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f1db0d526d418a1cfa04244e5f5fee46e61f090722862a78681a0c7fa75c906"}, + {file = "pymunk-6.10.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2d7ede398a33daeb50852aff1516bbe97e088539ba6e7551a5e1e4caff162bfd"}, + {file = 
"pymunk-6.10.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:11e64a4dd2656bfd9d75f7ed89e33a0089f555115b38ef9f5ddbb5b14591b129"}, + {file = "pymunk-6.10.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:8a415290c5e1bc3bdc2a158dc8f9966b80be5782744e64f85e5564709c06c1b1"}, + {file = "pymunk-6.10.0-cp313-cp313-win32.whl", hash = "sha256:86271625f9a3e5801ad482af3a747378811c61369b089f9350cc9311701c75b3"}, + {file = "pymunk-6.10.0-cp313-cp313-win_amd64.whl", hash = "sha256:2434b66b004d1b218fff3ea09399e7e6debf726868e73a4561ff76606dd1b800"}, + {file = "pymunk-6.10.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9b2ba8f95452ec703eb21e842fc0a3a0ac30538c4d3fb8801e9aa320d89389ed"}, + {file = "pymunk-6.10.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:308a7b901818319616bc4bcd9a1bd2d45e25df98d8989b4a4764875abeee3cc5"}, + {file = "pymunk-6.10.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9d48ad08dd5c498a346bb554c6afe6464739ca15b40a93e6000220a1cf388455"}, + {file = "pymunk-6.10.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:af5e390bb01ca1e2ba5fdee589e35b23aabaef6310a3ea5e0836e41d0abed511"}, + {file = "pymunk-6.10.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:d999ee73139c54d61bf28c36dd3c4c8185c8e0730b2d378546ea51c0f9623a47"}, + {file = "pymunk-6.10.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:fe43a8af85b76270c480ff2704b54d8b14b1f5e6532e6c04998110343d154e62"}, + {file = "pymunk-6.10.0-cp38-cp38-win32.whl", hash = "sha256:f0d0ed9e323308a6fc77576fad31376debc876165a81bd7cb196f4b7144d6572"}, + {file = "pymunk-6.10.0-cp38-cp38-win_amd64.whl", hash = "sha256:be0a5b665f2e5757a91e537a9623c9d43e519e71f1a288a1b917a5626c491f9f"}, + {file = "pymunk-6.10.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4123103d5107ee7018c833d529289f00a284f19d80f3ad95efb9a4a25ce42397"}, + {file = "pymunk-6.10.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f7d6f33ff8016861255229d8db0e34cce1c1db7947432a76e5cb16bdb3f716a1"}, + {file = "pymunk-6.10.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f38d4a1074a044a10d8c4d463cd548b725537093e3770ff6102776c374db3799"}, + {file = "pymunk-6.10.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9aa4cfc3f74a0a413729a8e08aea3c191000fb5e3cec388f19802e59a03fc92b"}, + {file = "pymunk-6.10.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:f638b7e7125b11a79fed58029c28b61c4905d56565b68ac728cb207cd09296d3"}, + {file = "pymunk-6.10.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:2118d0e18a4e358a999848bd0e17f7ac676adef69ec072c199c10cefea5b24b8"}, + {file = "pymunk-6.10.0-cp39-cp39-win32.whl", hash = "sha256:6cab50a5a29345a8958bc193d47e1835135d1b7628f5b41c573a3420cc4a1a77"}, + {file = "pymunk-6.10.0-cp39-cp39-win_amd64.whl", hash = "sha256:9e957025f24b1b1f361fbcb39f4f4c12f2f05ae0c658da2cdcf1c0f23aff2327"}, + {file = "pymunk-6.10.0.tar.gz", hash = "sha256:62f7c7247c05b8441fb0e1937532c3d3b9c21a46aa48f3881cf5c892cf2acb25"}, +] + +[package.dependencies] +cffi = [ + {version = ">=1.17.1", markers = "platform_system != \"Emscripten\""}, + {version = ">1.14.0", markers = "platform_system == \"Emscripten\""}, +] [package.extras] dev = ["aafigure", "matplotlib", "numpy", "pygame", "pyglet (<2.0.0)", "sphinx", "wheel"] @@ -5227,106 +5318,111 @@ six = "*" [[package]] name = "pyobjc-core" -version = "10.3.1" +version = "10.3.2" description = "Python<->ObjC Interoperability Module" optional = true 
python-versions = ">=3.8" files = [ - {file = "pyobjc_core-10.3.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:ea46d2cda17921e417085ac6286d43ae448113158afcf39e0abe484c58fb3d78"}, - {file = "pyobjc_core-10.3.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:899d3c84d2933d292c808f385dc881a140cf08632907845043a333a9d7c899f9"}, - {file = "pyobjc_core-10.3.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:6ff5823d13d0a534cdc17fa4ad47cf5bee4846ce0fd27fc40012e12b46db571b"}, - {file = "pyobjc_core-10.3.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:2581e8e68885bcb0e11ec619e81ef28e08ee3fac4de20d8cc83bc5af5bcf4a90"}, - {file = "pyobjc_core-10.3.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ea98d4c2ec39ca29e62e0327db21418696161fb138ee6278daf2acbedf7ce504"}, - {file = "pyobjc_core-10.3.1-cp38-cp38-macosx_11_0_universal2.whl", hash = "sha256:4c179c26ee2123d0aabffb9dbc60324b62b6f8614fb2c2328b09386ef59ef6d8"}, - {file = "pyobjc_core-10.3.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:cb901fce65c9be420c40d8a6ee6fff5ff27c6945f44fd7191989b982baa66dea"}, - {file = "pyobjc_core-10.3.1.tar.gz", hash = "sha256:b204a80ccc070f9ab3f8af423a3a25a6fd787e228508d00c4c30f8ac538ba720"}, + {file = "pyobjc_core-10.3.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:acb40672d682851a5c7fd84e5041c4d069b62076168d72591abb5fcc871bb039"}, + {file = "pyobjc_core-10.3.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:cea5e77659619ad93c782ca07644b6efe7d7ec6f59e46128843a0a87c1af511a"}, + {file = "pyobjc_core-10.3.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:16644a92fb9661de841ba6115e5354db06a1d193a5e239046e840013c7b3874d"}, + {file = "pyobjc_core-10.3.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:76b8b911d94501dac89821df349b1860bb770dce102a1a293f524b5b09dd9462"}, + {file = "pyobjc_core-10.3.2-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:8c6288fdb210b64115760a4504efbc4daffdc390d309e9318eb0e3e3b78d2828"}, + {file = "pyobjc_core-10.3.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:87901e9f7032f33eb4fa884e407bf2744d5a0791b379bfca783982a02be3f7fb"}, + {file = "pyobjc_core-10.3.2-cp38-cp38-macosx_11_0_universal2.whl", hash = "sha256:636971ab48a4198ca129e149fe58ccf85a7b4a9b93d27f5ae920d88eb2655431"}, + {file = "pyobjc_core-10.3.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:48e9ac3af42b2340dae709a8b894f5ef7e5132d8546adcd1797cffcc449dabdc"}, + {file = "pyobjc_core-10.3.2.tar.gz", hash = "sha256:dbf1475d864ce594288ce03e94e3a98dc7f0e4639971eb1e312bdf6661c21e0e"}, ] [[package]] name = "pyobjc-framework-applicationservices" -version = "10.3.1" +version = "10.3.2" description = "Wrappers for the framework ApplicationServices on macOS" optional = true python-versions = ">=3.8" files = [ - {file = "pyobjc_framework_ApplicationServices-10.3.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:b694260d423c470cb90c3a7009cfde93e332ea6fb4b9b9526ad3acbd33460e3d"}, - {file = "pyobjc_framework_ApplicationServices-10.3.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:d886ba1f65df47b77ff7546f3fc9bc7d08cfb6b3c04433b719f6b0689a2c0d1f"}, - {file = "pyobjc_framework_ApplicationServices-10.3.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:be157f2c3ffb254064ef38249670af8cada5e519a714d2aa5da3740934d89bc8"}, - {file = "pyobjc_framework_ApplicationServices-10.3.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:57737f41731661e4a3b78793ec9173f61242a32fa560c3e4e58484465d049c32"}, - {file = 
"pyobjc_framework_ApplicationServices-10.3.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:c429eca69ee675e781e4e55f79e939196b47f02560ad865b1ba9ac753b90bd77"}, - {file = "pyobjc_framework_ApplicationServices-10.3.1-cp38-cp38-macosx_11_0_universal2.whl", hash = "sha256:4f1814a17041a20adca454044080b52e39a4ebc567ad2c6a48866dd4beaa192a"}, - {file = "pyobjc_framework_ApplicationServices-10.3.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:1252f1137f83eb2c6b9968d8c591363e8859dd2484bc9441d8f365bcfb43a0e4"}, - {file = "pyobjc_framework_applicationservices-10.3.1.tar.gz", hash = "sha256:f27cb64aa4d129ce671fd42638c985eb2a56d544214a95fe3214a007eacc4790"}, + {file = "pyobjc_framework_ApplicationServices-10.3.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7e0d5d7d23a406508d59fee53bb91b1f559c055d744edc3172669b3fb0f9941b"}, + {file = "pyobjc_framework_ApplicationServices-10.3.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:f0a0b47a0371246a02efcf9335ae3d18166e80e4237e25c25a13993f8df5cc1d"}, + {file = "pyobjc_framework_ApplicationServices-10.3.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:b9174444599b6adf37c1d28915445d716324f1cdc70a1818f7cb4f181caeee1b"}, + {file = "pyobjc_framework_ApplicationServices-10.3.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:18ea759e4792d3ed9e8b94f0d96f6fece647e365d0bb09bb935c32262822fe01"}, + {file = "pyobjc_framework_ApplicationServices-10.3.2-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:1b1db81225b993cd6f93c7271e13b0bbdfd3c89fae6f7111b21dd8933fab1269"}, + {file = "pyobjc_framework_ApplicationServices-10.3.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ae434e7812c82bf959efaa1f7592bd3bb2ea47ce4eb90e4106ff901d81ecb49c"}, + {file = "pyobjc_framework_ApplicationServices-10.3.2-cp38-cp38-macosx_11_0_universal2.whl", hash = "sha256:3ba30d55f0c31066e20c850c3ddeef4e728805d1957e235d0dcec6cadd3d4b90"}, + {file = "pyobjc_framework_ApplicationServices-10.3.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:323121f45aaa09dd8607e0554beb831184921ecaf69cd540debd91f2926d3b06"}, + {file = "pyobjc_framework_applicationservices-10.3.2.tar.gz", hash = "sha256:2116c3854ac07c022268eebc7cb40ccba30727df78442e57e0280b5193c8183c"}, ] [package.dependencies] -pyobjc-core = ">=10.3.1" -pyobjc-framework-Cocoa = ">=10.3.1" -pyobjc-framework-CoreText = ">=10.3.1" -pyobjc-framework-Quartz = ">=10.3.1" +pyobjc-core = ">=10.3.2" +pyobjc-framework-Cocoa = ">=10.3.2" +pyobjc-framework-CoreText = ">=10.3.2" +pyobjc-framework-Quartz = ">=10.3.2" [[package]] name = "pyobjc-framework-cocoa" -version = "10.3.1" +version = "10.3.2" description = "Wrappers for the Cocoa frameworks on macOS" optional = true python-versions = ">=3.8" files = [ - {file = "pyobjc_framework_Cocoa-10.3.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:4cb4f8491ab4d9b59f5187e42383f819f7a46306a4fa25b84f126776305291d1"}, - {file = "pyobjc_framework_Cocoa-10.3.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:5f31021f4f8fdf873b57a97ee1f3c1620dbe285e0b4eaed73dd0005eb72fd773"}, - {file = "pyobjc_framework_Cocoa-10.3.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:11b4e0bad4bbb44a4edda128612f03cdeab38644bbf174de0c13129715497296"}, - {file = "pyobjc_framework_Cocoa-10.3.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:de5e62e5ccf2871a94acf3bf79646b20ea893cc9db78afa8d1fe1b0d0f7cbdb0"}, - {file = "pyobjc_framework_Cocoa-10.3.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = 
"sha256:6c5af24610ab639bd1f521ce4500484b40787f898f691b7a23da3339e6bc8b90"}, - {file = "pyobjc_framework_Cocoa-10.3.1-cp38-cp38-macosx_11_0_universal2.whl", hash = "sha256:a7151186bb7805deea434fae9a4423335e6371d105f29e73cc2036c6779a9dbc"}, - {file = "pyobjc_framework_Cocoa-10.3.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:743d2a1ac08027fd09eab65814c79002a1d0421d7c0074ffd1217b6560889744"}, - {file = "pyobjc_framework_cocoa-10.3.1.tar.gz", hash = "sha256:1cf20714daaa986b488fb62d69713049f635c9d41a60c8da97d835710445281a"}, + {file = "pyobjc_framework_Cocoa-10.3.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:61f44c2adab28fdf3aa3d593c9497a2d9ceb9583ed9814adb48828c385d83ff4"}, + {file = "pyobjc_framework_Cocoa-10.3.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:7caaf8b260e81b27b7b787332846f644b9423bfc1536f6ec24edbde59ab77a87"}, + {file = "pyobjc_framework_Cocoa-10.3.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:c49e99fc4b9e613fb308651b99d52a8a9ae9916c8ef27aa2f5d585b6678a59bf"}, + {file = "pyobjc_framework_Cocoa-10.3.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:f1161b5713f9b9934c12649d73a6749617172e240f9431eff9e22175262fdfda"}, + {file = "pyobjc_framework_Cocoa-10.3.2-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:08e48b9ee4eb393447b2b781d16663b954bd10a26927df74f92e924c05568d89"}, + {file = "pyobjc_framework_Cocoa-10.3.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:7faa448d2038ae0e0287a326d390002e744bb6470e45995e2dbd16c892e4495a"}, + {file = "pyobjc_framework_Cocoa-10.3.2-cp38-cp38-macosx_11_0_universal2.whl", hash = "sha256:fcd53fee2be9708576617994b107aedc2c40824b648cd51e780e8399c0a447b6"}, + {file = "pyobjc_framework_Cocoa-10.3.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:838fcf0d10674bde9ff64a3f20c0e188f2dc5e804476d80509b81c4ac1dabc59"}, + {file = "pyobjc_framework_cocoa-10.3.2.tar.gz", hash = "sha256:673968e5435845bef969bfe374f31a1a6dc660c98608d2b84d5cae6eafa5c39d"}, ] [package.dependencies] -pyobjc-core = ">=10.3.1" +pyobjc-core = ">=10.3.2" [[package]] name = "pyobjc-framework-coretext" -version = "10.3.1" +version = "10.3.2" description = "Wrappers for the framework CoreText on macOS" optional = true python-versions = ">=3.8" files = [ - {file = "pyobjc_framework_CoreText-10.3.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:dd6123cfccc38e32be884d1a13fb62bd636ecb192b9e8ae2b8011c977dec229e"}, - {file = "pyobjc_framework_CoreText-10.3.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:834142a14235bd80edaef8d3a28d1e203ed3c988810a9b78005df7c561390288"}, - {file = "pyobjc_framework_CoreText-10.3.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:ae6c09d29eeaf30a67aa70e08a465b1f1e47d12e22b3a34ae8bc8fdb7e2e7342"}, - {file = "pyobjc_framework_CoreText-10.3.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:51ca95df1db9401366f11a7467f64be57f9a0630d31c357237d4062df0216938"}, - {file = "pyobjc_framework_CoreText-10.3.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:8b75bdc267945b3f33c937c108d79405baf9d7c4cd530f922e5df243082a5031"}, - {file = "pyobjc_framework_CoreText-10.3.1-cp38-cp38-macosx_11_0_universal2.whl", hash = "sha256:029b24c338f58fc32a004256d8559507e4f366dfe4eb09d3144273d536012d90"}, - {file = "pyobjc_framework_CoreText-10.3.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:418a55047dbff999fcd2b78cca167c4105587020b6c51567cfa28993bbfdc8ed"}, - {file = "pyobjc_framework_coretext-10.3.1.tar.gz", hash = 
"sha256:b8fa2d5078ed774431ae64ba886156e319aec0b8c6cc23dabfd86778265b416f"}, + {file = "pyobjc_framework_CoreText-10.3.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:ead0b5b28031259d8874d641887fcbe106a8325773e142b054532859eb3d9ad3"}, + {file = "pyobjc_framework_CoreText-10.3.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:c3b3cdf462442294319472bdacb013ce57f63f99325fa885b4b4a54a25bce201"}, + {file = "pyobjc_framework_CoreText-10.3.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:6be644434ac69969cbf3cd4638ab0dfa5485da399d0e79e52b006658346d3881"}, + {file = "pyobjc_framework_CoreText-10.3.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:1721a16419dd75cedf87239fcb8e4739057d3b63d23378f4b38bda12acbe815b"}, + {file = "pyobjc_framework_CoreText-10.3.2-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:685f3b3c2a65bf0f6709ea0e420ee1dac2610c939fe151a055feb8e7b477b845"}, + {file = "pyobjc_framework_CoreText-10.3.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:d39bb096d27707f905f305b820fc29e3b5d55d704a6fd9520398e295d4a2cce6"}, + {file = "pyobjc_framework_CoreText-10.3.2-cp38-cp38-macosx_11_0_universal2.whl", hash = "sha256:6834b003ffe652f0de92144a34b9ce2d4b000828df9c4d717be8bc955076b588"}, + {file = "pyobjc_framework_CoreText-10.3.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:e76bab8328a729939035962d0cbbd2a191b3c02fae85431ea77b4469aa0c491d"}, + {file = "pyobjc_framework_coretext-10.3.2.tar.gz", hash = "sha256:b1184146c628ba59c21c59eaa8e12256118daf8823deb7fb12013ecdfbc7f578"}, ] [package.dependencies] -pyobjc-core = ">=10.3.1" -pyobjc-framework-Cocoa = ">=10.3.1" -pyobjc-framework-Quartz = ">=10.3.1" +pyobjc-core = ">=10.3.2" +pyobjc-framework-Cocoa = ">=10.3.2" +pyobjc-framework-Quartz = ">=10.3.2" [[package]] name = "pyobjc-framework-quartz" -version = "10.3.1" +version = "10.3.2" description = "Wrappers for the Quartz frameworks on macOS" optional = true python-versions = ">=3.8" files = [ - {file = "pyobjc_framework_Quartz-10.3.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:5ef4fd315ed2bc42ef77fdeb2bae28a88ec986bd7b8079a87ba3b3475348f96e"}, - {file = "pyobjc_framework_Quartz-10.3.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:96578d4a3e70164efe44ad7dc320ecd4e211758ffcde5dcd694de1bbdfe090a4"}, - {file = "pyobjc_framework_Quartz-10.3.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:ca35f92486869a41847a1703bb176aab8a53dbfd8e678d1f4d68d8e6e1581c71"}, - {file = "pyobjc_framework_Quartz-10.3.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:00a0933267e3a46ea4afcc35d117b2efb920f06de797fa66279c52e7057e3590"}, - {file = "pyobjc_framework_Quartz-10.3.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:a161bedb4c5257a02ad56a910cd7eefb28bdb0ea78607df0d70ed4efe4ea54c1"}, - {file = "pyobjc_framework_Quartz-10.3.1-cp38-cp38-macosx_11_0_universal2.whl", hash = "sha256:d7a8028e117a94923a511944bfa9daf9744e212f06cf89010c60934a479863a5"}, - {file = "pyobjc_framework_Quartz-10.3.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:de00c983b3267eb26fa42c6ed9f15e2bf006bde8afa7fe2b390646aa21a5d6fc"}, - {file = "pyobjc_framework_quartz-10.3.1.tar.gz", hash = "sha256:b6d7e346d735c9a7f147cd78e6da79eeae416a0b7d3874644c83a23786c6f886"}, + {file = "pyobjc_framework_Quartz-10.3.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:5574754c23895269751c2b78d2d2b33b6de415f562534a1432484558f0a5a293"}, + {file = "pyobjc_framework_Quartz-10.3.2-cp311-cp311-macosx_10_9_universal2.whl", hash = 
"sha256:4697f3ef1991f7877c201778005dc4098ced3d19d938ebf916384c8f795488d3"}, + {file = "pyobjc_framework_Quartz-10.3.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:604188ee8ff051ffe74a12cb3274403fe9c3fa02b15fc4132685c0f74285ffe5"}, + {file = "pyobjc_framework_Quartz-10.3.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:9e27fb446e012c9571bc163cff5f3036e9e6fa5caca06b5d7882ad1c6b6aaf0c"}, + {file = "pyobjc_framework_Quartz-10.3.2-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:d5bd6ef96a3d08c97cf2aca43a819113cdff494b5abebcedd7cf23b6d6e711f4"}, + {file = "pyobjc_framework_Quartz-10.3.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:d3b55ec27cffff18d98d73694001a211ad4cdf717f7d8ad76235f845771d8b5d"}, + {file = "pyobjc_framework_Quartz-10.3.2-cp38-cp38-macosx_11_0_universal2.whl", hash = "sha256:a58826db7e71de4654e5215b46f00f7825b17991078c9ba74ca729a4da024f82"}, + {file = "pyobjc_framework_Quartz-10.3.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:6ede1001c28d27fc76f89a3243b3127dbd7dd03f39a3324766ae895cdcd1ebf1"}, + {file = "pyobjc_framework_quartz-10.3.2.tar.gz", hash = "sha256:193e7752c93e2d1304f914e3a8c069f4b66de237376c5285ba7c72e9ee0e3b15"}, ] [package.dependencies] -pyobjc-core = ">=10.3.1" -pyobjc-framework-Cocoa = ">=10.3.1" +pyobjc-core = ">=10.3.2" +pyobjc-framework-Cocoa = ">=10.3.2" [[package]] name = "pyopengl" @@ -5341,13 +5437,13 @@ files = [ [[package]] name = "pyparsing" -version = "3.1.4" +version = "3.2.1" description = "pyparsing module - Classes and methods to define and execute parsing grammars" optional = true -python-versions = ">=3.6.8" +python-versions = ">=3.9" files = [ - {file = "pyparsing-3.1.4-py3-none-any.whl", hash = "sha256:a6a7ee4235a3f944aa1fa2249307708f893fe5717dc603503c6c7969c070fb7c"}, - {file = "pyparsing-3.1.4.tar.gz", hash = "sha256:f86ec8d1a83f11977c9a6ea7598e8c27fc5cddfa5b07ea2241edbbde1d7bc032"}, + {file = "pyparsing-3.2.1-py3-none-any.whl", hash = "sha256:506ff4f4386c4cec0590ec19e6302d3aedb992fdc02c761e90416f158dacf8e1"}, + {file = "pyparsing-3.2.1.tar.gz", hash = "sha256:61980854fd66de3a90028d679a954d5f2623e83144b5afe5ee86f43d762e5f0a"}, ] [package.extras] @@ -5399,7 +5495,7 @@ trimesh = "*" [package.extras] dev = ["flake8", "pre-commit", "pytest", "pytest-cov", "tox"] -docs = ["sphinx", "sphinx-automodapi", "sphinx-rtd-theme"] +docs = ["sphinx", "sphinx-automodapi", "sphinx_rtd_theme"] [package.source] type = "git" @@ -5462,13 +5558,13 @@ files = [ [[package]] name = "pytest" -version = "8.3.3" +version = "8.3.4" description = "pytest: simple powerful testing with Python" optional = true python-versions = ">=3.8" files = [ - {file = "pytest-8.3.3-py3-none-any.whl", hash = "sha256:a6853c7375b2663155079443d2e45de913a911a11d669df02a50814944db57b2"}, - {file = "pytest-8.3.3.tar.gz", hash = "sha256:70b98107bd648308a7952b06e6ca9a50bc660be218d53c257cc1fc94fda10181"}, + {file = "pytest-8.3.4-py3-none-any.whl", hash = "sha256:50e16d954148559c9a74109af1eaf0c945ba2d8f30f0a3d3335edde19788b6f6"}, + {file = "pytest-8.3.4.tar.gz", hash = "sha256:965370d062bce11e73868e0335abac31b4d3de0e82f4007408d242b4f8610761"}, ] [package.dependencies] @@ -5484,17 +5580,17 @@ dev = ["argcomplete", "attrs (>=19.2)", "hypothesis (>=3.56)", "mock", "pygments [[package]] name = "pytest-cov" -version = "5.0.0" +version = "6.0.0" description = "Pytest plugin for measuring coverage." 
optional = true -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "pytest-cov-5.0.0.tar.gz", hash = "sha256:5837b58e9f6ebd335b0f8060eecce69b662415b16dc503883a02f45dfeb14857"}, - {file = "pytest_cov-5.0.0-py3-none-any.whl", hash = "sha256:4f0764a1219df53214206bf1feea4633c3b558a2925c8b59f144f682861ce652"}, + {file = "pytest-cov-6.0.0.tar.gz", hash = "sha256:fde0b595ca248bb8e2d76f020b465f3b107c9632e6a1d1705f17834c89dcadc0"}, + {file = "pytest_cov-6.0.0-py3-none-any.whl", hash = "sha256:eee6f1b9e61008bd34975a4d5bab25801eb31898b032dd55addc93e96fcaaa35"}, ] [package.dependencies] -coverage = {version = ">=5.2.1", extras = ["toml"]} +coverage = {version = ">=7.5", extras = ["toml"]} pytest = ">=4.6" [package.extras] @@ -5516,28 +5612,31 @@ six = ">=1.5" [[package]] name = "python-json-logger" -version = "2.0.7" -description = "A python library adding a json log formatter" +version = "3.2.1" +description = "JSON Log Formatter for the Python Logging Package" optional = true -python-versions = ">=3.6" +python-versions = ">=3.8" files = [ - {file = "python-json-logger-2.0.7.tar.gz", hash = "sha256:23e7ec02d34237c5aa1e29a070193a4ea87583bb4e7f8fd06d3de8264c4b2e1c"}, - {file = "python_json_logger-2.0.7-py3-none-any.whl", hash = "sha256:f380b826a991ebbe3de4d897aeec42760035ac760345e57b812938dc8b35e2bd"}, + {file = "python_json_logger-3.2.1-py3-none-any.whl", hash = "sha256:cdc17047eb5374bd311e748b42f99d71223f3b0e186f4206cc5d52aefe85b090"}, + {file = "python_json_logger-3.2.1.tar.gz", hash = "sha256:8eb0554ea17cb75b05d2848bc14fb02fbdbd9d6972120781b974380bfa162008"}, ] +[package.extras] +dev = ["backports.zoneinfo", "black", "build", "freezegun", "mdx_truly_sane_lists", "mike", "mkdocs", "mkdocs-awesome-pages-plugin", "mkdocs-gen-files", "mkdocs-literate-nav", "mkdocs-material (>=8.5)", "mkdocstrings[python]", "msgspec", "msgspec-python313-pre", "mypy", "orjson", "pylint", "pytest", "tzdata", "validate-pyproject[all]"] + [[package]] name = "python-utils" -version = "3.9.0" +version = "3.9.1" description = "Python Utils is a module with some convenient utilities not included with the standard Python install" optional = true -python-versions = ">3.9.0" +python-versions = ">=3.9.0" files = [ - {file = "python_utils-3.9.0-py2.py3-none-any.whl", hash = "sha256:a7719a5ef4bae7360d2a15c13b08c4e3c3e39b9df19bd16f119ff8d0cfeaafb7"}, - {file = "python_utils-3.9.0.tar.gz", hash = "sha256:3689556884e3ae53aec5a4c9f17b36e752a3e93a7ba2768c6553fc4dd6fa70ef"}, + {file = "python_utils-3.9.1-py2.py3-none-any.whl", hash = "sha256:0273d7363c7ad4b70999b2791d5ba6b55333d6f7a4e4c8b6b39fb82b5fab4613"}, + {file = "python_utils-3.9.1.tar.gz", hash = "sha256:eb574b4292415eb230f094cbf50ab5ef36e3579b8f09e9f2ba74af70891449a0"}, ] [package.dependencies] -typing-extensions = ">3.10.0.2" +typing_extensions = ">3.10.0.2" [package.extras] docs = ["mock", "python-utils", "sphinx"] @@ -5571,28 +5670,28 @@ files = [ [[package]] name = "pyusb" -version = "1.2.1" -description = "Python USB access module" +version = "1.3.0" +description = "Easy USB access for Python" optional = true -python-versions = ">=3.6.0" +python-versions = ">=3.9.0" files = [ - {file = "pyusb-1.2.1-py3-none-any.whl", hash = "sha256:2b4c7cb86dbadf044dfb9d3a4ff69fd217013dbe78a792177a3feb172449ea36"}, - {file = "pyusb-1.2.1.tar.gz", hash = "sha256:a4cc7404a203144754164b8b40994e2849fde1cfff06b08492f12fff9d9de7b9"}, + {file = "pyusb-1.3.0-py3-none-any.whl", hash = "sha256:ca192893cb8cf7df89f4fcf2fea5dc9265a51637459b14635638ea9c13bab765"}, + {file = 
"pyusb-1.3.0.tar.gz", hash = "sha256:7e6de8ef79e164ced020d8131cd17d45a3cdeefb7afdaf41d7a2cbf2378828c3"}, ] [[package]] name = "pywinpty" -version = "2.0.13" +version = "2.0.14" description = "Pseudo terminal support for Windows from Python." optional = true python-versions = ">=3.8" files = [ - {file = "pywinpty-2.0.13-cp310-none-win_amd64.whl", hash = "sha256:697bff211fb5a6508fee2dc6ff174ce03f34a9a233df9d8b5fe9c8ce4d5eaf56"}, - {file = "pywinpty-2.0.13-cp311-none-win_amd64.whl", hash = "sha256:b96fb14698db1284db84ca38c79f15b4cfdc3172065b5137383910567591fa99"}, - {file = "pywinpty-2.0.13-cp312-none-win_amd64.whl", hash = "sha256:2fd876b82ca750bb1333236ce98488c1be96b08f4f7647cfdf4129dfad83c2d4"}, - {file = "pywinpty-2.0.13-cp38-none-win_amd64.whl", hash = "sha256:61d420c2116c0212808d31625611b51caf621fe67f8a6377e2e8b617ea1c1f7d"}, - {file = "pywinpty-2.0.13-cp39-none-win_amd64.whl", hash = "sha256:71cb613a9ee24174730ac7ae439fd179ca34ccb8c5349e8d7b72ab5dea2c6f4b"}, - {file = "pywinpty-2.0.13.tar.gz", hash = "sha256:c34e32351a3313ddd0d7da23d27f835c860d32fe4ac814d372a3ea9594f41dde"}, + {file = "pywinpty-2.0.14-cp310-none-win_amd64.whl", hash = "sha256:0b149c2918c7974f575ba79f5a4aad58bd859a52fa9eb1296cc22aa412aa411f"}, + {file = "pywinpty-2.0.14-cp311-none-win_amd64.whl", hash = "sha256:cf2a43ac7065b3e0dc8510f8c1f13a75fb8fde805efa3b8cff7599a1ef497bc7"}, + {file = "pywinpty-2.0.14-cp312-none-win_amd64.whl", hash = "sha256:55dad362ef3e9408ade68fd173e4f9032b3ce08f68cfe7eacb2c263ea1179737"}, + {file = "pywinpty-2.0.14-cp313-none-win_amd64.whl", hash = "sha256:074fb988a56ec79ca90ed03a896d40707131897cefb8f76f926e3834227f2819"}, + {file = "pywinpty-2.0.14-cp39-none-win_amd64.whl", hash = "sha256:5725fd56f73c0531ec218663bd8c8ff5acc43c78962fab28564871b5fce053fd"}, + {file = "pywinpty-2.0.14.tar.gz", hash = "sha256:18bd9529e4a5daf2d9719aa17788ba6013e594ae94c5a0c27e83df3278b0660e"}, ] [[package]] @@ -5812,105 +5911,105 @@ rpds-py = ">=0.7.0" [[package]] name = "regex" -version = "2024.9.11" +version = "2024.11.6" description = "Alternative regular expression module, to replace re." 
optional = false python-versions = ">=3.8" files = [ - {file = "regex-2024.9.11-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:1494fa8725c285a81d01dc8c06b55287a1ee5e0e382d8413adc0a9197aac6408"}, - {file = "regex-2024.9.11-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0e12c481ad92d129c78f13a2a3662317e46ee7ef96c94fd332e1c29131875b7d"}, - {file = "regex-2024.9.11-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:16e13a7929791ac1216afde26f712802e3df7bf0360b32e4914dca3ab8baeea5"}, - {file = "regex-2024.9.11-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:46989629904bad940bbec2106528140a218b4a36bb3042d8406980be1941429c"}, - {file = "regex-2024.9.11-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a906ed5e47a0ce5f04b2c981af1c9acf9e8696066900bf03b9d7879a6f679fc8"}, - {file = "regex-2024.9.11-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e9a091b0550b3b0207784a7d6d0f1a00d1d1c8a11699c1a4d93db3fbefc3ad35"}, - {file = "regex-2024.9.11-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5ddcd9a179c0a6fa8add279a4444015acddcd7f232a49071ae57fa6e278f1f71"}, - {file = "regex-2024.9.11-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6b41e1adc61fa347662b09398e31ad446afadff932a24807d3ceb955ed865cc8"}, - {file = "regex-2024.9.11-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:ced479f601cd2f8ca1fd7b23925a7e0ad512a56d6e9476f79b8f381d9d37090a"}, - {file = "regex-2024.9.11-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:635a1d96665f84b292e401c3d62775851aedc31d4f8784117b3c68c4fcd4118d"}, - {file = "regex-2024.9.11-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:c0256beda696edcf7d97ef16b2a33a8e5a875affd6fa6567b54f7c577b30a137"}, - {file = "regex-2024.9.11-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:3ce4f1185db3fbde8ed8aa223fc9620f276c58de8b0d4f8cc86fd1360829edb6"}, - {file = "regex-2024.9.11-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:09d77559e80dcc9d24570da3745ab859a9cf91953062e4ab126ba9d5993688ca"}, - {file = "regex-2024.9.11-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:7a22ccefd4db3f12b526eccb129390942fe874a3a9fdbdd24cf55773a1faab1a"}, - {file = "regex-2024.9.11-cp310-cp310-win32.whl", hash = "sha256:f745ec09bc1b0bd15cfc73df6fa4f726dcc26bb16c23a03f9e3367d357eeedd0"}, - {file = "regex-2024.9.11-cp310-cp310-win_amd64.whl", hash = "sha256:01c2acb51f8a7d6494c8c5eafe3d8e06d76563d8a8a4643b37e9b2dd8a2ff623"}, - {file = "regex-2024.9.11-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:2cce2449e5927a0bf084d346da6cd5eb016b2beca10d0013ab50e3c226ffc0df"}, - {file = "regex-2024.9.11-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3b37fa423beefa44919e009745ccbf353d8c981516e807995b2bd11c2c77d268"}, - {file = "regex-2024.9.11-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:64ce2799bd75039b480cc0360907c4fb2f50022f030bf9e7a8705b636e408fad"}, - {file = "regex-2024.9.11-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a4cc92bb6db56ab0c1cbd17294e14f5e9224f0cc6521167ef388332604e92679"}, - {file = "regex-2024.9.11-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d05ac6fa06959c4172eccd99a222e1fbf17b5670c4d596cb1e5cde99600674c4"}, - {file = "regex-2024.9.11-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:040562757795eeea356394a7fb13076ad4f99d3c62ab0f8bdfb21f99a1f85664"}, - {file = "regex-2024.9.11-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6113c008a7780792efc80f9dfe10ba0cd043cbf8dc9a76ef757850f51b4edc50"}, - {file = "regex-2024.9.11-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8e5fb5f77c8745a60105403a774fe2c1759b71d3e7b4ca237a5e67ad066c7199"}, - {file = "regex-2024.9.11-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:54d9ff35d4515debf14bc27f1e3b38bfc453eff3220f5bce159642fa762fe5d4"}, - {file = "regex-2024.9.11-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:df5cbb1fbc74a8305b6065d4ade43b993be03dbe0f8b30032cced0d7740994bd"}, - {file = "regex-2024.9.11-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:7fb89ee5d106e4a7a51bce305ac4efb981536301895f7bdcf93ec92ae0d91c7f"}, - {file = "regex-2024.9.11-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:a738b937d512b30bf75995c0159c0ddf9eec0775c9d72ac0202076c72f24aa96"}, - {file = "regex-2024.9.11-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:e28f9faeb14b6f23ac55bfbbfd3643f5c7c18ede093977f1df249f73fd22c7b1"}, - {file = "regex-2024.9.11-cp311-cp311-win32.whl", hash = "sha256:18e707ce6c92d7282dfce370cd205098384b8ee21544e7cb29b8aab955b66fa9"}, - {file = "regex-2024.9.11-cp311-cp311-win_amd64.whl", hash = "sha256:313ea15e5ff2a8cbbad96ccef6be638393041b0a7863183c2d31e0c6116688cf"}, - {file = "regex-2024.9.11-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:b0d0a6c64fcc4ef9c69bd5b3b3626cc3776520a1637d8abaa62b9edc147a58f7"}, - {file = "regex-2024.9.11-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:49b0e06786ea663f933f3710a51e9385ce0cba0ea56b67107fd841a55d56a231"}, - {file = "regex-2024.9.11-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5b513b6997a0b2f10e4fd3a1313568e373926e8c252bd76c960f96fd039cd28d"}, - {file = "regex-2024.9.11-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ee439691d8c23e76f9802c42a95cfeebf9d47cf4ffd06f18489122dbb0a7ad64"}, - {file = "regex-2024.9.11-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a8f877c89719d759e52783f7fe6e1c67121076b87b40542966c02de5503ace42"}, - {file = "regex-2024.9.11-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:23b30c62d0f16827f2ae9f2bb87619bc4fba2044911e2e6c2eb1af0161cdb766"}, - {file = "regex-2024.9.11-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:85ab7824093d8f10d44330fe1e6493f756f252d145323dd17ab6b48733ff6c0a"}, - {file = "regex-2024.9.11-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8dee5b4810a89447151999428fe096977346cf2f29f4d5e29609d2e19e0199c9"}, - {file = "regex-2024.9.11-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:98eeee2f2e63edae2181c886d7911ce502e1292794f4c5ee71e60e23e8d26b5d"}, - {file = "regex-2024.9.11-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:57fdd2e0b2694ce6fc2e5ccf189789c3e2962916fb38779d3e3521ff8fe7a822"}, - {file = "regex-2024.9.11-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:d552c78411f60b1fdaafd117a1fca2f02e562e309223b9d44b7de8be451ec5e0"}, - {file = "regex-2024.9.11-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:a0b2b80321c2ed3fcf0385ec9e51a12253c50f146fddb2abbb10f033fe3d049a"}, - {file = "regex-2024.9.11-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:18406efb2f5a0e57e3a5881cd9354c1512d3bb4f5c45d96d110a66114d84d23a"}, - {file = 
"regex-2024.9.11-cp312-cp312-win32.whl", hash = "sha256:e464b467f1588e2c42d26814231edecbcfe77f5ac414d92cbf4e7b55b2c2a776"}, - {file = "regex-2024.9.11-cp312-cp312-win_amd64.whl", hash = "sha256:9e8719792ca63c6b8340380352c24dcb8cd7ec49dae36e963742a275dfae6009"}, - {file = "regex-2024.9.11-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:c157bb447303070f256e084668b702073db99bbb61d44f85d811025fcf38f784"}, - {file = "regex-2024.9.11-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:4db21ece84dfeefc5d8a3863f101995de646c6cb0536952c321a2650aa202c36"}, - {file = "regex-2024.9.11-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:220e92a30b426daf23bb67a7962900ed4613589bab80382be09b48896d211e92"}, - {file = "regex-2024.9.11-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eb1ae19e64c14c7ec1995f40bd932448713d3c73509e82d8cd7744dc00e29e86"}, - {file = "regex-2024.9.11-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f47cd43a5bfa48f86925fe26fbdd0a488ff15b62468abb5d2a1e092a4fb10e85"}, - {file = "regex-2024.9.11-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9d4a76b96f398697fe01117093613166e6aa8195d63f1b4ec3f21ab637632963"}, - {file = "regex-2024.9.11-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0ea51dcc0835eea2ea31d66456210a4e01a076d820e9039b04ae8d17ac11dee6"}, - {file = "regex-2024.9.11-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b7aaa315101c6567a9a45d2839322c51c8d6e81f67683d529512f5bcfb99c802"}, - {file = "regex-2024.9.11-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:c57d08ad67aba97af57a7263c2d9006d5c404d721c5f7542f077f109ec2a4a29"}, - {file = "regex-2024.9.11-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:f8404bf61298bb6f8224bb9176c1424548ee1181130818fcd2cbffddc768bed8"}, - {file = "regex-2024.9.11-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:dd4490a33eb909ef5078ab20f5f000087afa2a4daa27b4c072ccb3cb3050ad84"}, - {file = "regex-2024.9.11-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:eee9130eaad130649fd73e5cd92f60e55708952260ede70da64de420cdcad554"}, - {file = "regex-2024.9.11-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6a2644a93da36c784e546de579ec1806bfd2763ef47babc1b03d765fe560c9f8"}, - {file = "regex-2024.9.11-cp313-cp313-win32.whl", hash = "sha256:e997fd30430c57138adc06bba4c7c2968fb13d101e57dd5bb9355bf8ce3fa7e8"}, - {file = "regex-2024.9.11-cp313-cp313-win_amd64.whl", hash = "sha256:042c55879cfeb21a8adacc84ea347721d3d83a159da6acdf1116859e2427c43f"}, - {file = "regex-2024.9.11-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:35f4a6f96aa6cb3f2f7247027b07b15a374f0d5b912c0001418d1d55024d5cb4"}, - {file = "regex-2024.9.11-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:55b96e7ce3a69a8449a66984c268062fbaa0d8ae437b285428e12797baefce7e"}, - {file = "regex-2024.9.11-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:cb130fccd1a37ed894824b8c046321540263013da72745d755f2d35114b81a60"}, - {file = "regex-2024.9.11-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:323c1f04be6b2968944d730e5c2091c8c89767903ecaa135203eec4565ed2b2b"}, - {file = "regex-2024.9.11-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:be1c8ed48c4c4065ecb19d882a0ce1afe0745dfad8ce48c49586b90a55f02366"}, - {file = "regex-2024.9.11-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:b5b029322e6e7b94fff16cd120ab35a253236a5f99a79fb04fda7ae71ca20ae8"}, - {file = "regex-2024.9.11-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f6fff13ef6b5f29221d6904aa816c34701462956aa72a77f1f151a8ec4f56aeb"}, - {file = "regex-2024.9.11-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:587d4af3979376652010e400accc30404e6c16b7df574048ab1f581af82065e4"}, - {file = "regex-2024.9.11-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:079400a8269544b955ffa9e31f186f01d96829110a3bf79dc338e9910f794fca"}, - {file = "regex-2024.9.11-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:f9268774428ec173654985ce55fc6caf4c6d11ade0f6f914d48ef4719eb05ebb"}, - {file = "regex-2024.9.11-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:23f9985c8784e544d53fc2930fc1ac1a7319f5d5332d228437acc9f418f2f168"}, - {file = "regex-2024.9.11-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:ae2941333154baff9838e88aa71c1d84f4438189ecc6021a12c7573728b5838e"}, - {file = "regex-2024.9.11-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:e93f1c331ca8e86fe877a48ad64e77882c0c4da0097f2212873a69bbfea95d0c"}, - {file = "regex-2024.9.11-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:846bc79ee753acf93aef4184c040d709940c9d001029ceb7b7a52747b80ed2dd"}, - {file = "regex-2024.9.11-cp38-cp38-win32.whl", hash = "sha256:c94bb0a9f1db10a1d16c00880bdebd5f9faf267273b8f5bd1878126e0fbde771"}, - {file = "regex-2024.9.11-cp38-cp38-win_amd64.whl", hash = "sha256:2b08fce89fbd45664d3df6ad93e554b6c16933ffa9d55cb7e01182baaf971508"}, - {file = "regex-2024.9.11-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:07f45f287469039ffc2c53caf6803cd506eb5f5f637f1d4acb37a738f71dd066"}, - {file = "regex-2024.9.11-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4838e24ee015101d9f901988001038f7f0d90dc0c3b115541a1365fb439add62"}, - {file = "regex-2024.9.11-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:6edd623bae6a737f10ce853ea076f56f507fd7726bee96a41ee3d68d347e4d16"}, - {file = "regex-2024.9.11-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c69ada171c2d0e97a4b5aa78fbb835e0ffbb6b13fc5da968c09811346564f0d3"}, - {file = "regex-2024.9.11-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:02087ea0a03b4af1ed6ebab2c54d7118127fee8d71b26398e8e4b05b78963199"}, - {file = "regex-2024.9.11-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:69dee6a020693d12a3cf892aba4808fe168d2a4cef368eb9bf74f5398bfd4ee8"}, - {file = "regex-2024.9.11-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:297f54910247508e6e5cae669f2bc308985c60540a4edd1c77203ef19bfa63ca"}, - {file = "regex-2024.9.11-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ecea58b43a67b1b79805f1a0255730edaf5191ecef84dbc4cc85eb30bc8b63b9"}, - {file = "regex-2024.9.11-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:eab4bb380f15e189d1313195b062a6aa908f5bd687a0ceccd47c8211e9cf0d4a"}, - {file = "regex-2024.9.11-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:0cbff728659ce4bbf4c30b2a1be040faafaa9eca6ecde40aaff86f7889f4ab39"}, - {file = "regex-2024.9.11-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:54c4a097b8bc5bb0dfc83ae498061d53ad7b5762e00f4adaa23bee22b012e6ba"}, - {file = "regex-2024.9.11-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = 
"sha256:73d6d2f64f4d894c96626a75578b0bf7d9e56dcda8c3d037a2118fdfe9b1c664"}, - {file = "regex-2024.9.11-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:e53b5fbab5d675aec9f0c501274c467c0f9a5d23696cfc94247e1fb56501ed89"}, - {file = "regex-2024.9.11-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:0ffbcf9221e04502fc35e54d1ce9567541979c3fdfb93d2c554f0ca583a19b35"}, - {file = "regex-2024.9.11-cp39-cp39-win32.whl", hash = "sha256:e4c22e1ac1f1ec1e09f72e6c44d8f2244173db7eb9629cc3a346a8d7ccc31142"}, - {file = "regex-2024.9.11-cp39-cp39-win_amd64.whl", hash = "sha256:faa3c142464efec496967359ca99696c896c591c56c53506bac1ad465f66e919"}, - {file = "regex-2024.9.11.tar.gz", hash = "sha256:6c188c307e8433bcb63dc1915022deb553b4203a70722fc542c363bf120a01fd"}, + {file = "regex-2024.11.6-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:ff590880083d60acc0433f9c3f713c51f7ac6ebb9adf889c79a261ecf541aa91"}, + {file = "regex-2024.11.6-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:658f90550f38270639e83ce492f27d2c8d2cd63805c65a13a14d36ca126753f0"}, + {file = "regex-2024.11.6-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:164d8b7b3b4bcb2068b97428060b2a53be050085ef94eca7f240e7947f1b080e"}, + {file = "regex-2024.11.6-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d3660c82f209655a06b587d55e723f0b813d3a7db2e32e5e7dc64ac2a9e86fde"}, + {file = "regex-2024.11.6-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d22326fcdef5e08c154280b71163ced384b428343ae16a5ab2b3354aed12436e"}, + {file = "regex-2024.11.6-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f1ac758ef6aebfc8943560194e9fd0fa18bcb34d89fd8bd2af18183afd8da3a2"}, + {file = "regex-2024.11.6-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:997d6a487ff00807ba810e0f8332c18b4eb8d29463cfb7c820dc4b6e7562d0cf"}, + {file = "regex-2024.11.6-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:02a02d2bb04fec86ad61f3ea7f49c015a0681bf76abb9857f945d26159d2968c"}, + {file = "regex-2024.11.6-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:f02f93b92358ee3f78660e43b4b0091229260c5d5c408d17d60bf26b6c900e86"}, + {file = "regex-2024.11.6-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:06eb1be98df10e81ebaded73fcd51989dcf534e3c753466e4b60c4697a003b67"}, + {file = "regex-2024.11.6-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:040df6fe1a5504eb0f04f048e6d09cd7c7110fef851d7c567a6b6e09942feb7d"}, + {file = "regex-2024.11.6-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:fdabbfc59f2c6edba2a6622c647b716e34e8e3867e0ab975412c5c2f79b82da2"}, + {file = "regex-2024.11.6-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:8447d2d39b5abe381419319f942de20b7ecd60ce86f16a23b0698f22e1b70008"}, + {file = "regex-2024.11.6-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:da8f5fc57d1933de22a9e23eec290a0d8a5927a5370d24bda9a6abe50683fe62"}, + {file = "regex-2024.11.6-cp310-cp310-win32.whl", hash = "sha256:b489578720afb782f6ccf2840920f3a32e31ba28a4b162e13900c3e6bd3f930e"}, + {file = "regex-2024.11.6-cp310-cp310-win_amd64.whl", hash = "sha256:5071b2093e793357c9d8b2929dfc13ac5f0a6c650559503bb81189d0a3814519"}, + {file = "regex-2024.11.6-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:5478c6962ad548b54a591778e93cd7c456a7a29f8eca9c49e4f9a806dcc5d638"}, + {file = "regex-2024.11.6-cp311-cp311-macosx_10_9_x86_64.whl", hash = 
"sha256:2c89a8cc122b25ce6945f0423dc1352cb9593c68abd19223eebbd4e56612c5b7"}, + {file = "regex-2024.11.6-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:94d87b689cdd831934fa3ce16cc15cd65748e6d689f5d2b8f4f4df2065c9fa20"}, + {file = "regex-2024.11.6-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1062b39a0a2b75a9c694f7a08e7183a80c63c0d62b301418ffd9c35f55aaa114"}, + {file = "regex-2024.11.6-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:167ed4852351d8a750da48712c3930b031f6efdaa0f22fa1933716bfcd6bf4a3"}, + {file = "regex-2024.11.6-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2d548dafee61f06ebdb584080621f3e0c23fff312f0de1afc776e2a2ba99a74f"}, + {file = "regex-2024.11.6-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f2a19f302cd1ce5dd01a9099aaa19cae6173306d1302a43b627f62e21cf18ac0"}, + {file = "regex-2024.11.6-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bec9931dfb61ddd8ef2ebc05646293812cb6b16b60cf7c9511a832b6f1854b55"}, + {file = "regex-2024.11.6-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:9714398225f299aa85267fd222f7142fcb5c769e73d7733344efc46f2ef5cf89"}, + {file = "regex-2024.11.6-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:202eb32e89f60fc147a41e55cb086db2a3f8cb82f9a9a88440dcfc5d37faae8d"}, + {file = "regex-2024.11.6-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:4181b814e56078e9b00427ca358ec44333765f5ca1b45597ec7446d3a1ef6e34"}, + {file = "regex-2024.11.6-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:068376da5a7e4da51968ce4c122a7cd31afaaec4fccc7856c92f63876e57b51d"}, + {file = "regex-2024.11.6-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:ac10f2c4184420d881a3475fb2c6f4d95d53a8d50209a2500723d831036f7c45"}, + {file = "regex-2024.11.6-cp311-cp311-win32.whl", hash = "sha256:c36f9b6f5f8649bb251a5f3f66564438977b7ef8386a52460ae77e6070d309d9"}, + {file = "regex-2024.11.6-cp311-cp311-win_amd64.whl", hash = "sha256:02e28184be537f0e75c1f9b2f8847dc51e08e6e171c6bde130b2687e0c33cf60"}, + {file = "regex-2024.11.6-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:52fb28f528778f184f870b7cf8f225f5eef0a8f6e3778529bdd40c7b3920796a"}, + {file = "regex-2024.11.6-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:fdd6028445d2460f33136c55eeb1f601ab06d74cb3347132e1c24250187500d9"}, + {file = "regex-2024.11.6-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:805e6b60c54bf766b251e94526ebad60b7de0c70f70a4e6210ee2891acb70bf2"}, + {file = "regex-2024.11.6-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b85c2530be953a890eaffde05485238f07029600e8f098cdf1848d414a8b45e4"}, + {file = "regex-2024.11.6-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bb26437975da7dc36b7efad18aa9dd4ea569d2357ae6b783bf1118dabd9ea577"}, + {file = "regex-2024.11.6-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:abfa5080c374a76a251ba60683242bc17eeb2c9818d0d30117b4486be10c59d3"}, + {file = "regex-2024.11.6-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b7fa6606c2881c1db9479b0eaa11ed5dfa11c8d60a474ff0e095099f39d98e"}, + {file = "regex-2024.11.6-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0c32f75920cf99fe6b6c539c399a4a128452eaf1af27f39bce8909c9a3fd8cbe"}, + {file = "regex-2024.11.6-cp312-cp312-musllinux_1_2_aarch64.whl", hash = 
"sha256:982e6d21414e78e1f51cf595d7f321dcd14de1f2881c5dc6a6e23bbbbd68435e"}, + {file = "regex-2024.11.6-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:a7c2155f790e2fb448faed6dd241386719802296ec588a8b9051c1f5c481bc29"}, + {file = "regex-2024.11.6-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:149f5008d286636e48cd0b1dd65018548944e495b0265b45e1bffecce1ef7f39"}, + {file = "regex-2024.11.6-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:e5364a4502efca094731680e80009632ad6624084aff9a23ce8c8c6820de3e51"}, + {file = "regex-2024.11.6-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:0a86e7eeca091c09e021db8eb72d54751e527fa47b8d5787caf96d9831bd02ad"}, + {file = "regex-2024.11.6-cp312-cp312-win32.whl", hash = "sha256:32f9a4c643baad4efa81d549c2aadefaeba12249b2adc5af541759237eee1c54"}, + {file = "regex-2024.11.6-cp312-cp312-win_amd64.whl", hash = "sha256:a93c194e2df18f7d264092dc8539b8ffb86b45b899ab976aa15d48214138e81b"}, + {file = "regex-2024.11.6-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:a6ba92c0bcdf96cbf43a12c717eae4bc98325ca3730f6b130ffa2e3c3c723d84"}, + {file = "regex-2024.11.6-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:525eab0b789891ac3be914d36893bdf972d483fe66551f79d3e27146191a37d4"}, + {file = "regex-2024.11.6-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:086a27a0b4ca227941700e0b31425e7a28ef1ae8e5e05a33826e17e47fbfdba0"}, + {file = "regex-2024.11.6-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bde01f35767c4a7899b7eb6e823b125a64de314a8ee9791367c9a34d56af18d0"}, + {file = "regex-2024.11.6-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b583904576650166b3d920d2bcce13971f6f9e9a396c673187f49811b2769dc7"}, + {file = "regex-2024.11.6-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1c4de13f06a0d54fa0d5ab1b7138bfa0d883220965a29616e3ea61b35d5f5fc7"}, + {file = "regex-2024.11.6-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3cde6e9f2580eb1665965ce9bf17ff4952f34f5b126beb509fee8f4e994f143c"}, + {file = "regex-2024.11.6-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0d7f453dca13f40a02b79636a339c5b62b670141e63efd511d3f8f73fba162b3"}, + {file = "regex-2024.11.6-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:59dfe1ed21aea057a65c6b586afd2a945de04fc7db3de0a6e3ed5397ad491b07"}, + {file = "regex-2024.11.6-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:b97c1e0bd37c5cd7902e65f410779d39eeda155800b65fc4d04cc432efa9bc6e"}, + {file = "regex-2024.11.6-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:f9d1e379028e0fc2ae3654bac3cbbef81bf3fd571272a42d56c24007979bafb6"}, + {file = "regex-2024.11.6-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:13291b39131e2d002a7940fb176e120bec5145f3aeb7621be6534e46251912c4"}, + {file = "regex-2024.11.6-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4f51f88c126370dcec4908576c5a627220da6c09d0bff31cfa89f2523843316d"}, + {file = "regex-2024.11.6-cp313-cp313-win32.whl", hash = "sha256:63b13cfd72e9601125027202cad74995ab26921d8cd935c25f09c630436348ff"}, + {file = "regex-2024.11.6-cp313-cp313-win_amd64.whl", hash = "sha256:2b3361af3198667e99927da8b84c1b010752fa4b1115ee30beaa332cabc3ef1a"}, + {file = "regex-2024.11.6-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:3a51ccc315653ba012774efca4f23d1d2a8a8f278a6072e29c7147eee7da446b"}, + {file = "regex-2024.11.6-cp38-cp38-macosx_10_9_x86_64.whl", hash = 
"sha256:ad182d02e40de7459b73155deb8996bbd8e96852267879396fb274e8700190e3"}, + {file = "regex-2024.11.6-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:ba9b72e5643641b7d41fa1f6d5abda2c9a263ae835b917348fc3c928182ad467"}, + {file = "regex-2024.11.6-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40291b1b89ca6ad8d3f2b82782cc33807f1406cf68c8d440861da6304d8ffbbd"}, + {file = "regex-2024.11.6-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cdf58d0e516ee426a48f7b2c03a332a4114420716d55769ff7108c37a09951bf"}, + {file = "regex-2024.11.6-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a36fdf2af13c2b14738f6e973aba563623cb77d753bbbd8d414d18bfaa3105dd"}, + {file = "regex-2024.11.6-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d1cee317bfc014c2419a76bcc87f071405e3966da434e03e13beb45f8aced1a6"}, + {file = "regex-2024.11.6-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:50153825ee016b91549962f970d6a4442fa106832e14c918acd1c8e479916c4f"}, + {file = "regex-2024.11.6-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:ea1bfda2f7162605f6e8178223576856b3d791109f15ea99a9f95c16a7636fb5"}, + {file = "regex-2024.11.6-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:df951c5f4a1b1910f1a99ff42c473ff60f8225baa1cdd3539fe2819d9543e9df"}, + {file = "regex-2024.11.6-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:072623554418a9911446278f16ecb398fb3b540147a7828c06e2011fa531e773"}, + {file = "regex-2024.11.6-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:f654882311409afb1d780b940234208a252322c24a93b442ca714d119e68086c"}, + {file = "regex-2024.11.6-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:89d75e7293d2b3e674db7d4d9b1bee7f8f3d1609428e293771d1a962617150cc"}, + {file = "regex-2024.11.6-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:f65557897fc977a44ab205ea871b690adaef6b9da6afda4790a2484b04293a5f"}, + {file = "regex-2024.11.6-cp38-cp38-win32.whl", hash = "sha256:6f44ec28b1f858c98d3036ad5d7d0bfc568bdd7a74f9c24e25f41ef1ebfd81a4"}, + {file = "regex-2024.11.6-cp38-cp38-win_amd64.whl", hash = "sha256:bb8f74f2f10dbf13a0be8de623ba4f9491faf58c24064f32b65679b021ed0001"}, + {file = "regex-2024.11.6-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:5704e174f8ccab2026bd2f1ab6c510345ae8eac818b613d7d73e785f1310f839"}, + {file = "regex-2024.11.6-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:220902c3c5cc6af55d4fe19ead504de80eb91f786dc102fbd74894b1551f095e"}, + {file = "regex-2024.11.6-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5e7e351589da0850c125f1600a4c4ba3c722efefe16b297de54300f08d734fbf"}, + {file = "regex-2024.11.6-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5056b185ca113c88e18223183aa1a50e66507769c9640a6ff75859619d73957b"}, + {file = "regex-2024.11.6-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2e34b51b650b23ed3354b5a07aab37034d9f923db2a40519139af34f485f77d0"}, + {file = "regex-2024.11.6-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5670bce7b200273eee1840ef307bfa07cda90b38ae56e9a6ebcc9f50da9c469b"}, + {file = "regex-2024.11.6-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:08986dce1339bc932923e7d1232ce9881499a0e02925f7402fb7c982515419ef"}, + {file = "regex-2024.11.6-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:93c0b12d3d3bc25af4ebbf38f9ee780a487e8bf6954c115b9f015822d3bb8e48"}, + {file = "regex-2024.11.6-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:764e71f22ab3b305e7f4c21f1a97e1526a25ebdd22513e251cf376760213da13"}, + {file = "regex-2024.11.6-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:f056bf21105c2515c32372bbc057f43eb02aae2fda61052e2f7622c801f0b4e2"}, + {file = "regex-2024.11.6-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:69ab78f848845569401469da20df3e081e6b5a11cb086de3eed1d48f5ed57c95"}, + {file = "regex-2024.11.6-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:86fddba590aad9208e2fa8b43b4c098bb0ec74f15718bb6a704e3c63e2cef3e9"}, + {file = "regex-2024.11.6-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:684d7a212682996d21ca12ef3c17353c021fe9de6049e19ac8481ec35574a70f"}, + {file = "regex-2024.11.6-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:a03e02f48cd1abbd9f3b7e3586d97c8f7a9721c436f51a5245b3b9483044480b"}, + {file = "regex-2024.11.6-cp39-cp39-win32.whl", hash = "sha256:41758407fc32d5c3c5de163888068cfee69cb4c2be844e7ac517a52770f9af57"}, + {file = "regex-2024.11.6-cp39-cp39-win_amd64.whl", hash = "sha256:b2837718570f95dd41675328e111345f9b7095d821bac435aac173ac80b19983"}, + {file = "regex-2024.11.6.tar.gz", hash = "sha256:7ab159b063c52a0333c884e4679f8d7a85112ee3078fe3d9004b2dd875585519"}, ] [[package]] @@ -5950,27 +6049,27 @@ use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] [[package]] name = "rerun-sdk" -version = "0.18.2" +version = "0.21.0" description = "The Rerun Logging SDK" optional = false -python-versions = "<3.13,>=3.8" +python-versions = ">=3.8" files = [ - {file = "rerun_sdk-0.18.2-cp38-abi3-macosx_10_12_x86_64.whl", hash = "sha256:bc4e73275f428e4e9feb8e85f88db7a9fd18b997b1570de62f949a926978f1b2"}, - {file = "rerun_sdk-0.18.2-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:efbba40a59710ae83607cb0dc140398a35979c2d2acf5190c9def2ac4697f6a8"}, - {file = "rerun_sdk-0.18.2-cp38-abi3-manylinux_2_31_aarch64.whl", hash = "sha256:2a5e3b618b6d1bfde09bd5614a898995f3c318cc69d8f6d569924a2cd41536ce"}, - {file = "rerun_sdk-0.18.2-cp38-abi3-manylinux_2_31_x86_64.whl", hash = "sha256:8fdfc4c51ef2e75cb68d39e56f0d7c196eff250cb9a0260c07d5e2d6736e31b0"}, - {file = "rerun_sdk-0.18.2-cp38-abi3-win_amd64.whl", hash = "sha256:c929ade91d3be301b26671b25e70fb529524ced915523d266641c6fc667a1eb5"}, + {file = "rerun_sdk-0.21.0-cp38-abi3-macosx_10_12_x86_64.whl", hash = "sha256:1e454ceea31c70ae9ec1bb26eaa82828661b7657ab4d2261ca0b94006d6a1975"}, + {file = "rerun_sdk-0.21.0-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:84ecb77b0b5bac71b53e849801ff073de89fcd2f1e0ca0da62fb18fcbeceadf0"}, + {file = "rerun_sdk-0.21.0-cp38-abi3-manylinux_2_31_aarch64.whl", hash = "sha256:919d921165c3238490dbe5bf00a062c68fdd2c54dc14aac6a1914c82edb5d9c8"}, + {file = "rerun_sdk-0.21.0-cp38-abi3-manylinux_2_31_x86_64.whl", hash = "sha256:897649aadcab7014b78096f93c84c61c00a227b80adaf0dec279924b5aab53d8"}, + {file = "rerun_sdk-0.21.0-cp38-abi3-win_amd64.whl", hash = "sha256:2060bdb536a198f0f04789ba5ba771e66587e7851d668b3dfab257a5efa16819"}, ] [package.dependencies] attrs = ">=23.1.0" -numpy = ">=1.23,<2" +numpy = ">=1.23" pillow = ">=8.0.0" pyarrow = ">=14.0.2" typing-extensions = ">=4.5" [package.extras] -notebook = ["rerun-notebook (==0.18.2)"] +notebook = ["rerun-notebook (==0.21.0)"] tests = ["pytest (==7.1.2)"] [[package]] @@ -6000,13 +6099,13 @@ files = [ [[package]] name = "rich" -version = "13.9.2" +version = "13.9.4" description = 
"Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal" optional = true python-versions = ">=3.8.0" files = [ - {file = "rich-13.9.2-py3-none-any.whl", hash = "sha256:8c82a3d3f8dcfe9e734771313e606b39d8247bb6b826e196f4914b333b743cf1"}, - {file = "rich-13.9.2.tar.gz", hash = "sha256:51a2c62057461aaf7152b4d611168f93a9fc73068f8ded2790f29fe2b5366d0c"}, + {file = "rich-13.9.4-py3-none-any.whl", hash = "sha256:6049d5e6ec054bf2779ab3358186963bac2ea89175919d699e378b99738c2a90"}, + {file = "rich-13.9.4.tar.gz", hash = "sha256:439594978a49a09530cff7ebc4b5c7103ef57baf48d5ea3184f21d9a2befa098"}, ] [package.dependencies] @@ -6019,114 +6118,114 @@ jupyter = ["ipywidgets (>=7.5.1,<9)"] [[package]] name = "rpds-py" -version = "0.20.0" +version = "0.22.3" description = "Python bindings to Rust's persistent data structures (rpds)" optional = true -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "rpds_py-0.20.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:3ad0fda1635f8439cde85c700f964b23ed5fc2d28016b32b9ee5fe30da5c84e2"}, - {file = "rpds_py-0.20.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9bb4a0d90fdb03437c109a17eade42dfbf6190408f29b2744114d11586611d6f"}, - {file = "rpds_py-0.20.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c6377e647bbfd0a0b159fe557f2c6c602c159fc752fa316572f012fc0bf67150"}, - {file = "rpds_py-0.20.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:eb851b7df9dda52dc1415ebee12362047ce771fc36914586b2e9fcbd7d293b3e"}, - {file = "rpds_py-0.20.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1e0f80b739e5a8f54837be5d5c924483996b603d5502bfff79bf33da06164ee2"}, - {file = "rpds_py-0.20.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5a8c94dad2e45324fc74dce25e1645d4d14df9a4e54a30fa0ae8bad9a63928e3"}, - {file = "rpds_py-0.20.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f8e604fe73ba048c06085beaf51147eaec7df856824bfe7b98657cf436623daf"}, - {file = "rpds_py-0.20.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:df3de6b7726b52966edf29663e57306b23ef775faf0ac01a3e9f4012a24a4140"}, - {file = "rpds_py-0.20.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:cf258ede5bc22a45c8e726b29835b9303c285ab46fc7c3a4cc770736b5304c9f"}, - {file = "rpds_py-0.20.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:55fea87029cded5df854ca7e192ec7bdb7ecd1d9a3f63d5c4eb09148acf4a7ce"}, - {file = "rpds_py-0.20.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:ae94bd0b2f02c28e199e9bc51485d0c5601f58780636185660f86bf80c89af94"}, - {file = "rpds_py-0.20.0-cp310-none-win32.whl", hash = "sha256:28527c685f237c05445efec62426d285e47a58fb05ba0090a4340b73ecda6dee"}, - {file = "rpds_py-0.20.0-cp310-none-win_amd64.whl", hash = "sha256:238a2d5b1cad28cdc6ed15faf93a998336eb041c4e440dd7f902528b8891b399"}, - {file = "rpds_py-0.20.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:ac2f4f7a98934c2ed6505aead07b979e6f999389f16b714448fb39bbaa86a489"}, - {file = "rpds_py-0.20.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:220002c1b846db9afd83371d08d239fdc865e8f8c5795bbaec20916a76db3318"}, - {file = "rpds_py-0.20.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8d7919548df3f25374a1f5d01fbcd38dacab338ef5f33e044744b5c36729c8db"}, - {file = "rpds_py-0.20.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = 
"sha256:758406267907b3781beee0f0edfe4a179fbd97c0be2e9b1154d7f0a1279cf8e5"}, - {file = "rpds_py-0.20.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3d61339e9f84a3f0767b1995adfb171a0d00a1185192718a17af6e124728e0f5"}, - {file = "rpds_py-0.20.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1259c7b3705ac0a0bd38197565a5d603218591d3f6cee6e614e380b6ba61c6f6"}, - {file = "rpds_py-0.20.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5c1dc0f53856b9cc9a0ccca0a7cc61d3d20a7088201c0937f3f4048c1718a209"}, - {file = "rpds_py-0.20.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:7e60cb630f674a31f0368ed32b2a6b4331b8350d67de53c0359992444b116dd3"}, - {file = "rpds_py-0.20.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:dbe982f38565bb50cb7fb061ebf762c2f254ca3d8c20d4006878766e84266272"}, - {file = "rpds_py-0.20.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:514b3293b64187172bc77c8fb0cdae26981618021053b30d8371c3a902d4d5ad"}, - {file = "rpds_py-0.20.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:d0a26ffe9d4dd35e4dfdd1e71f46401cff0181c75ac174711ccff0459135fa58"}, - {file = "rpds_py-0.20.0-cp311-none-win32.whl", hash = "sha256:89c19a494bf3ad08c1da49445cc5d13d8fefc265f48ee7e7556839acdacf69d0"}, - {file = "rpds_py-0.20.0-cp311-none-win_amd64.whl", hash = "sha256:c638144ce971df84650d3ed0096e2ae7af8e62ecbbb7b201c8935c370df00a2c"}, - {file = "rpds_py-0.20.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:a84ab91cbe7aab97f7446652d0ed37d35b68a465aeef8fc41932a9d7eee2c1a6"}, - {file = "rpds_py-0.20.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:56e27147a5a4c2c21633ff8475d185734c0e4befd1c989b5b95a5d0db699b21b"}, - {file = "rpds_py-0.20.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2580b0c34583b85efec8c5c5ec9edf2dfe817330cc882ee972ae650e7b5ef739"}, - {file = "rpds_py-0.20.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b80d4a7900cf6b66bb9cee5c352b2d708e29e5a37fe9bf784fa97fc11504bf6c"}, - {file = "rpds_py-0.20.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:50eccbf054e62a7b2209b28dc7a22d6254860209d6753e6b78cfaeb0075d7bee"}, - {file = "rpds_py-0.20.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:49a8063ea4296b3a7e81a5dfb8f7b2d73f0b1c20c2af401fb0cdf22e14711a96"}, - {file = "rpds_py-0.20.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ea438162a9fcbee3ecf36c23e6c68237479f89f962f82dae83dc15feeceb37e4"}, - {file = "rpds_py-0.20.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:18d7585c463087bddcfa74c2ba267339f14f2515158ac4db30b1f9cbdb62c8ef"}, - {file = "rpds_py-0.20.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d4c7d1a051eeb39f5c9547e82ea27cbcc28338482242e3e0b7768033cb083821"}, - {file = "rpds_py-0.20.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:e4df1e3b3bec320790f699890d41c59d250f6beda159ea3c44c3f5bac1976940"}, - {file = "rpds_py-0.20.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:2cf126d33a91ee6eedc7f3197b53e87a2acdac63602c0f03a02dd69e4b138174"}, - {file = "rpds_py-0.20.0-cp312-none-win32.whl", hash = "sha256:8bc7690f7caee50b04a79bf017a8d020c1f48c2a1077ffe172abec59870f1139"}, - {file = "rpds_py-0.20.0-cp312-none-win_amd64.whl", hash = "sha256:0e13e6952ef264c40587d510ad676a988df19adea20444c2b295e536457bc585"}, - {file = "rpds_py-0.20.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = 
"sha256:aa9a0521aeca7d4941499a73ad7d4f8ffa3d1affc50b9ea11d992cd7eff18a29"}, - {file = "rpds_py-0.20.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:4a1f1d51eccb7e6c32ae89243cb352389228ea62f89cd80823ea7dd1b98e0b91"}, - {file = "rpds_py-0.20.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8a86a9b96070674fc88b6f9f71a97d2c1d3e5165574615d1f9168ecba4cecb24"}, - {file = "rpds_py-0.20.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6c8ef2ebf76df43f5750b46851ed1cdf8f109d7787ca40035fe19fbdc1acc5a7"}, - {file = "rpds_py-0.20.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b74b25f024b421d5859d156750ea9a65651793d51b76a2e9238c05c9d5f203a9"}, - {file = "rpds_py-0.20.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:57eb94a8c16ab08fef6404301c38318e2c5a32216bf5de453e2714c964c125c8"}, - {file = "rpds_py-0.20.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e1940dae14e715e2e02dfd5b0f64a52e8374a517a1e531ad9412319dc3ac7879"}, - {file = "rpds_py-0.20.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d20277fd62e1b992a50c43f13fbe13277a31f8c9f70d59759c88f644d66c619f"}, - {file = "rpds_py-0.20.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:06db23d43f26478303e954c34c75182356ca9aa7797d22c5345b16871ab9c45c"}, - {file = "rpds_py-0.20.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:b2a5db5397d82fa847e4c624b0c98fe59d2d9b7cf0ce6de09e4d2e80f8f5b3f2"}, - {file = "rpds_py-0.20.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:5a35df9f5548fd79cb2f52d27182108c3e6641a4feb0f39067911bf2adaa3e57"}, - {file = "rpds_py-0.20.0-cp313-none-win32.whl", hash = "sha256:fd2d84f40633bc475ef2d5490b9c19543fbf18596dcb1b291e3a12ea5d722f7a"}, - {file = "rpds_py-0.20.0-cp313-none-win_amd64.whl", hash = "sha256:9bc2d153989e3216b0559251b0c260cfd168ec78b1fac33dd485750a228db5a2"}, - {file = "rpds_py-0.20.0-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:f2fbf7db2012d4876fb0d66b5b9ba6591197b0f165db8d99371d976546472a24"}, - {file = "rpds_py-0.20.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:1e5f3cd7397c8f86c8cc72d5a791071431c108edd79872cdd96e00abd8497d29"}, - {file = "rpds_py-0.20.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ce9845054c13696f7af7f2b353e6b4f676dab1b4b215d7fe5e05c6f8bb06f965"}, - {file = "rpds_py-0.20.0-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c3e130fd0ec56cb76eb49ef52faead8ff09d13f4527e9b0c400307ff72b408e1"}, - {file = "rpds_py-0.20.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4b16aa0107ecb512b568244ef461f27697164d9a68d8b35090e9b0c1c8b27752"}, - {file = "rpds_py-0.20.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:aa7f429242aae2947246587d2964fad750b79e8c233a2367f71b554e9447949c"}, - {file = "rpds_py-0.20.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:af0fc424a5842a11e28956e69395fbbeab2c97c42253169d87e90aac2886d751"}, - {file = "rpds_py-0.20.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b8c00a3b1e70c1d3891f0db1b05292747f0dbcfb49c43f9244d04c70fbc40eb8"}, - {file = "rpds_py-0.20.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:40ce74fc86ee4645d0a225498d091d8bc61f39b709ebef8204cb8b5a464d3c0e"}, - {file = "rpds_py-0.20.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:4fe84294c7019456e56d93e8ababdad5a329cd25975be749c3f5f558abb48253"}, - {file = 
"rpds_py-0.20.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:338ca4539aad4ce70a656e5187a3a31c5204f261aef9f6ab50e50bcdffaf050a"}, - {file = "rpds_py-0.20.0-cp38-none-win32.whl", hash = "sha256:54b43a2b07db18314669092bb2de584524d1ef414588780261e31e85846c26a5"}, - {file = "rpds_py-0.20.0-cp38-none-win_amd64.whl", hash = "sha256:a1862d2d7ce1674cffa6d186d53ca95c6e17ed2b06b3f4c476173565c862d232"}, - {file = "rpds_py-0.20.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:3fde368e9140312b6e8b6c09fb9f8c8c2f00999d1823403ae90cc00480221b22"}, - {file = "rpds_py-0.20.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9824fb430c9cf9af743cf7aaf6707bf14323fb51ee74425c380f4c846ea70789"}, - {file = "rpds_py-0.20.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:11ef6ce74616342888b69878d45e9f779b95d4bd48b382a229fe624a409b72c5"}, - {file = "rpds_py-0.20.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c52d3f2f82b763a24ef52f5d24358553e8403ce05f893b5347098014f2d9eff2"}, - {file = "rpds_py-0.20.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9d35cef91e59ebbeaa45214861874bc6f19eb35de96db73e467a8358d701a96c"}, - {file = "rpds_py-0.20.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d72278a30111e5b5525c1dd96120d9e958464316f55adb030433ea905866f4de"}, - {file = "rpds_py-0.20.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b4c29cbbba378759ac5786730d1c3cb4ec6f8ababf5c42a9ce303dc4b3d08cda"}, - {file = "rpds_py-0.20.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6632f2d04f15d1bd6fe0eedd3b86d9061b836ddca4c03d5cf5c7e9e6b7c14580"}, - {file = "rpds_py-0.20.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:d0b67d87bb45ed1cd020e8fbf2307d449b68abc45402fe1a4ac9e46c3c8b192b"}, - {file = "rpds_py-0.20.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:ec31a99ca63bf3cd7f1a5ac9fe95c5e2d060d3c768a09bc1d16e235840861420"}, - {file = "rpds_py-0.20.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:22e6c9976e38f4d8c4a63bd8a8edac5307dffd3ee7e6026d97f3cc3a2dc02a0b"}, - {file = "rpds_py-0.20.0-cp39-none-win32.whl", hash = "sha256:569b3ea770c2717b730b61998b6c54996adee3cef69fc28d444f3e7920313cf7"}, - {file = "rpds_py-0.20.0-cp39-none-win_amd64.whl", hash = "sha256:e6900ecdd50ce0facf703f7a00df12374b74bbc8ad9fe0f6559947fb20f82364"}, - {file = "rpds_py-0.20.0-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:617c7357272c67696fd052811e352ac54ed1d9b49ab370261a80d3b6ce385045"}, - {file = "rpds_py-0.20.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:9426133526f69fcaba6e42146b4e12d6bc6c839b8b555097020e2b78ce908dcc"}, - {file = "rpds_py-0.20.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:deb62214c42a261cb3eb04d474f7155279c1a8a8c30ac89b7dcb1721d92c3c02"}, - {file = "rpds_py-0.20.0-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fcaeb7b57f1a1e071ebd748984359fef83ecb026325b9d4ca847c95bc7311c92"}, - {file = "rpds_py-0.20.0-pp310-pypy310_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d454b8749b4bd70dd0a79f428731ee263fa6995f83ccb8bada706e8d1d3ff89d"}, - {file = "rpds_py-0.20.0-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d807dc2051abe041b6649681dce568f8e10668e3c1c6543ebae58f2d7e617855"}, - {file = "rpds_py-0.20.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:c3c20f0ddeb6e29126d45f89206b8291352b8c5b44384e78a6499d68b52ae511"}, - {file = "rpds_py-0.20.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b7f19250ceef892adf27f0399b9e5afad019288e9be756d6919cb58892129f51"}, - {file = "rpds_py-0.20.0-pp310-pypy310_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:4f1ed4749a08379555cebf4650453f14452eaa9c43d0a95c49db50c18b7da075"}, - {file = "rpds_py-0.20.0-pp310-pypy310_pp73-musllinux_1_2_i686.whl", hash = "sha256:dcedf0b42bcb4cfff4101d7771a10532415a6106062f005ab97d1d0ab5681c60"}, - {file = "rpds_py-0.20.0-pp310-pypy310_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:39ed0d010457a78f54090fafb5d108501b5aa5604cc22408fc1c0c77eac14344"}, - {file = "rpds_py-0.20.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:bb273176be34a746bdac0b0d7e4e2c467323d13640b736c4c477881a3220a989"}, - {file = "rpds_py-0.20.0-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:f918a1a130a6dfe1d7fe0f105064141342e7dd1611f2e6a21cd2f5c8cb1cfb3e"}, - {file = "rpds_py-0.20.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:f60012a73aa396be721558caa3a6fd49b3dd0033d1675c6d59c4502e870fcf0c"}, - {file = "rpds_py-0.20.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3d2b1ad682a3dfda2a4e8ad8572f3100f95fad98cb99faf37ff0ddfe9cbf9d03"}, - {file = "rpds_py-0.20.0-pp39-pypy39_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:614fdafe9f5f19c63ea02817fa4861c606a59a604a77c8cdef5aa01d28b97921"}, - {file = "rpds_py-0.20.0-pp39-pypy39_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fa518bcd7600c584bf42e6617ee8132869e877db2f76bcdc281ec6a4113a53ab"}, - {file = "rpds_py-0.20.0-pp39-pypy39_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f0475242f447cc6cb8a9dd486d68b2ef7fbee84427124c232bff5f63b1fe11e5"}, - {file = "rpds_py-0.20.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f90a4cd061914a60bd51c68bcb4357086991bd0bb93d8aa66a6da7701370708f"}, - {file = "rpds_py-0.20.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:def7400461c3a3f26e49078302e1c1b38f6752342c77e3cf72ce91ca69fb1bc1"}, - {file = "rpds_py-0.20.0-pp39-pypy39_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:65794e4048ee837494aea3c21a28ad5fc080994dfba5b036cf84de37f7ad5074"}, - {file = "rpds_py-0.20.0-pp39-pypy39_pp73-musllinux_1_2_i686.whl", hash = "sha256:faefcc78f53a88f3076b7f8be0a8f8d35133a3ecf7f3770895c25f8813460f08"}, - {file = "rpds_py-0.20.0-pp39-pypy39_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:5b4f105deeffa28bbcdff6c49b34e74903139afa690e35d2d9e3c2c2fba18cec"}, - {file = "rpds_py-0.20.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:fdfc3a892927458d98f3d55428ae46b921d1f7543b89382fdb483f5640daaec8"}, - {file = "rpds_py-0.20.0.tar.gz", hash = "sha256:d72a210824facfdaf8768cf2d7ca25a042c30320b3020de2fa04640920d4e121"}, + {file = "rpds_py-0.22.3-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:6c7b99ca52c2c1752b544e310101b98a659b720b21db00e65edca34483259967"}, + {file = "rpds_py-0.22.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:be2eb3f2495ba669d2a985f9b426c1797b7d48d6963899276d22f23e33d47e37"}, + {file = "rpds_py-0.22.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:70eb60b3ae9245ddea20f8a4190bd79c705a22f8028aaf8bbdebe4716c3fab24"}, + {file = "rpds_py-0.22.3-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4041711832360a9b75cfb11b25a6a97c8fb49c07b8bd43d0d02b45d0b499a4ff"}, + 
{file = "rpds_py-0.22.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:64607d4cbf1b7e3c3c8a14948b99345eda0e161b852e122c6bb71aab6d1d798c"}, + {file = "rpds_py-0.22.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e69b0a0e2537f26d73b4e43ad7bc8c8efb39621639b4434b76a3de50c6966e"}, + {file = "rpds_py-0.22.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc27863442d388870c1809a87507727b799c8460573cfbb6dc0eeaef5a11b5ec"}, + {file = "rpds_py-0.22.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e79dd39f1e8c3504be0607e5fc6e86bb60fe3584bec8b782578c3b0fde8d932c"}, + {file = "rpds_py-0.22.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:e0fa2d4ec53dc51cf7d3bb22e0aa0143966119f42a0c3e4998293a3dd2856b09"}, + {file = "rpds_py-0.22.3-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:fda7cb070f442bf80b642cd56483b5548e43d366fe3f39b98e67cce780cded00"}, + {file = "rpds_py-0.22.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:cff63a0272fcd259dcc3be1657b07c929c466b067ceb1c20060e8d10af56f5bf"}, + {file = "rpds_py-0.22.3-cp310-cp310-win32.whl", hash = "sha256:9bd7228827ec7bb817089e2eb301d907c0d9827a9e558f22f762bb690b131652"}, + {file = "rpds_py-0.22.3-cp310-cp310-win_amd64.whl", hash = "sha256:9beeb01d8c190d7581a4d59522cd3d4b6887040dcfc744af99aa59fef3e041a8"}, + {file = "rpds_py-0.22.3-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:d20cfb4e099748ea39e6f7b16c91ab057989712d31761d3300d43134e26e165f"}, + {file = "rpds_py-0.22.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:68049202f67380ff9aa52f12e92b1c30115f32e6895cd7198fa2a7961621fc5a"}, + {file = "rpds_py-0.22.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb4f868f712b2dd4bcc538b0a0c1f63a2b1d584c925e69a224d759e7070a12d5"}, + {file = "rpds_py-0.22.3-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bc51abd01f08117283c5ebf64844a35144a0843ff7b2983e0648e4d3d9f10dbb"}, + {file = "rpds_py-0.22.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0f3cec041684de9a4684b1572fe28c7267410e02450f4561700ca5a3bc6695a2"}, + {file = "rpds_py-0.22.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7ef9d9da710be50ff6809fed8f1963fecdfecc8b86656cadfca3bc24289414b0"}, + {file = "rpds_py-0.22.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:59f4a79c19232a5774aee369a0c296712ad0e77f24e62cad53160312b1c1eaa1"}, + {file = "rpds_py-0.22.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1a60bce91f81ddaac922a40bbb571a12c1070cb20ebd6d49c48e0b101d87300d"}, + {file = "rpds_py-0.22.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:e89391e6d60251560f0a8f4bd32137b077a80d9b7dbe6d5cab1cd80d2746f648"}, + {file = "rpds_py-0.22.3-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:e3fb866d9932a3d7d0c82da76d816996d1667c44891bd861a0f97ba27e84fc74"}, + {file = "rpds_py-0.22.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:1352ae4f7c717ae8cba93421a63373e582d19d55d2ee2cbb184344c82d2ae55a"}, + {file = "rpds_py-0.22.3-cp311-cp311-win32.whl", hash = "sha256:b0b4136a252cadfa1adb705bb81524eee47d9f6aab4f2ee4fa1e9d3cd4581f64"}, + {file = "rpds_py-0.22.3-cp311-cp311-win_amd64.whl", hash = "sha256:8bd7c8cfc0b8247c8799080fbff54e0b9619e17cdfeb0478ba7295d43f635d7c"}, + {file = "rpds_py-0.22.3-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:27e98004595899949bd7a7b34e91fa7c44d7a97c40fcaf1d874168bb652ec67e"}, + 
{file = "rpds_py-0.22.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1978d0021e943aae58b9b0b196fb4895a25cc53d3956b8e35e0b7682eefb6d56"}, + {file = "rpds_py-0.22.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:655ca44a831ecb238d124e0402d98f6212ac527a0ba6c55ca26f616604e60a45"}, + {file = "rpds_py-0.22.3-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:feea821ee2a9273771bae61194004ee2fc33f8ec7db08117ef9147d4bbcbca8e"}, + {file = "rpds_py-0.22.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:22bebe05a9ffc70ebfa127efbc429bc26ec9e9b4ee4d15a740033efda515cf3d"}, + {file = "rpds_py-0.22.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3af6e48651c4e0d2d166dc1b033b7042ea3f871504b6805ba5f4fe31581d8d38"}, + {file = "rpds_py-0.22.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e67ba3c290821343c192f7eae1d8fd5999ca2dc99994114643e2f2d3e6138b15"}, + {file = "rpds_py-0.22.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:02fbb9c288ae08bcb34fb41d516d5eeb0455ac35b5512d03181d755d80810059"}, + {file = "rpds_py-0.22.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:f56a6b404f74ab372da986d240e2e002769a7d7102cc73eb238a4f72eec5284e"}, + {file = "rpds_py-0.22.3-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:0a0461200769ab3b9ab7e513f6013b7a97fdeee41c29b9db343f3c5a8e2b9e61"}, + {file = "rpds_py-0.22.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:8633e471c6207a039eff6aa116e35f69f3156b3989ea3e2d755f7bc41754a4a7"}, + {file = "rpds_py-0.22.3-cp312-cp312-win32.whl", hash = "sha256:593eba61ba0c3baae5bc9be2f5232430453fb4432048de28399ca7376de9c627"}, + {file = "rpds_py-0.22.3-cp312-cp312-win_amd64.whl", hash = "sha256:d115bffdd417c6d806ea9069237a4ae02f513b778e3789a359bc5856e0404cc4"}, + {file = "rpds_py-0.22.3-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:ea7433ce7e4bfc3a85654aeb6747babe3f66eaf9a1d0c1e7a4435bbdf27fea84"}, + {file = "rpds_py-0.22.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:6dd9412824c4ce1aca56c47b0991e65bebb7ac3f4edccfd3f156150c96a7bf25"}, + {file = "rpds_py-0.22.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:20070c65396f7373f5df4005862fa162db5d25d56150bddd0b3e8214e8ef45b4"}, + {file = "rpds_py-0.22.3-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:0b09865a9abc0ddff4e50b5ef65467cd94176bf1e0004184eb915cbc10fc05c5"}, + {file = "rpds_py-0.22.3-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3453e8d41fe5f17d1f8e9c383a7473cd46a63661628ec58e07777c2fff7196dc"}, + {file = "rpds_py-0.22.3-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f5d36399a1b96e1a5fdc91e0522544580dbebeb1f77f27b2b0ab25559e103b8b"}, + {file = "rpds_py-0.22.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:009de23c9c9ee54bf11303a966edf4d9087cd43a6003672e6aa7def643d06518"}, + {file = "rpds_py-0.22.3-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1aef18820ef3e4587ebe8b3bc9ba6e55892a6d7b93bac6d29d9f631a3b4befbd"}, + {file = "rpds_py-0.22.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f60bd8423be1d9d833f230fdbccf8f57af322d96bcad6599e5a771b151398eb2"}, + {file = "rpds_py-0.22.3-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:62d9cfcf4948683a18a9aff0ab7e1474d407b7bab2ca03116109f8464698ab16"}, + {file = "rpds_py-0.22.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = 
"sha256:9253fc214112405f0afa7db88739294295f0e08466987f1d70e29930262b4c8f"}, + {file = "rpds_py-0.22.3-cp313-cp313-win32.whl", hash = "sha256:fb0ba113b4983beac1a2eb16faffd76cb41e176bf58c4afe3e14b9c681f702de"}, + {file = "rpds_py-0.22.3-cp313-cp313-win_amd64.whl", hash = "sha256:c58e2339def52ef6b71b8f36d13c3688ea23fa093353f3a4fee2556e62086ec9"}, + {file = "rpds_py-0.22.3-cp313-cp313t-macosx_10_12_x86_64.whl", hash = "sha256:f82a116a1d03628a8ace4859556fb39fd1424c933341a08ea3ed6de1edb0283b"}, + {file = "rpds_py-0.22.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:3dfcbc95bd7992b16f3f7ba05af8a64ca694331bd24f9157b49dadeeb287493b"}, + {file = "rpds_py-0.22.3-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:59259dc58e57b10e7e18ce02c311804c10c5a793e6568f8af4dead03264584d1"}, + {file = "rpds_py-0.22.3-cp313-cp313t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5725dd9cc02068996d4438d397e255dcb1df776b7ceea3b9cb972bdb11260a83"}, + {file = "rpds_py-0.22.3-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:99b37292234e61325e7a5bb9689e55e48c3f5f603af88b1642666277a81f1fbd"}, + {file = "rpds_py-0.22.3-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:27b1d3b3915a99208fee9ab092b8184c420f2905b7d7feb4aeb5e4a9c509b8a1"}, + {file = "rpds_py-0.22.3-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f612463ac081803f243ff13cccc648578e2279295048f2a8d5eb430af2bae6e3"}, + {file = "rpds_py-0.22.3-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f73d3fef726b3243a811121de45193c0ca75f6407fe66f3f4e183c983573e130"}, + {file = "rpds_py-0.22.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:3f21f0495edea7fdbaaa87e633a8689cd285f8f4af5c869f27bc8074638ad69c"}, + {file = "rpds_py-0.22.3-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:1e9663daaf7a63ceccbbb8e3808fe90415b0757e2abddbfc2e06c857bf8c5e2b"}, + {file = "rpds_py-0.22.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:a76e42402542b1fae59798fab64432b2d015ab9d0c8c47ba7addddbaf7952333"}, + {file = "rpds_py-0.22.3-cp313-cp313t-win32.whl", hash = "sha256:69803198097467ee7282750acb507fba35ca22cc3b85f16cf45fb01cb9097730"}, + {file = "rpds_py-0.22.3-cp313-cp313t-win_amd64.whl", hash = "sha256:f5cf2a0c2bdadf3791b5c205d55a37a54025c6e18a71c71f82bb536cf9a454bf"}, + {file = "rpds_py-0.22.3-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:378753b4a4de2a7b34063d6f95ae81bfa7b15f2c1a04a9518e8644e81807ebea"}, + {file = "rpds_py-0.22.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3445e07bf2e8ecfeef6ef67ac83de670358abf2996916039b16a218e3d95e97e"}, + {file = "rpds_py-0.22.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7b2513ba235829860b13faa931f3b6846548021846ac808455301c23a101689d"}, + {file = "rpds_py-0.22.3-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:eaf16ae9ae519a0e237a0f528fd9f0197b9bb70f40263ee57ae53c2b8d48aeb3"}, + {file = "rpds_py-0.22.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:583f6a1993ca3369e0f80ba99d796d8e6b1a3a2a442dd4e1a79e652116413091"}, + {file = "rpds_py-0.22.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4617e1915a539a0d9a9567795023de41a87106522ff83fbfaf1f6baf8e85437e"}, + {file = "rpds_py-0.22.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0c150c7a61ed4a4f4955a96626574e9baf1adf772c2fb61ef6a5027e52803543"}, + {file = 
"rpds_py-0.22.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2fa4331c200c2521512595253f5bb70858b90f750d39b8cbfd67465f8d1b596d"}, + {file = "rpds_py-0.22.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:214b7a953d73b5e87f0ebece4a32a5bd83c60a3ecc9d4ec8f1dca968a2d91e99"}, + {file = "rpds_py-0.22.3-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:f47ad3d5f3258bd7058d2d506852217865afefe6153a36eb4b6928758041d831"}, + {file = "rpds_py-0.22.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:f276b245347e6e36526cbd4a266a417796fc531ddf391e43574cf6466c492520"}, + {file = "rpds_py-0.22.3-cp39-cp39-win32.whl", hash = "sha256:bbb232860e3d03d544bc03ac57855cd82ddf19c7a07651a7c0fdb95e9efea8b9"}, + {file = "rpds_py-0.22.3-cp39-cp39-win_amd64.whl", hash = "sha256:cfbc454a2880389dbb9b5b398e50d439e2e58669160f27b60e5eca11f68ae17c"}, + {file = "rpds_py-0.22.3-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:d48424e39c2611ee1b84ad0f44fb3b2b53d473e65de061e3f460fc0be5f1939d"}, + {file = "rpds_py-0.22.3-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:24e8abb5878e250f2eb0d7859a8e561846f98910326d06c0d51381fed59357bd"}, + {file = "rpds_py-0.22.3-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4b232061ca880db21fa14defe219840ad9b74b6158adb52ddf0e87bead9e8493"}, + {file = "rpds_py-0.22.3-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ac0a03221cdb5058ce0167ecc92a8c89e8d0decdc9e99a2ec23380793c4dcb96"}, + {file = "rpds_py-0.22.3-pp310-pypy310_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:eb0c341fa71df5a4595f9501df4ac5abfb5a09580081dffbd1ddd4654e6e9123"}, + {file = "rpds_py-0.22.3-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bf9db5488121b596dbfc6718c76092fda77b703c1f7533a226a5a9f65248f8ad"}, + {file = "rpds_py-0.22.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0b8db6b5b2d4491ad5b6bdc2bc7c017eec108acbf4e6785f42a9eb0ba234f4c9"}, + {file = "rpds_py-0.22.3-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b3d504047aba448d70cf6fa22e06cb09f7cbd761939fdd47604f5e007675c24e"}, + {file = "rpds_py-0.22.3-pp310-pypy310_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:e61b02c3f7a1e0b75e20c3978f7135fd13cb6cf551bf4a6d29b999a88830a338"}, + {file = "rpds_py-0.22.3-pp310-pypy310_pp73-musllinux_1_2_i686.whl", hash = "sha256:e35ba67d65d49080e8e5a1dd40101fccdd9798adb9b050ff670b7d74fa41c566"}, + {file = "rpds_py-0.22.3-pp310-pypy310_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:26fd7cac7dd51011a245f29a2cc6489c4608b5a8ce8d75661bb4a1066c52dfbe"}, + {file = "rpds_py-0.22.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:177c7c0fce2855833819c98e43c262007f42ce86651ffbb84f37883308cb0e7d"}, + {file = "rpds_py-0.22.3-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:bb47271f60660803ad11f4c61b42242b8c1312a31c98c578f79ef9387bbde21c"}, + {file = "rpds_py-0.22.3-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:70fb28128acbfd264eda9bf47015537ba3fe86e40d046eb2963d75024be4d055"}, + {file = "rpds_py-0.22.3-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:44d61b4b7d0c2c9ac019c314e52d7cbda0ae31078aabd0f22e583af3e0d79723"}, + {file = "rpds_py-0.22.3-pp39-pypy39_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5f0e260eaf54380380ac3808aa4ebe2d8ca28b9087cf411649f96bad6900c728"}, + {file = 
"rpds_py-0.22.3-pp39-pypy39_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b25bc607423935079e05619d7de556c91fb6adeae9d5f80868dde3468657994b"}, + {file = "rpds_py-0.22.3-pp39-pypy39_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fb6116dfb8d1925cbdb52595560584db42a7f664617a1f7d7f6e32f138cdf37d"}, + {file = "rpds_py-0.22.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a63cbdd98acef6570c62b92a1e43266f9e8b21e699c363c0fef13bd530799c11"}, + {file = "rpds_py-0.22.3-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2b8f60e1b739a74bab7e01fcbe3dddd4657ec685caa04681df9d562ef15b625f"}, + {file = "rpds_py-0.22.3-pp39-pypy39_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:2e8b55d8517a2fda8d95cb45d62a5a8bbf9dd0ad39c5b25c8833efea07b880ca"}, + {file = "rpds_py-0.22.3-pp39-pypy39_pp73-musllinux_1_2_i686.whl", hash = "sha256:2de29005e11637e7a2361fa151f780ff8eb2543a0da1413bb951e9f14b699ef3"}, + {file = "rpds_py-0.22.3-pp39-pypy39_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:666ecce376999bf619756a24ce15bb14c5bfaf04bf00abc7e663ce17c3f34fe7"}, + {file = "rpds_py-0.22.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:5246b14ca64a8675e0a7161f7af68fe3e910e6b90542b4bfb5439ba752191df6"}, + {file = "rpds_py-0.22.3.tar.gz", hash = "sha256:e32fee8ab45d3c2db6da19a5323bc3362237c8b653c70194414b892fd06a080d"}, ] [[package]] @@ -6144,121 +6243,26 @@ pyserial = "*" [[package]] name = "safetensors" -version = "0.4.5" +version = "0.5.0" description = "" optional = false python-versions = ">=3.7" files = [ - {file = "safetensors-0.4.5-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:a63eaccd22243c67e4f2b1c3e258b257effc4acd78f3b9d397edc8cf8f1298a7"}, - {file = "safetensors-0.4.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:23fc9b4ec7b602915cbb4ec1a7c1ad96d2743c322f20ab709e2c35d1b66dad27"}, - {file = "safetensors-0.4.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6885016f34bef80ea1085b7e99b3c1f92cb1be78a49839203060f67b40aee761"}, - {file = "safetensors-0.4.5-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:133620f443450429322f238fda74d512c4008621227fccf2f8cf4a76206fea7c"}, - {file = "safetensors-0.4.5-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4fb3e0609ec12d2a77e882f07cced530b8262027f64b75d399f1504ffec0ba56"}, - {file = "safetensors-0.4.5-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d0f1dd769f064adc33831f5e97ad07babbd728427f98e3e1db6902e369122737"}, - {file = "safetensors-0.4.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c6d156bdb26732feada84f9388a9f135528c1ef5b05fae153da365ad4319c4c5"}, - {file = "safetensors-0.4.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9e347d77e2c77eb7624400ccd09bed69d35c0332f417ce8c048d404a096c593b"}, - {file = "safetensors-0.4.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:9f556eea3aec1d3d955403159fe2123ddd68e880f83954ee9b4a3f2e15e716b6"}, - {file = "safetensors-0.4.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:9483f42be3b6bc8ff77dd67302de8ae411c4db39f7224dec66b0eb95822e4163"}, - {file = "safetensors-0.4.5-cp310-none-win32.whl", hash = "sha256:7389129c03fadd1ccc37fd1ebbc773f2b031483b04700923c3511d2a939252cc"}, - {file = "safetensors-0.4.5-cp310-none-win_amd64.whl", hash = "sha256:e98ef5524f8b6620c8cdef97220c0b6a5c1cef69852fcd2f174bb96c2bb316b1"}, - {file = 
"safetensors-0.4.5-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:21f848d7aebd5954f92538552d6d75f7c1b4500f51664078b5b49720d180e47c"}, - {file = "safetensors-0.4.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:bb07000b19d41e35eecef9a454f31a8b4718a185293f0d0b1c4b61d6e4487971"}, - {file = "safetensors-0.4.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:09dedf7c2fda934ee68143202acff6e9e8eb0ddeeb4cfc24182bef999efa9f42"}, - {file = "safetensors-0.4.5-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:59b77e4b7a708988d84f26de3ebead61ef1659c73dcbc9946c18f3b1786d2688"}, - {file = "safetensors-0.4.5-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5d3bc83e14d67adc2e9387e511097f254bd1b43c3020440e708858c684cbac68"}, - {file = "safetensors-0.4.5-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:39371fc551c1072976073ab258c3119395294cf49cdc1f8476794627de3130df"}, - {file = "safetensors-0.4.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a6c19feda32b931cae0acd42748a670bdf56bee6476a046af20181ad3fee4090"}, - {file = "safetensors-0.4.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a659467495de201e2f282063808a41170448c78bada1e62707b07a27b05e6943"}, - {file = "safetensors-0.4.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:bad5e4b2476949bcd638a89f71b6916fa9a5cae5c1ae7eede337aca2100435c0"}, - {file = "safetensors-0.4.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:a3a315a6d0054bc6889a17f5668a73f94f7fe55121ff59e0a199e3519c08565f"}, - {file = "safetensors-0.4.5-cp311-none-win32.whl", hash = "sha256:a01e232e6d3d5cf8b1667bc3b657a77bdab73f0743c26c1d3c5dd7ce86bd3a92"}, - {file = "safetensors-0.4.5-cp311-none-win_amd64.whl", hash = "sha256:cbd39cae1ad3e3ef6f63a6f07296b080c951f24cec60188378e43d3713000c04"}, - {file = "safetensors-0.4.5-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:473300314e026bd1043cef391bb16a8689453363381561b8a3e443870937cc1e"}, - {file = "safetensors-0.4.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:801183a0f76dc647f51a2d9141ad341f9665602a7899a693207a82fb102cc53e"}, - {file = "safetensors-0.4.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1524b54246e422ad6fb6aea1ac71edeeb77666efa67230e1faf6999df9b2e27f"}, - {file = "safetensors-0.4.5-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b3139098e3e8b2ad7afbca96d30ad29157b50c90861084e69fcb80dec7430461"}, - {file = "safetensors-0.4.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:65573dc35be9059770808e276b017256fa30058802c29e1038eb1c00028502ea"}, - {file = "safetensors-0.4.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fd33da8e9407559f8779c82a0448e2133737f922d71f884da27184549416bfed"}, - {file = "safetensors-0.4.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3685ce7ed036f916316b567152482b7e959dc754fcc4a8342333d222e05f407c"}, - {file = "safetensors-0.4.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:dde2bf390d25f67908278d6f5d59e46211ef98e44108727084d4637ee70ab4f1"}, - {file = "safetensors-0.4.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:7469d70d3de970b1698d47c11ebbf296a308702cbaae7fcb993944751cf985f4"}, - {file = "safetensors-0.4.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:3a6ba28118636a130ccbb968bc33d4684c48678695dba2590169d5ab03a45646"}, - {file = 
"safetensors-0.4.5-cp312-none-win32.whl", hash = "sha256:c859c7ed90b0047f58ee27751c8e56951452ed36a67afee1b0a87847d065eec6"}, - {file = "safetensors-0.4.5-cp312-none-win_amd64.whl", hash = "sha256:b5a8810ad6a6f933fff6c276eae92c1da217b39b4d8b1bc1c0b8af2d270dc532"}, - {file = "safetensors-0.4.5-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:25e5f8e2e92a74f05b4ca55686234c32aac19927903792b30ee6d7bd5653d54e"}, - {file = "safetensors-0.4.5-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:81efb124b58af39fcd684254c645e35692fea81c51627259cdf6d67ff4458916"}, - {file = "safetensors-0.4.5-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:585f1703a518b437f5103aa9cf70e9bd437cb78eea9c51024329e4fb8a3e3679"}, - {file = "safetensors-0.4.5-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4b99fbf72e3faf0b2f5f16e5e3458b93b7d0a83984fe8d5364c60aa169f2da89"}, - {file = "safetensors-0.4.5-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b17b299ca9966ca983ecda1c0791a3f07f9ca6ab5ded8ef3d283fff45f6bcd5f"}, - {file = "safetensors-0.4.5-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:76ded72f69209c9780fdb23ea89e56d35c54ae6abcdec67ccb22af8e696e449a"}, - {file = "safetensors-0.4.5-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2783956926303dcfeb1de91a4d1204cd4089ab441e622e7caee0642281109db3"}, - {file = "safetensors-0.4.5-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d94581aab8c6b204def4d7320f07534d6ee34cd4855688004a4354e63b639a35"}, - {file = "safetensors-0.4.5-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:67e1e7cb8678bb1b37ac48ec0df04faf689e2f4e9e81e566b5c63d9f23748523"}, - {file = "safetensors-0.4.5-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:dbd280b07e6054ea68b0cb4b16ad9703e7d63cd6890f577cb98acc5354780142"}, - {file = "safetensors-0.4.5-cp37-cp37m-macosx_10_12_x86_64.whl", hash = "sha256:77d9b228da8374c7262046a36c1f656ba32a93df6cc51cd4453af932011e77f1"}, - {file = "safetensors-0.4.5-cp37-cp37m-macosx_11_0_arm64.whl", hash = "sha256:500cac01d50b301ab7bb192353317035011c5ceeef0fca652f9f43c000bb7f8d"}, - {file = "safetensors-0.4.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:75331c0c746f03158ded32465b7d0b0e24c5a22121743662a2393439c43a45cf"}, - {file = "safetensors-0.4.5-cp37-cp37m-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:670e95fe34e0d591d0529e5e59fd9d3d72bc77b1444fcaa14dccda4f36b5a38b"}, - {file = "safetensors-0.4.5-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:098923e2574ff237c517d6e840acada8e5b311cb1fa226019105ed82e9c3b62f"}, - {file = "safetensors-0.4.5-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:13ca0902d2648775089fa6a0c8fc9e6390c5f8ee576517d33f9261656f851e3f"}, - {file = "safetensors-0.4.5-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5f0032bedc869c56f8d26259fe39cd21c5199cd57f2228d817a0e23e8370af25"}, - {file = "safetensors-0.4.5-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f4b15f51b4f8f2a512341d9ce3475cacc19c5fdfc5db1f0e19449e75f95c7dc8"}, - {file = "safetensors-0.4.5-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:f6594d130d0ad933d885c6a7b75c5183cb0e8450f799b80a39eae2b8508955eb"}, - {file = "safetensors-0.4.5-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:60c828a27e852ded2c85fc0f87bf1ec20e464c5cd4d56ff0e0711855cc2e17f8"}, - {file = 
"safetensors-0.4.5-cp37-none-win32.whl", hash = "sha256:6d3de65718b86c3eeaa8b73a9c3d123f9307a96bbd7be9698e21e76a56443af5"}, - {file = "safetensors-0.4.5-cp37-none-win_amd64.whl", hash = "sha256:5a2d68a523a4cefd791156a4174189a4114cf0bf9c50ceb89f261600f3b2b81a"}, - {file = "safetensors-0.4.5-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:e7a97058f96340850da0601a3309f3d29d6191b0702b2da201e54c6e3e44ccf0"}, - {file = "safetensors-0.4.5-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:63bfd425e25f5c733f572e2246e08a1c38bd6f2e027d3f7c87e2e43f228d1345"}, - {file = "safetensors-0.4.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f3664ac565d0e809b0b929dae7ccd74e4d3273cd0c6d1220c6430035befb678e"}, - {file = "safetensors-0.4.5-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:313514b0b9b73ff4ddfb4edd71860696dbe3c1c9dc4d5cc13dbd74da283d2cbf"}, - {file = "safetensors-0.4.5-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:31fa33ee326f750a2f2134a6174773c281d9a266ccd000bd4686d8021f1f3dac"}, - {file = "safetensors-0.4.5-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:09566792588d77b68abe53754c9f1308fadd35c9f87be939e22c623eaacbed6b"}, - {file = "safetensors-0.4.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:309aaec9b66cbf07ad3a2e5cb8a03205663324fea024ba391594423d0f00d9fe"}, - {file = "safetensors-0.4.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:53946c5813b8f9e26103c5efff4a931cc45d874f45229edd68557ffb35ffb9f8"}, - {file = "safetensors-0.4.5-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:868f9df9e99ad1e7f38c52194063a982bc88fedc7d05096f4f8160403aaf4bd6"}, - {file = "safetensors-0.4.5-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:9cc9449bd0b0bc538bd5e268221f0c5590bc5c14c1934a6ae359d44410dc68c4"}, - {file = "safetensors-0.4.5-cp38-none-win32.whl", hash = "sha256:83c4f13a9e687335c3928f615cd63a37e3f8ef072a3f2a0599fa09f863fb06a2"}, - {file = "safetensors-0.4.5-cp38-none-win_amd64.whl", hash = "sha256:b98d40a2ffa560653f6274e15b27b3544e8e3713a44627ce268f419f35c49478"}, - {file = "safetensors-0.4.5-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:cf727bb1281d66699bef5683b04d98c894a2803442c490a8d45cd365abfbdeb2"}, - {file = "safetensors-0.4.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:96f1d038c827cdc552d97e71f522e1049fef0542be575421f7684756a748e457"}, - {file = "safetensors-0.4.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:139fbee92570ecea774e6344fee908907db79646d00b12c535f66bc78bd5ea2c"}, - {file = "safetensors-0.4.5-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c36302c1c69eebb383775a89645a32b9d266878fab619819ce660309d6176c9b"}, - {file = "safetensors-0.4.5-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d641f5b8149ea98deb5ffcf604d764aad1de38a8285f86771ce1abf8e74c4891"}, - {file = "safetensors-0.4.5-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b4db6a61d968de73722b858038c616a1bebd4a86abe2688e46ca0cc2d17558f2"}, - {file = "safetensors-0.4.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b75a616e02f21b6f1d5785b20cecbab5e2bd3f6358a90e8925b813d557666ec1"}, - {file = "safetensors-0.4.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:788ee7d04cc0e0e7f944c52ff05f52a4415b312f5efd2ee66389fb7685ee030c"}, - {file = "safetensors-0.4.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = 
"sha256:87bc42bd04fd9ca31396d3ca0433db0be1411b6b53ac5a32b7845a85d01ffc2e"}, - {file = "safetensors-0.4.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:4037676c86365a721a8c9510323a51861d703b399b78a6b4486a54a65a975fca"}, - {file = "safetensors-0.4.5-cp39-none-win32.whl", hash = "sha256:1500418454529d0ed5c1564bda376c4ddff43f30fce9517d9bee7bcce5a8ef50"}, - {file = "safetensors-0.4.5-cp39-none-win_amd64.whl", hash = "sha256:9d1a94b9d793ed8fe35ab6d5cea28d540a46559bafc6aae98f30ee0867000cab"}, - {file = "safetensors-0.4.5-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:fdadf66b5a22ceb645d5435a0be7a0292ce59648ca1d46b352f13cff3ea80410"}, - {file = "safetensors-0.4.5-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d42ffd4c2259f31832cb17ff866c111684c87bd930892a1ba53fed28370c918c"}, - {file = "safetensors-0.4.5-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dd8a1f6d2063a92cd04145c7fd9e31a1c7d85fbec20113a14b487563fdbc0597"}, - {file = "safetensors-0.4.5-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:951d2fcf1817f4fb0ef0b48f6696688a4e852a95922a042b3f96aaa67eedc920"}, - {file = "safetensors-0.4.5-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6ac85d9a8c1af0e3132371d9f2d134695a06a96993c2e2f0bbe25debb9e3f67a"}, - {file = "safetensors-0.4.5-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:e3cec4a29eb7fe8da0b1c7988bc3828183080439dd559f720414450de076fcab"}, - {file = "safetensors-0.4.5-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:21742b391b859e67b26c0b2ac37f52c9c0944a879a25ad2f9f9f3cd61e7fda8f"}, - {file = "safetensors-0.4.5-pp37-pypy37_pp73-macosx_10_12_x86_64.whl", hash = "sha256:c7db3006a4915151ce1913652e907cdede299b974641a83fbc092102ac41b644"}, - {file = "safetensors-0.4.5-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f68bf99ea970960a237f416ea394e266e0361895753df06e3e06e6ea7907d98b"}, - {file = "safetensors-0.4.5-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8158938cf3324172df024da511839d373c40fbfaa83e9abf467174b2910d7b4c"}, - {file = "safetensors-0.4.5-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:540ce6c4bf6b58cb0fd93fa5f143bc0ee341c93bb4f9287ccd92cf898cc1b0dd"}, - {file = "safetensors-0.4.5-pp37-pypy37_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:bfeaa1a699c6b9ed514bd15e6a91e74738b71125a9292159e3d6b7f0a53d2cde"}, - {file = "safetensors-0.4.5-pp37-pypy37_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:01c8f00da537af711979e1b42a69a8ec9e1d7112f208e0e9b8a35d2c381085ef"}, - {file = "safetensors-0.4.5-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:a0dd565f83b30f2ca79b5d35748d0d99dd4b3454f80e03dfb41f0038e3bdf180"}, - {file = "safetensors-0.4.5-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:023b6e5facda76989f4cba95a861b7e656b87e225f61811065d5c501f78cdb3f"}, - {file = "safetensors-0.4.5-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9633b663393d5796f0b60249549371e392b75a0b955c07e9c6f8708a87fc841f"}, - {file = "safetensors-0.4.5-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:78dd8adfb48716233c45f676d6e48534d34b4bceb50162c13d1f0bdf6f78590a"}, - {file = "safetensors-0.4.5-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8e8deb16c4321d61ae72533b8451ec4a9af8656d1c61ff81aa49f966406e4b68"}, - {file = 
"safetensors-0.4.5-pp38-pypy38_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:52452fa5999dc50c4decaf0c53aa28371f7f1e0fe5c2dd9129059fbe1e1599c7"}, - {file = "safetensors-0.4.5-pp38-pypy38_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:d5f23198821e227cfc52d50fa989813513db381255c6d100927b012f0cfec63d"}, - {file = "safetensors-0.4.5-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:f4beb84b6073b1247a773141a6331117e35d07134b3bb0383003f39971d414bb"}, - {file = "safetensors-0.4.5-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:68814d599d25ed2fdd045ed54d370d1d03cf35e02dce56de44c651f828fb9b7b"}, - {file = "safetensors-0.4.5-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f0b6453c54c57c1781292c46593f8a37254b8b99004c68d6c3ce229688931a22"}, - {file = "safetensors-0.4.5-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:adaa9c6dead67e2dd90d634f89131e43162012479d86e25618e821a03d1eb1dc"}, - {file = "safetensors-0.4.5-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:73e7d408e9012cd17511b382b43547850969c7979efc2bc353f317abaf23c84c"}, - {file = "safetensors-0.4.5-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:775409ce0fcc58b10773fdb4221ed1eb007de10fe7adbdf8f5e8a56096b6f0bc"}, - {file = "safetensors-0.4.5-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:834001bed193e4440c4a3950a31059523ee5090605c907c66808664c932b549c"}, - {file = "safetensors-0.4.5.tar.gz", hash = "sha256:d73de19682deabb02524b3d5d1f8b3aaba94c72f1bbfc7911b9b9d5d391c0310"}, + {file = "safetensors-0.5.0-cp38-abi3-macosx_10_12_x86_64.whl", hash = "sha256:c683b9b485bee43422ba2855f72777c37647190281e03da4c8d2a69fa5336558"}, + {file = "safetensors-0.5.0-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:6106aa835deb7263f7014f74c05842ab828d6c11d789f2e7e98f26b1a305e72d"}, + {file = "safetensors-0.5.0-cp38-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a1349611f74f55c5ee1c1c144c536a2743c38f7d8bf60b9fc8267e0efc0591a2"}, + {file = "safetensors-0.5.0-cp38-abi3-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:56d936028ac799e18644b08a91fd98b4b62ae3dcd0440b1cfcb56535785589f1"}, + {file = "safetensors-0.5.0-cp38-abi3-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a2f26afada2233576ffea6b80042c2c0a8105c164254af56168ec14299ad3122"}, + {file = "safetensors-0.5.0-cp38-abi3-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:20067e7a5e63f0cbc88457b2a1161e70ff73af4cc3a24bce90309430cd6f6e7e"}, + {file = "safetensors-0.5.0-cp38-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:649d6a4aa34d5174ae87289068ccc2fec2a1a998ecf83425aa5a42c3eff69bcf"}, + {file = "safetensors-0.5.0-cp38-abi3-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:debff88f41d569a3e93a955469f83864e432af35bb34b16f65a9ddf378daa3ae"}, + {file = "safetensors-0.5.0-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:bdf6a3e366ea8ba1a0538db6099229e95811194432c684ea28ea7ae28763b8dc"}, + {file = "safetensors-0.5.0-cp38-abi3-musllinux_1_2_armv7l.whl", hash = "sha256:0371afd84c200a80eb7103bf715108b0c3846132fb82453ae018609a15551580"}, + {file = "safetensors-0.5.0-cp38-abi3-musllinux_1_2_i686.whl", hash = "sha256:5ec7fc8c3d2f32ebf1c7011bc886b362e53ee0a1ec6d828c39d531fed8b325d6"}, + {file = "safetensors-0.5.0-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:53715e4ea0ef23c08f004baae0f609a7773de7d4148727760417c6760cfd6b76"}, + {file = "safetensors-0.5.0-cp38-abi3-win32.whl", hash = 
"sha256:b85565bc2f0456961a788d2f11d9d892eec46603db0e4923aa9512c2355aa727"}, + {file = "safetensors-0.5.0-cp38-abi3-win_amd64.whl", hash = "sha256:f451941f8aa11e7be5c3fa450e264609a2b1e65fa38ae590a74e55a94d646b76"}, + {file = "safetensors-0.5.0.tar.gz", hash = "sha256:c47b34c549fa1e0c655c4644da31332c61332c732c47c8dd9399347e9aac69d1"}, ] [package.extras] @@ -6268,7 +6272,7 @@ jax = ["flax (>=0.6.3)", "jax (>=0.3.25)", "jaxlib (>=0.3.25)", "safetensors[num mlx = ["mlx (>=0.0.9)"] numpy = ["numpy (>=1.21.6)"] paddlepaddle = ["paddlepaddle (>=2.4.1)", "safetensors[numpy]"] -pinned-tf = ["safetensors[numpy]", "tensorflow (==2.11.0)"] +pinned-tf = ["safetensors[numpy]", "tensorflow (==2.18.0)"] quality = ["black (==22.3)", "click (==8.0.4)", "flake8 (>=3.8.3)", "isort (>=5.5.4)"] tensorflow = ["safetensors[numpy]", "tensorflow (>=2.11.0)"] testing = ["h5py (>=3.7.0)", "huggingface-hub (>=0.12.1)", "hypothesis (>=6.70.2)", "pytest (>=7.2.0)", "pytest-benchmark (>=4.0.0)", "safetensors[numpy]", "setuptools-rust (>=1.5.2)"] @@ -6276,101 +6280,108 @@ torch = ["safetensors[numpy]", "torch (>=1.10)"] [[package]] name = "scikit-image" -version = "0.24.0" +version = "0.25.0" description = "Image processing in Python" optional = true -python-versions = ">=3.9" +python-versions = ">=3.10" files = [ - {file = "scikit_image-0.24.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:cb3bc0264b6ab30b43c4179ee6156bc18b4861e78bb329dd8d16537b7bbf827a"}, - {file = "scikit_image-0.24.0-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:9c7a52e20cdd760738da38564ba1fed7942b623c0317489af1a598a8dedf088b"}, - {file = "scikit_image-0.24.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:93f46e6ce42e5409f4d09ce1b0c7f80dd7e4373bcec635b6348b63e3c886eac8"}, - {file = "scikit_image-0.24.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:39ee0af13435c57351a3397eb379e72164ff85161923eec0c38849fecf1b4764"}, - {file = "scikit_image-0.24.0-cp310-cp310-win_amd64.whl", hash = "sha256:7ac7913b028b8aa780ffae85922894a69e33d1c0bf270ea1774f382fe8bf95e7"}, - {file = "scikit_image-0.24.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:272909e02a59cea3ed4aa03739bb88df2625daa809f633f40b5053cf09241831"}, - {file = "scikit_image-0.24.0-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:190ebde80b4470fe8838764b9b15f232a964f1a20391663e31008d76f0c696f7"}, - {file = "scikit_image-0.24.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:59c98cc695005faf2b79904e4663796c977af22586ddf1b12d6af2fa22842dc2"}, - {file = "scikit_image-0.24.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fa27b3a0dbad807b966b8db2d78da734cb812ca4787f7fbb143764800ce2fa9c"}, - {file = "scikit_image-0.24.0-cp311-cp311-win_amd64.whl", hash = "sha256:dacf591ac0c272a111181afad4b788a27fe70d213cfddd631d151cbc34f8ca2c"}, - {file = "scikit_image-0.24.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:6fccceb54c9574590abcddc8caf6cefa57c13b5b8b4260ab3ff88ad8f3c252b3"}, - {file = "scikit_image-0.24.0-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:ccc01e4760d655aab7601c1ba7aa4ddd8b46f494ac46ec9c268df6f33ccddf4c"}, - {file = "scikit_image-0.24.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:18836a18d3a7b6aca5376a2d805f0045826bc6c9fc85331659c33b4813e0b563"}, - {file = "scikit_image-0.24.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8579bda9c3f78cb3b3ed8b9425213c53a25fa7e994b7ac01f2440b395babf660"}, - {file = 
"scikit_image-0.24.0-cp312-cp312-win_amd64.whl", hash = "sha256:82ab903afa60b2da1da2e6f0c8c65e7c8868c60a869464c41971da929b3e82bc"}, - {file = "scikit_image-0.24.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ef04360eda372ee5cd60aebe9be91258639c86ae2ea24093fb9182118008d009"}, - {file = "scikit_image-0.24.0-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:e9aadb442360a7e76f0c5c9d105f79a83d6df0e01e431bd1d5757e2c5871a1f3"}, - {file = "scikit_image-0.24.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5e37de6f4c1abcf794e13c258dc9b7d385d5be868441de11c180363824192ff7"}, - {file = "scikit_image-0.24.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4688c18bd7ec33c08d7bf0fd19549be246d90d5f2c1d795a89986629af0a1e83"}, - {file = "scikit_image-0.24.0-cp39-cp39-win_amd64.whl", hash = "sha256:56dab751d20b25d5d3985e95c9b4e975f55573554bd76b0aedf5875217c93e69"}, - {file = "scikit_image-0.24.0.tar.gz", hash = "sha256:5d16efe95da8edbeb363e0c4157b99becbd650a60b77f6e3af5768b66cf007ab"}, -] - -[package.dependencies] -imageio = ">=2.33" + {file = "scikit_image-0.25.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:2e1ab19beedb2adaaf5173b0c508687a4c7d392ffb1c21513887ba2331b517e3"}, + {file = "scikit_image-0.25.0-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:6a749e8d7ba1216e3bd0da7156ddf6e1d9a4d03aa8bc86880b29aadd954b0b11"}, + {file = "scikit_image-0.25.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2a8e6c3d58ab8cad95cd695bd0fe1be8b8708acdf02ebfcb6c0225e267250021"}, + {file = "scikit_image-0.25.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bd419e765f33a43eebb3509cdab382938085c9e269c01d8da80dbe519e89ec3f"}, + {file = "scikit_image-0.25.0-cp310-cp310-win_amd64.whl", hash = "sha256:ea2577b6f68cba3a06ac6f362ab39a62701fefce2138a6bf3e978ecbab71a024"}, + {file = "scikit_image-0.25.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:7e235726d9b404527445679030209965c5365767b8728584fadd8dbfa29e29de"}, + {file = "scikit_image-0.25.0-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:854b88b7d8b862ccd8f22e660c16fd54f9430c87e079c8dfe46c7f0cebbb1de3"}, + {file = "scikit_image-0.25.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:70e2d90b5bfffffe0880d25d40ddab9ca5c145912461d6c8f6bd3449f4e527bb"}, + {file = "scikit_image-0.25.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d4287052dcd8fe63934daa6cbe28b2abe817d75e9b952290fdb4de42254740fc"}, + {file = "scikit_image-0.25.0-cp311-cp311-win_amd64.whl", hash = "sha256:d1e25ff6a3cdd8be938a5a06b441020aac304fa9f457a808bd359f5260468739"}, + {file = "scikit_image-0.25.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:7e63f18b10f9b74590d2ca62cbc4147e696a5e72cfcbcd4af52395fd94fcdc6e"}, + {file = "scikit_image-0.25.0-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:bad4af5edf58775607c153af5bc3f193c2b67261ea9817b62362c746e439d094"}, + {file = "scikit_image-0.25.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:44f7681ff99eed2c33d993bc4bfc17b62e6cadbca1081c7fdbb3607ce89b15e6"}, + {file = "scikit_image-0.25.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:758f55d858aa796114a4275051ca4bb41d8b40c53eb78cb60f0b1ed235d4144b"}, + {file = "scikit_image-0.25.0-cp312-cp312-win_amd64.whl", hash = "sha256:4f7178c6fb6163710571522847326ad936a603646255b22d3d76b6ba58153890"}, + {file = "scikit_image-0.25.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = 
"sha256:d3b08a8894190bc49038dc1a61f6ef0991ff520e5268604abd7ad217f693a0cc"}, + {file = "scikit_image-0.25.0-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:8438eac699c8b2820e5956960191d0c3b302bf9c4d42dbf194a229db04abacc3"}, + {file = "scikit_image-0.25.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9920673ef08ea44026c80deb14cf84d5c0cc1a68efad914c126b76110ed017a8"}, + {file = "scikit_image-0.25.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0fe2f05cda852a5f90872054dd3709e9c4e670fc7332aef169867944e1b37431"}, + {file = "scikit_image-0.25.0-cp313-cp313-win_amd64.whl", hash = "sha256:8ede552097ee281d01b25dc4ce121fdc17b6a43c36bbc3c13e39f0e3d8fb5239"}, + {file = "scikit_image-0.25.0.tar.gz", hash = "sha256:58d94fea11b6b3306b3770417dc1cbca7fa9bcbd6a13945d7910399c88c2018c"}, +] + +[package.dependencies] +imageio = ">=2.33,<2.35.0 || >2.35.0" lazy-loader = ">=0.4" -networkx = ">=2.8" -numpy = ">=1.23" +networkx = ">=3.0" +numpy = ">=1.24" packaging = ">=21" -pillow = ">=9.1" -scipy = ">=1.9" +pillow = ">=10.1" +scipy = ">=1.11.2" tifffile = ">=2022.8.12" [package.extras] -build = ["Cython (>=3.0.4)", "build", "meson-python (>=0.15)", "ninja", "numpy (>=2.0.0rc1)", "packaging (>=21)", "pythran", "setuptools (>=67)", "spin (==0.8)", "wheel"] +build = ["Cython (>=3.0.8)", "build (>=1.2.1)", "meson-python (>=0.16)", "ninja (>=1.11.1.1)", "numpy (>=2.0)", "pythran (>=0.16)", "setuptools (>=68)", "spin (==0.13)"] data = ["pooch (>=1.6.0)"] developer = ["ipython", "pre-commit", "tomli"] -docs = ["PyWavelets (>=1.1.1)", "dask[array] (>=2022.9.2)", "ipykernel", "ipywidgets", "kaleido", "matplotlib (>=3.6)", "myst-parser", "numpydoc (>=1.7)", "pandas (>=1.5)", "plotly (>=5.10)", "pooch (>=1.6)", "pydata-sphinx-theme (>=0.15.2)", "pytest-doctestplus", "pytest-runner", "scikit-learn (>=1.1)", "seaborn (>=0.11)", "sphinx (>=7.3)", "sphinx-copybutton", "sphinx-gallery (>=0.14)", "sphinx_design (>=0.5)", "tifffile (>=2022.8.12)"] -optional = ["PyWavelets (>=1.1.1)", "SimpleITK", "astropy (>=5.0)", "cloudpickle (>=0.2.1)", "dask[array] (>=2021.1.0)", "matplotlib (>=3.6)", "pooch (>=1.6.0)", "pyamg", "scikit-learn (>=1.1)"] +docs = ["PyWavelets (>=1.6)", "dask[array] (>=2022.9.2)", "intersphinx-registry (>=0.2411.14)", "ipykernel", "ipywidgets", "kaleido (==0.2.1)", "matplotlib (>=3.7)", "myst-parser", "numpydoc (>=1.7)", "pandas (>=2.0)", "plotly (>=5.20)", "pooch (>=1.6)", "pydata-sphinx-theme (>=0.16)", "pytest-doctestplus", "scikit-learn (>=1.2)", "seaborn (>=0.11)", "sphinx (>=8.0)", "sphinx-copybutton", "sphinx-gallery[parallel] (>=0.18)", "sphinx_design (>=0.5)", "tifffile (>=2022.8.12)"] +optional = ["PyWavelets (>=1.6)", "SimpleITK", "astropy (>=5.0)", "cloudpickle (>=0.2.1)", "dask[array] (>=2021.1.0,!=2024.8.0)", "matplotlib (>=3.7)", "pooch (>=1.6.0)", "pyamg (>=5.2)", "scikit-learn (>=1.2)"] test = ["asv", "numpydoc (>=1.7)", "pooch (>=1.6.0)", "pytest (>=7.0)", "pytest-cov (>=2.11.0)", "pytest-doctestplus", "pytest-faulthandler", "pytest-localserver"] [[package]] name = "scipy" -version = "1.14.1" +version = "1.15.0" description = "Fundamental algorithms for scientific computing in Python" optional = true python-versions = ">=3.10" files = [ - {file = "scipy-1.14.1-cp310-cp310-macosx_10_13_x86_64.whl", hash = "sha256:b28d2ca4add7ac16ae8bb6632a3c86e4b9e4d52d3e34267f6e1b0c1f8d87e389"}, - {file = "scipy-1.14.1-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:d0d2821003174de06b69e58cef2316a6622b60ee613121199cb2852a873f8cf3"}, - {file = 
"scipy-1.14.1-cp310-cp310-macosx_14_0_arm64.whl", hash = "sha256:8bddf15838ba768bb5f5083c1ea012d64c9a444e16192762bd858f1e126196d0"}, - {file = "scipy-1.14.1-cp310-cp310-macosx_14_0_x86_64.whl", hash = "sha256:97c5dddd5932bd2a1a31c927ba5e1463a53b87ca96b5c9bdf5dfd6096e27efc3"}, - {file = "scipy-1.14.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2ff0a7e01e422c15739ecd64432743cf7aae2b03f3084288f399affcefe5222d"}, - {file = "scipy-1.14.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8e32dced201274bf96899e6491d9ba3e9a5f6b336708656466ad0522d8528f69"}, - {file = "scipy-1.14.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:8426251ad1e4ad903a4514712d2fa8fdd5382c978010d1c6f5f37ef286a713ad"}, - {file = "scipy-1.14.1-cp310-cp310-win_amd64.whl", hash = "sha256:a49f6ed96f83966f576b33a44257d869756df6cf1ef4934f59dd58b25e0327e5"}, - {file = "scipy-1.14.1-cp311-cp311-macosx_10_13_x86_64.whl", hash = "sha256:2da0469a4ef0ecd3693761acbdc20f2fdeafb69e6819cc081308cc978153c675"}, - {file = "scipy-1.14.1-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:c0ee987efa6737242745f347835da2cc5bb9f1b42996a4d97d5c7ff7928cb6f2"}, - {file = "scipy-1.14.1-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:3a1b111fac6baec1c1d92f27e76511c9e7218f1695d61b59e05e0fe04dc59617"}, - {file = "scipy-1.14.1-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:8475230e55549ab3f207bff11ebfc91c805dc3463ef62eda3ccf593254524ce8"}, - {file = "scipy-1.14.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:278266012eb69f4a720827bdd2dc54b2271c97d84255b2faaa8f161a158c3b37"}, - {file = "scipy-1.14.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fef8c87f8abfb884dac04e97824b61299880c43f4ce675dd2cbeadd3c9b466d2"}, - {file = "scipy-1.14.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:b05d43735bb2f07d689f56f7b474788a13ed8adc484a85aa65c0fd931cf9ccd2"}, - {file = "scipy-1.14.1-cp311-cp311-win_amd64.whl", hash = "sha256:716e389b694c4bb564b4fc0c51bc84d381735e0d39d3f26ec1af2556ec6aad94"}, - {file = "scipy-1.14.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:631f07b3734d34aced009aaf6fedfd0eb3498a97e581c3b1e5f14a04164a456d"}, - {file = "scipy-1.14.1-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:af29a935803cc707ab2ed7791c44288a682f9c8107bc00f0eccc4f92c08d6e07"}, - {file = "scipy-1.14.1-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:2843f2d527d9eebec9a43e6b406fb7266f3af25a751aa91d62ff416f54170bc5"}, - {file = "scipy-1.14.1-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:eb58ca0abd96911932f688528977858681a59d61a7ce908ffd355957f7025cfc"}, - {file = "scipy-1.14.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:30ac8812c1d2aab7131a79ba62933a2a76f582d5dbbc695192453dae67ad6310"}, - {file = "scipy-1.14.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f9ea80f2e65bdaa0b7627fb00cbeb2daf163caa015e59b7516395fe3bd1e066"}, - {file = "scipy-1.14.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:edaf02b82cd7639db00dbff629995ef185c8df4c3ffa71a5562a595765a06ce1"}, - {file = "scipy-1.14.1-cp312-cp312-win_amd64.whl", hash = "sha256:2ff38e22128e6c03ff73b6bb0f85f897d2362f8c052e3b8ad00532198fbdae3f"}, - {file = "scipy-1.14.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:1729560c906963fc8389f6aac023739ff3983e727b1a4d87696b7bf108316a79"}, - {file = "scipy-1.14.1-cp313-cp313-macosx_12_0_arm64.whl", hash = 
"sha256:4079b90df244709e675cdc8b93bfd8a395d59af40b72e339c2287c91860deb8e"}, - {file = "scipy-1.14.1-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:e0cf28db0f24a38b2a0ca33a85a54852586e43cf6fd876365c86e0657cfe7d73"}, - {file = "scipy-1.14.1-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:0c2f95de3b04e26f5f3ad5bb05e74ba7f68b837133a4492414b3afd79dfe540e"}, - {file = "scipy-1.14.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b99722ea48b7ea25e8e015e8341ae74624f72e5f21fc2abd45f3a93266de4c5d"}, - {file = "scipy-1.14.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5149e3fd2d686e42144a093b206aef01932a0059c2a33ddfa67f5f035bdfe13e"}, - {file = "scipy-1.14.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e4f5a7c49323533f9103d4dacf4e4f07078f360743dec7f7596949149efeec06"}, - {file = "scipy-1.14.1-cp313-cp313-win_amd64.whl", hash = "sha256:baff393942b550823bfce952bb62270ee17504d02a1801d7fd0719534dfb9c84"}, - {file = "scipy-1.14.1.tar.gz", hash = "sha256:5a275584e726026a5699459aa72f828a610821006228e841b94275c4a7c08417"}, -] - -[package.dependencies] -numpy = ">=1.23.5,<2.3" + {file = "scipy-1.15.0-cp310-cp310-macosx_10_13_x86_64.whl", hash = "sha256:aeac60d3562a7bf2f35549bdfdb6b1751c50590f55ce7322b4b2fc821dc27fca"}, + {file = "scipy-1.15.0-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:5abbdc6ede5c5fed7910cf406a948e2c0869231c0db091593a6b2fa78be77e5d"}, + {file = "scipy-1.15.0-cp310-cp310-macosx_14_0_arm64.whl", hash = "sha256:eb1533c59f0ec6c55871206f15a5c72d1fae7ad3c0a8ca33ca88f7c309bbbf8c"}, + {file = "scipy-1.15.0-cp310-cp310-macosx_14_0_x86_64.whl", hash = "sha256:de112c2dae53107cfeaf65101419662ac0a54e9a088c17958b51c95dac5de56d"}, + {file = "scipy-1.15.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2240e1fd0782e62e1aacdc7234212ee271d810f67e9cd3b8d521003a82603ef8"}, + {file = "scipy-1.15.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d35aef233b098e4de88b1eac29f0df378278e7e250a915766786b773309137c4"}, + {file = "scipy-1.15.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:1b29e4fc02e155a5fd1165f1e6a73edfdd110470736b0f48bcbe48083f0eee37"}, + {file = "scipy-1.15.0-cp310-cp310-win_amd64.whl", hash = "sha256:0e5b34f8894f9904cc578008d1a9467829c1817e9f9cb45e6d6eeb61d2ab7731"}, + {file = "scipy-1.15.0-cp311-cp311-macosx_10_13_x86_64.whl", hash = "sha256:46e91b5b16909ff79224b56e19cbad65ca500b3afda69225820aa3afbf9ec020"}, + {file = "scipy-1.15.0-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:82bff2eb01ccf7cea8b6ee5274c2dbeadfdac97919da308ee6d8e5bcbe846443"}, + {file = "scipy-1.15.0-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:9c8254fe21dd2c6c8f7757035ec0c31daecf3bb3cffd93bc1ca661b731d28136"}, + {file = "scipy-1.15.0-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:c9624eeae79b18cab1a31944b5ef87aa14b125d6ab69b71db22f0dbd962caf1e"}, + {file = "scipy-1.15.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d13bbc0658c11f3d19df4138336e4bce2c4fbd78c2755be4bf7b8e235481557f"}, + {file = "scipy-1.15.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bdca4c7bb8dc41307e5f39e9e5d19c707d8e20a29845e7533b3bb20a9d4ccba0"}, + {file = "scipy-1.15.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:6f376d7c767731477bac25a85d0118efdc94a572c6b60decb1ee48bf2391a73b"}, + {file = "scipy-1.15.0-cp311-cp311-win_amd64.whl", hash = "sha256:61513b989ee8d5218fbeb178b2d51534ecaddba050db949ae99eeb3d12f6825d"}, + {file = 
"scipy-1.15.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:5beb0a2200372b7416ec73fdae94fe81a6e85e44eb49c35a11ac356d2b8eccc6"}, + {file = "scipy-1.15.0-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:fde0f3104dfa1dfbc1f230f65506532d0558d43188789eaf68f97e106249a913"}, + {file = "scipy-1.15.0-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:35c68f7044b4e7ad73a3e68e513dda946989e523df9b062bd3cf401a1a882192"}, + {file = "scipy-1.15.0-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:52475011be29dfcbecc3dfe3060e471ac5155d72e9233e8d5616b84e2b542054"}, + {file = "scipy-1.15.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5972e3f96f7dda4fd3bb85906a17338e65eaddfe47f750e240f22b331c08858e"}, + {file = "scipy-1.15.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fe00169cf875bed0b3c40e4da45b57037dc21d7c7bf0c85ed75f210c281488f1"}, + {file = "scipy-1.15.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:161f80a98047c219c257bf5ce1777c574bde36b9d962a46b20d0d7e531f86863"}, + {file = "scipy-1.15.0-cp312-cp312-win_amd64.whl", hash = "sha256:327163ad73e54541a675240708244644294cb0a65cca420c9c79baeb9648e479"}, + {file = "scipy-1.15.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:0fcb16eb04d84670722ce8d93b05257df471704c913cb0ff9dc5a1c31d1e9422"}, + {file = "scipy-1.15.0-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:767e8cf6562931f8312f4faa7ddea412cb783d8df49e62c44d00d89f41f9bbe8"}, + {file = "scipy-1.15.0-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:37ce9394cdcd7c5f437583fc6ef91bd290014993900643fdfc7af9b052d1613b"}, + {file = "scipy-1.15.0-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:6d26f17c64abd6c6c2dfb39920f61518cc9e213d034b45b2380e32ba78fde4c0"}, + {file = "scipy-1.15.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1e2448acd79c6374583581a1ded32ac71a00c2b9c62dfa87a40e1dd2520be111"}, + {file = "scipy-1.15.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:36be480e512d38db67f377add5b759fb117edd987f4791cdf58e59b26962bee4"}, + {file = "scipy-1.15.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ccb6248a9987193fe74363a2d73b93bc2c546e0728bd786050b7aef6e17db03c"}, + {file = "scipy-1.15.0-cp313-cp313-win_amd64.whl", hash = "sha256:952d2e9eaa787f0a9e95b6e85da3654791b57a156c3e6609e65cc5176ccfe6f2"}, + {file = "scipy-1.15.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:b1432102254b6dc7766d081fa92df87832ac25ff0b3d3a940f37276e63eb74ff"}, + {file = "scipy-1.15.0-cp313-cp313t-macosx_12_0_arm64.whl", hash = "sha256:4e08c6a36f46abaedf765dd2dfcd3698fa4bd7e311a9abb2d80e33d9b2d72c34"}, + {file = "scipy-1.15.0-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:ec915cd26d76f6fc7ae8522f74f5b2accf39546f341c771bb2297f3871934a52"}, + {file = "scipy-1.15.0-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:351899dd2a801edd3691622172bc8ea01064b1cada794f8641b89a7dc5418db6"}, + {file = "scipy-1.15.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e9baff912ea4f78a543d183ed6f5b3bea9784509b948227daaf6f10727a0e2e5"}, + {file = "scipy-1.15.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:cd9d9198a7fd9a77f0eb5105ea9734df26f41faeb2a88a0e62e5245506f7b6df"}, + {file = "scipy-1.15.0-cp313-cp313t-win_amd64.whl", hash = "sha256:129f899ed275c0515d553b8d31696924e2ca87d1972421e46c376b9eb87de3d2"}, + {file = "scipy-1.15.0.tar.gz", hash = "sha256:300742e2cc94e36a2880ebe464a1c8b4352a7b0f3e36ec3d2ac006cdbe0219ac"}, +] + +[package.dependencies] 
+numpy = ">=1.23.5,<2.5" [package.extras] dev = ["cython-lint (>=0.12.2)", "doit (>=0.36.0)", "mypy (==1.10.0)", "pycodestyle", "pydevtool", "rich-click", "ruff (>=0.0.292)", "types-psutil", "typing_extensions"] -doc = ["jupyterlite-pyodide-kernel", "jupyterlite-sphinx (>=0.13.1)", "jupytext", "matplotlib (>=3.5)", "myst-nb", "numpydoc", "pooch", "pydata-sphinx-theme (>=0.15.2)", "sphinx (>=5.0.0,<=7.3.7)", "sphinx-design (>=0.4.0)"] -test = ["Cython", "array-api-strict (>=2.0)", "asv", "gmpy2", "hypothesis (>=6.30)", "meson", "mpmath", "ninja", "pooch", "pytest", "pytest-cov", "pytest-timeout", "pytest-xdist", "scikit-umfpack", "threadpoolctl"] +doc = ["intersphinx_registry", "jupyterlite-pyodide-kernel", "jupyterlite-sphinx (>=0.16.5)", "jupytext", "matplotlib (>=3.5)", "myst-nb", "numpydoc", "pooch", "pydata-sphinx-theme (>=0.15.2)", "sphinx (>=5.0.0,<8.0.0)", "sphinx-copybutton", "sphinx-design (>=0.4.0)"] +test = ["Cython", "array-api-strict (>=2.0,<2.1.1)", "asv", "gmpy2", "hypothesis (>=6.30)", "meson", "mpmath", "ninja", "pooch", "pytest", "pytest-cov", "pytest-timeout", "pytest-xdist", "scikit-umfpack", "threadpoolctl"] [[package]] name = "send2trash" @@ -6390,13 +6401,13 @@ win32 = ["pywin32"] [[package]] name = "sentry-sdk" -version = "2.16.0" +version = "2.19.2" description = "Python client for Sentry (https://sentry.io)" optional = false python-versions = ">=3.6" files = [ - {file = "sentry_sdk-2.16.0-py2.py3-none-any.whl", hash = "sha256:49139c31ebcd398f4f6396b18910610a0c1602f6e67083240c33019d1f6aa30c"}, - {file = "sentry_sdk-2.16.0.tar.gz", hash = "sha256:90f733b32e15dfc1999e6b7aca67a38688a567329de4d6e184154a73f96c6892"}, + {file = "sentry_sdk-2.19.2-py2.py3-none-any.whl", hash = "sha256:ebdc08228b4d131128e568d696c210d846e5b9d70aa0327dec6b1272d9d40b84"}, + {file = "sentry_sdk-2.19.2.tar.gz", hash = "sha256:467df6e126ba242d39952375dd816fbee0f217d119bf454a8ce74cf1e7909e8d"}, ] [package.dependencies] @@ -6422,14 +6433,16 @@ grpcio = ["grpcio (>=1.21.1)", "protobuf (>=3.8.0)"] http2 = ["httpcore[http2] (==1.*)"] httpx = ["httpx (>=0.16.0)"] huey = ["huey (>=2)"] -huggingface-hub = ["huggingface-hub (>=0.22)"] +huggingface-hub = ["huggingface_hub (>=0.22)"] langchain = ["langchain (>=0.0.210)"] +launchdarkly = ["launchdarkly-server-sdk (>=9.8.0)"] litestar = ["litestar (>=2.0.0)"] loguru = ["loguru (>=0.5)"] openai = ["openai (>=1.0.0)", "tiktoken (>=0.3.0)"] +openfeature = ["openfeature-sdk (>=0.7.1)"] opentelemetry = ["opentelemetry-distro (>=0.35b0)"] opentelemetry-experimental = ["opentelemetry-distro"] -pure-eval = ["asttokens", "executing", "pure-eval"] +pure-eval = ["asttokens", "executing", "pure_eval"] pymongo = ["pymongo (>=3.1)"] pyspark = ["pyspark (>=2.4.4)"] quart = ["blinker (>=1.1)", "quart (>=0.16.1)"] @@ -6442,99 +6455,96 @@ tornado = ["tornado (>=6)"] [[package]] name = "setproctitle" -version = "1.3.3" +version = "1.3.4" description = "A Python module to customize the process title" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "setproctitle-1.3.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:897a73208da48db41e687225f355ce993167079eda1260ba5e13c4e53be7f754"}, - {file = "setproctitle-1.3.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:8c331e91a14ba4076f88c29c777ad6b58639530ed5b24b5564b5ed2fd7a95452"}, - {file = "setproctitle-1.3.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bbbd6c7de0771c84b4aa30e70b409565eb1fc13627a723ca6be774ed6b9d9fa3"}, - {file = 
"setproctitle-1.3.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c05ac48ef16ee013b8a326c63e4610e2430dbec037ec5c5b58fcced550382b74"}, - {file = "setproctitle-1.3.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1342f4fdb37f89d3e3c1c0a59d6ddbedbde838fff5c51178a7982993d238fe4f"}, - {file = "setproctitle-1.3.3-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc74e84fdfa96821580fb5e9c0b0777c1c4779434ce16d3d62a9c4d8c710df39"}, - {file = "setproctitle-1.3.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:9617b676b95adb412bb69645d5b077d664b6882bb0d37bfdafbbb1b999568d85"}, - {file = "setproctitle-1.3.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:6a249415f5bb88b5e9e8c4db47f609e0bf0e20a75e8d744ea787f3092ba1f2d0"}, - {file = "setproctitle-1.3.3-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:38da436a0aaace9add67b999eb6abe4b84397edf4a78ec28f264e5b4c9d53cd5"}, - {file = "setproctitle-1.3.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:da0d57edd4c95bf221b2ebbaa061e65b1788f1544977288bdf95831b6e44e44d"}, - {file = "setproctitle-1.3.3-cp310-cp310-win32.whl", hash = "sha256:a1fcac43918b836ace25f69b1dca8c9395253ad8152b625064415b1d2f9be4fb"}, - {file = "setproctitle-1.3.3-cp310-cp310-win_amd64.whl", hash = "sha256:200620c3b15388d7f3f97e0ae26599c0c378fdf07ae9ac5a13616e933cbd2086"}, - {file = "setproctitle-1.3.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:334f7ed39895d692f753a443102dd5fed180c571eb6a48b2a5b7f5b3564908c8"}, - {file = "setproctitle-1.3.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:950f6476d56ff7817a8fed4ab207727fc5260af83481b2a4b125f32844df513a"}, - {file = "setproctitle-1.3.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:195c961f54a09eb2acabbfc90c413955cf16c6e2f8caa2adbf2237d1019c7dd8"}, - {file = "setproctitle-1.3.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f05e66746bf9fe6a3397ec246fe481096664a9c97eb3fea6004735a4daf867fd"}, - {file = "setproctitle-1.3.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b5901a31012a40ec913265b64e48c2a4059278d9f4e6be628441482dd13fb8b5"}, - {file = "setproctitle-1.3.3-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:64286f8a995f2cd934082b398fc63fca7d5ffe31f0e27e75b3ca6b4efda4e353"}, - {file = "setproctitle-1.3.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:184239903bbc6b813b1a8fc86394dc6ca7d20e2ebe6f69f716bec301e4b0199d"}, - {file = "setproctitle-1.3.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:664698ae0013f986118064b6676d7dcd28fefd0d7d5a5ae9497cbc10cba48fa5"}, - {file = "setproctitle-1.3.3-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:e5119a211c2e98ff18b9908ba62a3bd0e3fabb02a29277a7232a6fb4b2560aa0"}, - {file = "setproctitle-1.3.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:417de6b2e214e837827067048f61841f5d7fc27926f2e43954567094051aff18"}, - {file = "setproctitle-1.3.3-cp311-cp311-win32.whl", hash = "sha256:6a143b31d758296dc2f440175f6c8e0b5301ced3b0f477b84ca43cdcf7f2f476"}, - {file = "setproctitle-1.3.3-cp311-cp311-win_amd64.whl", hash = "sha256:a680d62c399fa4b44899094027ec9a1bdaf6f31c650e44183b50d4c4d0ccc085"}, - {file = "setproctitle-1.3.3-cp312-cp312-macosx_10_9_universal2.whl", hash = 
"sha256:d4460795a8a7a391e3567b902ec5bdf6c60a47d791c3b1d27080fc203d11c9dc"}, - {file = "setproctitle-1.3.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:bdfd7254745bb737ca1384dee57e6523651892f0ea2a7344490e9caefcc35e64"}, - {file = "setproctitle-1.3.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:477d3da48e216d7fc04bddab67b0dcde633e19f484a146fd2a34bb0e9dbb4a1e"}, - {file = "setproctitle-1.3.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ab2900d111e93aff5df9fddc64cf51ca4ef2c9f98702ce26524f1acc5a786ae7"}, - {file = "setproctitle-1.3.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:088b9efc62d5aa5d6edf6cba1cf0c81f4488b5ce1c0342a8b67ae39d64001120"}, - {file = "setproctitle-1.3.3-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a6d50252377db62d6a0bb82cc898089916457f2db2041e1d03ce7fadd4a07381"}, - {file = "setproctitle-1.3.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:87e668f9561fd3a457ba189edfc9e37709261287b52293c115ae3487a24b92f6"}, - {file = "setproctitle-1.3.3-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:287490eb90e7a0ddd22e74c89a92cc922389daa95babc833c08cf80c84c4df0a"}, - {file = "setproctitle-1.3.3-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:4fe1c49486109f72d502f8be569972e27f385fe632bd8895f4730df3c87d5ac8"}, - {file = "setproctitle-1.3.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4a6ba2494a6449b1f477bd3e67935c2b7b0274f2f6dcd0f7c6aceae10c6c6ba3"}, - {file = "setproctitle-1.3.3-cp312-cp312-win32.whl", hash = "sha256:2df2b67e4b1d7498632e18c56722851ba4db5d6a0c91aaf0fd395111e51cdcf4"}, - {file = "setproctitle-1.3.3-cp312-cp312-win_amd64.whl", hash = "sha256:f38d48abc121263f3b62943f84cbaede05749047e428409c2c199664feb6abc7"}, - {file = "setproctitle-1.3.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:816330675e3504ae4d9a2185c46b573105d2310c20b19ea2b4596a9460a4f674"}, - {file = "setproctitle-1.3.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:68f960bc22d8d8e4ac886d1e2e21ccbd283adcf3c43136161c1ba0fa509088e0"}, - {file = "setproctitle-1.3.3-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:00e6e7adff74796ef12753ff399491b8827f84f6c77659d71bd0b35870a17d8f"}, - {file = "setproctitle-1.3.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:53bc0d2358507596c22b02db079618451f3bd720755d88e3cccd840bafb4c41c"}, - {file = "setproctitle-1.3.3-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ad6d20f9541f5f6ac63df553b6d7a04f313947f550eab6a61aa758b45f0d5657"}, - {file = "setproctitle-1.3.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:c1c84beab776b0becaa368254801e57692ed749d935469ac10e2b9b825dbdd8e"}, - {file = "setproctitle-1.3.3-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:507e8dc2891021350eaea40a44ddd887c9f006e6b599af8d64a505c0f718f170"}, - {file = "setproctitle-1.3.3-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:b1067647ac7aba0b44b591936118a22847bda3c507b0a42d74272256a7a798e9"}, - {file = "setproctitle-1.3.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:2e71f6365744bf53714e8bd2522b3c9c1d83f52ffa6324bd7cbb4da707312cd8"}, - {file = "setproctitle-1.3.3-cp37-cp37m-win32.whl", hash = "sha256:7f1d36a1e15a46e8ede4e953abb104fdbc0845a266ec0e99cc0492a4364f8c44"}, - {file = 
"setproctitle-1.3.3-cp37-cp37m-win_amd64.whl", hash = "sha256:c9a402881ec269d0cc9c354b149fc29f9ec1a1939a777f1c858cdb09c7a261df"}, - {file = "setproctitle-1.3.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:ff814dea1e5c492a4980e3e7d094286077054e7ea116cbeda138819db194b2cd"}, - {file = "setproctitle-1.3.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:accb66d7b3ccb00d5cd11d8c6e07055a4568a24c95cf86109894dcc0c134cc89"}, - {file = "setproctitle-1.3.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:554eae5a5b28f02705b83a230e9d163d645c9a08914c0ad921df363a07cf39b1"}, - {file = "setproctitle-1.3.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a911b26264dbe9e8066c7531c0591cfab27b464459c74385b276fe487ca91c12"}, - {file = "setproctitle-1.3.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2982efe7640c4835f7355fdb4da313ad37fb3b40f5c69069912f8048f77b28c8"}, - {file = "setproctitle-1.3.3-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:df3f4274b80709d8bcab2f9a862973d453b308b97a0b423a501bcd93582852e3"}, - {file = "setproctitle-1.3.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:af2c67ae4c795d1674a8d3ac1988676fa306bcfa1e23fddb5e0bd5f5635309ca"}, - {file = "setproctitle-1.3.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:af4061f67fd7ec01624c5e3c21f6b7af2ef0e6bab7fbb43f209e6506c9ce0092"}, - {file = "setproctitle-1.3.3-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:37a62cbe16d4c6294e84670b59cf7adcc73faafe6af07f8cb9adaf1f0e775b19"}, - {file = "setproctitle-1.3.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:a83ca086fbb017f0d87f240a8f9bbcf0809f3b754ee01cec928fff926542c450"}, - {file = "setproctitle-1.3.3-cp38-cp38-win32.whl", hash = "sha256:059f4ce86f8cc92e5860abfc43a1dceb21137b26a02373618d88f6b4b86ba9b2"}, - {file = "setproctitle-1.3.3-cp38-cp38-win_amd64.whl", hash = "sha256:ab92e51cd4a218208efee4c6d37db7368fdf182f6e7ff148fb295ecddf264287"}, - {file = "setproctitle-1.3.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c7951820b77abe03d88b114b998867c0f99da03859e5ab2623d94690848d3e45"}, - {file = "setproctitle-1.3.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5bc94cf128676e8fac6503b37763adb378e2b6be1249d207630f83fc325d9b11"}, - {file = "setproctitle-1.3.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f5d9027eeda64d353cf21a3ceb74bb1760bd534526c9214e19f052424b37e42"}, - {file = "setproctitle-1.3.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2e4a8104db15d3462e29d9946f26bed817a5b1d7a47eabca2d9dc2b995991503"}, - {file = "setproctitle-1.3.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c32c41ace41f344d317399efff4cffb133e709cec2ef09c99e7a13e9f3b9483c"}, - {file = "setproctitle-1.3.3-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cbf16381c7bf7f963b58fb4daaa65684e10966ee14d26f5cc90f07049bfd8c1e"}, - {file = "setproctitle-1.3.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:e18b7bd0898398cc97ce2dfc83bb192a13a087ef6b2d5a8a36460311cb09e775"}, - {file = "setproctitle-1.3.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:69d565d20efe527bd8a9b92e7f299ae5e73b6c0470f3719bd66f3cd821e0d5bd"}, - {file = "setproctitle-1.3.3-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = 
"sha256:ddedd300cd690a3b06e7eac90ed4452348b1348635777ce23d460d913b5b63c3"}, - {file = "setproctitle-1.3.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:415bfcfd01d1fbf5cbd75004599ef167a533395955305f42220a585f64036081"}, - {file = "setproctitle-1.3.3-cp39-cp39-win32.whl", hash = "sha256:21112fcd2195d48f25760f0eafa7a76510871bbb3b750219310cf88b04456ae3"}, - {file = "setproctitle-1.3.3-cp39-cp39-win_amd64.whl", hash = "sha256:5a740f05d0968a5a17da3d676ce6afefebeeeb5ce137510901bf6306ba8ee002"}, - {file = "setproctitle-1.3.3-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:6b9e62ddb3db4b5205c0321dd69a406d8af9ee1693529d144e86bd43bcb4b6c0"}, - {file = "setproctitle-1.3.3-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9e3b99b338598de0bd6b2643bf8c343cf5ff70db3627af3ca427a5e1a1a90dd9"}, - {file = "setproctitle-1.3.3-pp310-pypy310_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:38ae9a02766dad331deb06855fb7a6ca15daea333b3967e214de12cfae8f0ef5"}, - {file = "setproctitle-1.3.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:200ede6fd11233085ba9b764eb055a2a191fb4ffb950c68675ac53c874c22e20"}, - {file = "setproctitle-1.3.3-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:0d3a953c50776751e80fe755a380a64cb14d61e8762bd43041ab3f8cc436092f"}, - {file = "setproctitle-1.3.3-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e5e08e232b78ba3ac6bc0d23ce9e2bee8fad2be391b7e2da834fc9a45129eb87"}, - {file = "setproctitle-1.3.3-pp37-pypy37_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f1da82c3e11284da4fcbf54957dafbf0655d2389cd3d54e4eaba636faf6d117a"}, - {file = "setproctitle-1.3.3-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:aeaa71fb9568ebe9b911ddb490c644fbd2006e8c940f21cb9a1e9425bd709574"}, - {file = "setproctitle-1.3.3-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:59335d000c6250c35989394661eb6287187854e94ac79ea22315469ee4f4c244"}, - {file = "setproctitle-1.3.3-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c3ba57029c9c50ecaf0c92bb127224cc2ea9fda057b5d99d3f348c9ec2855ad3"}, - {file = "setproctitle-1.3.3-pp38-pypy38_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d876d355c53d975c2ef9c4f2487c8f83dad6aeaaee1b6571453cb0ee992f55f6"}, - {file = "setproctitle-1.3.3-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:224602f0939e6fb9d5dd881be1229d485f3257b540f8a900d4271a2c2aa4e5f4"}, - {file = "setproctitle-1.3.3-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:d7f27e0268af2d7503386e0e6be87fb9b6657afd96f5726b733837121146750d"}, - {file = "setproctitle-1.3.3-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f5e7266498cd31a4572378c61920af9f6b4676a73c299fce8ba93afd694f8ae7"}, - {file = "setproctitle-1.3.3-pp39-pypy39_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:33c5609ad51cd99d388e55651b19148ea99727516132fb44680e1f28dd0d1de9"}, - {file = "setproctitle-1.3.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:eae8988e78192fd1a3245a6f4f382390b61bce6cfcc93f3809726e4c885fa68d"}, - {file = "setproctitle-1.3.3.tar.gz", hash = "sha256:c913e151e7ea01567837ff037a23ca8740192880198b7fbb90b16d181607caae"}, + {file 
= "setproctitle-1.3.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:0f6661a69c68349172ba7b4d5dd65fec2b0917abc99002425ad78c3e58cf7595"}, + {file = "setproctitle-1.3.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:754bac5e470adac7f7ec2239c485cd0b75f8197ca8a5b86ffb20eb3a3676cc42"}, + {file = "setproctitle-1.3.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f7bc7088c15150745baf66db62a4ced4507d44419eb66207b609f91b64a682af"}, + {file = "setproctitle-1.3.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a46ef3ecf61e4840fbc1145fdd38acf158d0da7543eda7b773ed2b30f75c2830"}, + {file = "setproctitle-1.3.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ffcb09d5c0ffa043254ec9a734a73f3791fec8bf6333592f906bb2e91ed2af1a"}, + {file = "setproctitle-1.3.4-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:06c16b7a91cdc5d700271899e4383384a61aae83a3d53d0e2e5a266376083342"}, + {file = "setproctitle-1.3.4-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:9f9732e59863eaeedd3feef94b2b216cb86d40dda4fad2d0f0aaec3b31592716"}, + {file = "setproctitle-1.3.4-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e152f4ab9ea1632b5fecdd87cee354f2b2eb6e2dfc3aceb0eb36a01c1e12f94c"}, + {file = "setproctitle-1.3.4-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:020ea47a79b2bbd7bd7b94b85ca956ba7cb026e82f41b20d2e1dac4008cead25"}, + {file = "setproctitle-1.3.4-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:8c52b12b10e4057fc302bd09cb3e3f28bb382c30c044eb3396e805179a8260e4"}, + {file = "setproctitle-1.3.4-cp310-cp310-win32.whl", hash = "sha256:a65a147f545f3fac86f11acb2d0b316d3e78139a9372317b7eb50561b2817ba0"}, + {file = "setproctitle-1.3.4-cp310-cp310-win_amd64.whl", hash = "sha256:66821fada6426998762a3650a37fba77e814a249a95b1183011070744aff47f6"}, + {file = "setproctitle-1.3.4-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:f0f749f07002c2d6fecf37cedc43207a88e6c651926a470a5f229070cf791879"}, + {file = "setproctitle-1.3.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:90ea8d302a5d30b948451d146e94674a3c5b020cc0ced9a1c28f8ddb0f203a5d"}, + {file = "setproctitle-1.3.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f859c88193ed466bee4eb9d45fbc29d2253e6aa3ccd9119c9a1d8d95f409a60d"}, + {file = "setproctitle-1.3.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b3afa5a0ed08a477ded239c05db14c19af585975194a00adf594d48533b23701"}, + {file = "setproctitle-1.3.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:10a78fce9018cc3e9a772b6537bbe3fe92380acf656c9f86db2f45e685af376e"}, + {file = "setproctitle-1.3.4-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d758e2eed2643afac5f2881542fbb5aa97640b54be20d0a5ed0691d02f0867d"}, + {file = "setproctitle-1.3.4-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:ef133a1a2ee378d549048a12d56f4ef0e2b9113b0b25b6b77821e9af94d50634"}, + {file = "setproctitle-1.3.4-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:1d2a154b79d5fb42d1eff06e05e22f0e8091261d877dd47b37d31352b74ecc37"}, + {file = "setproctitle-1.3.4-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:202eae632815571297833876a0f407d0d9c7ad9d843b38adbe687fe68c5192ee"}, + {file = "setproctitle-1.3.4-cp311-cp311-musllinux_1_2_x86_64.whl", hash 
= "sha256:2b0080819859e80a7776ac47cf6accb4b7ad313baf55fabac89c000480dcd103"}, + {file = "setproctitle-1.3.4-cp311-cp311-win32.whl", hash = "sha256:9c9d7d1267dee8c6627963d9376efa068858cfc8f573c083b1b6a2d297a8710f"}, + {file = "setproctitle-1.3.4-cp311-cp311-win_amd64.whl", hash = "sha256:475986ddf6df65d619acd52188336a20f616589403f5a5ceb3fc70cdc137037a"}, + {file = "setproctitle-1.3.4-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:d06990dcfcd41bb3543c18dd25c8476fbfe1f236757f42fef560f6aa03ac8dfc"}, + {file = "setproctitle-1.3.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:317218c9d8b17a010ab2d2f0851e8ef584077a38b1ba2b7c55c9e44e79a61e73"}, + {file = "setproctitle-1.3.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb5fefb53b9d9f334a5d9ec518a36b92a10b936011ac8a6b6dffd60135f16459"}, + {file = "setproctitle-1.3.4-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0855006261635e8669646c7c304b494b6df0a194d2626683520103153ad63cc9"}, + {file = "setproctitle-1.3.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1a88e466fcaee659679c1d64dcb2eddbcb4bfadffeb68ba834d9c173a25b6184"}, + {file = "setproctitle-1.3.4-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f963b6ed8ba33eda374a98d979e8a0eaf21f891b6e334701693a2c9510613c4c"}, + {file = "setproctitle-1.3.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:122c2e05697fa91f5d23f00bbe98a9da1bd457b32529192e934095fadb0853f1"}, + {file = "setproctitle-1.3.4-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:1bba0a866f5895d5b769d8c36b161271c7fd407e5065862ab80ff91c29fbe554"}, + {file = "setproctitle-1.3.4-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:97f1f861998e326e640708488c442519ad69046374b2c3fe9bcc9869b387f23c"}, + {file = "setproctitle-1.3.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:726aee40357d4bdb70115442cb85ccc8e8bc554fc0bbbaa3a57cbe81df42287d"}, + {file = "setproctitle-1.3.4-cp312-cp312-win32.whl", hash = "sha256:04d6ba8b816dbb0bfd62000b0c3e583160893e6e8c4233e1dca1a9ae4d95d924"}, + {file = "setproctitle-1.3.4-cp312-cp312-win_amd64.whl", hash = "sha256:9c76e43cb351ba8887371240b599925cdf3ecececc5dfb7125c71678e7722c55"}, + {file = "setproctitle-1.3.4-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:d6e3b177e634aa6bbbfbf66d097b6d1cdb80fc60e912c7d8bace2e45699c07dd"}, + {file = "setproctitle-1.3.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:6b17655a5f245b416e127e02087ea6347a48821cc4626bc0fd57101bfcd88afc"}, + {file = "setproctitle-1.3.4-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fa5057a86df920faab8ee83960b724bace01a3231eb8e3f2c93d78283504d598"}, + {file = "setproctitle-1.3.4-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:149fdfb8a26a555780c4ce53c92e6d3c990ef7b30f90a675eca02e83c6d5f76d"}, + {file = "setproctitle-1.3.4-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ded03546938a987f463c68ab98d683af87a83db7ac8093bbc179e77680be5ba2"}, + {file = "setproctitle-1.3.4-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8ab9f5b7f2bbc1754bc6292d9a7312071058e5a891b0391e6d13b226133f36aa"}, + {file = "setproctitle-1.3.4-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:0b19813c852566fa031902124336fa1f080c51e262fc90266a8c3d65ca47b74c"}, + {file = 
"setproctitle-1.3.4-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:db78b645dc63c0ccffca367a498f3b13492fb106a2243a1e998303ba79c996e2"}, + {file = "setproctitle-1.3.4-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:b669aaac70bd9f03c070270b953f78d9ee56c4af6f0ff9f9cd3e6d1878c10b40"}, + {file = "setproctitle-1.3.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6dc3d656702791565994e64035a208be56b065675a5bc87b644c657d6d9e2232"}, + {file = "setproctitle-1.3.4-cp313-cp313-win32.whl", hash = "sha256:091f682809a4d12291cf0205517619d2e7014986b7b00ebecfde3d76f8ae5a8f"}, + {file = "setproctitle-1.3.4-cp313-cp313-win_amd64.whl", hash = "sha256:adcd6ba863a315702184d92d3d3bbff290514f24a14695d310f02ae5e28bd1f7"}, + {file = "setproctitle-1.3.4-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:acf41cf91bbc5a36d1fa4455a818bb02bf2a4ccfed2f892ba166ba2fcbb0ec8a"}, + {file = "setproctitle-1.3.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:ceb3ce3262b0e8e088e4117175591b7a82b3bdc5e52e33b1e74778b5fb53fd38"}, + {file = "setproctitle-1.3.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2b2ef636a6a25fe7f3d5a064bea0116b74a4c8c7df9646b17dc7386c439a26cf"}, + {file = "setproctitle-1.3.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:28b8614de08679ae95bc4e8d6daaef6b61afdf027fa0d23bf13d619000286b3c"}, + {file = "setproctitle-1.3.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:24f3c8be826a7d44181eac2269b15b748b76d98cd9a539d4c69f09321dcb5c12"}, + {file = "setproctitle-1.3.4-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc9d79b1bf833af63b7c720a6604eb16453ac1ad4e718eb8b59d1f97d986b98c"}, + {file = "setproctitle-1.3.4-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:fb693000b65842c85356b667d057ae0d0bac6519feca7e1c437cc2cfeb0afc59"}, + {file = "setproctitle-1.3.4-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:a166251b8fbc6f2755e2ce9d3c11e9edb0c0c7d2ed723658ff0161fbce26ac1c"}, + {file = "setproctitle-1.3.4-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:0361428e6378911a378841509c56ba472d991cbed1a7e3078ec0cacc103da44a"}, + {file = "setproctitle-1.3.4-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:62d66e0423e3bd520b4c897063506b309843a8d07343fbfad04197e91a4edd28"}, + {file = "setproctitle-1.3.4-cp38-cp38-win32.whl", hash = "sha256:5edd01909348f3b0b2da329836d6b5419cd4869fec2e118e8ff3275b38af6267"}, + {file = "setproctitle-1.3.4-cp38-cp38-win_amd64.whl", hash = "sha256:59e0dda9ad245921af0328035a961767026e1fa94bb65957ab0db0a0491325d6"}, + {file = "setproctitle-1.3.4-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:bdaaa81a6e95a0a19fba0285f10577377f3503ae4e9988b403feba79da3e2f80"}, + {file = "setproctitle-1.3.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4ee5b19a2d794463bcc19153dfceede7beec784b4cf7967dec0bc0fc212ab3a3"}, + {file = "setproctitle-1.3.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3058a1bb0c767b3a6ccbb38b27ef870af819923eb732e21e44a3f300370fe159"}, + {file = "setproctitle-1.3.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5a97d37ee4fe0d1c6e87d2a97229c27a88787a8f4ebfbdeee95f91b818e52efe"}, + {file = "setproctitle-1.3.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6e61dd7d05da11fc69bb86d51f1e0ee08f74dccf3ecf884c94de41135ffdc75d"}, + {file = 
"setproctitle-1.3.4-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1eb115d53dc2a1299ae72f1119c96a556db36073bacb6da40c47ece5db0d9587"}, + {file = "setproctitle-1.3.4-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:342570716e2647a51ea859b8a9126da9dc1a96a0153c9c0a3514effd60ab57ad"}, + {file = "setproctitle-1.3.4-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:0ad212ae2b03951367a69584af034579b34e1e4199a75d377ef9f8e08ee299b1"}, + {file = "setproctitle-1.3.4-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:4afcb38e22122465013f4621b7e9ff8d42a7a48ae0ffeb94133a806cb91b4aad"}, + {file = "setproctitle-1.3.4-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:30bb223e6c3f95ad9e9bb2a113292759e947d1cfd60dbd4adb55851c370006b2"}, + {file = "setproctitle-1.3.4-cp39-cp39-win32.whl", hash = "sha256:5f0521ed3bb9f02e9486573ea95e2062cd6bf036fa44e640bd54a06f22d85f35"}, + {file = "setproctitle-1.3.4-cp39-cp39-win_amd64.whl", hash = "sha256:0baadeb27f9e97e65922b4151f818b19c311d30b9efdb62af0e53b3db4006ce2"}, + {file = "setproctitle-1.3.4-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:939d364a187b2adfbf6ae488664277e717d56c7951a4ddeb4f23b281bc50bfe5"}, + {file = "setproctitle-1.3.4-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cb8a6a19be0cbf6da6fcbf3698b76c8af03fe83e4bd77c96c3922be3b88bf7da"}, + {file = "setproctitle-1.3.4-pp310-pypy310_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:779006f9e1aade9522a40e8d9635115ab15dd82b7af8e655967162e9c01e2573"}, + {file = "setproctitle-1.3.4-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:5519f2a7b8c535b0f1f77b30441476571373add72008230c81211ee17b423b57"}, + {file = "setproctitle-1.3.4-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:743836d484151334ebba1490d6907ca9e718fe815dcd5756f2a01bc3067d099c"}, + {file = "setproctitle-1.3.4-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:abda20aff8d1751e48d7967fa8945fef38536b82366c49be39b83678d4be3893"}, + {file = "setproctitle-1.3.4-pp38-pypy38_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1a2041b5788ce52f218b5be94af458e04470f997ab46fdebd57cf0b8374cc20e"}, + {file = "setproctitle-1.3.4-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:2c3b1ce68746557aa6e6f4547e76883925cdc7f8d7c7a9f518acd203f1265ca5"}, + {file = "setproctitle-1.3.4-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:0b6a4cbabf024cb263a45bdef425760f14470247ff223f0ec51699ca9046c0fe"}, + {file = "setproctitle-1.3.4-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3e55d7ecc68bdc80de5a553691a3ed260395d5362c19a266cf83cbb4e046551f"}, + {file = "setproctitle-1.3.4-pp39-pypy39_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:02ca3802902d91a89957f79da3ec44b25b5804c88026362cb85eea7c1fbdefd1"}, + {file = "setproctitle-1.3.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:47669fc8ed8b27baa2d698104732234b5389f6a59c37c046f6bcbf9150f7a94e"}, + {file = "setproctitle-1.3.4.tar.gz", hash = "sha256:3b40d32a3e1f04e94231ed6dfee0da9e43b4f9c6b5450d53e6dd7754c34e0c50"}, ] [package.extras] @@ -6542,23 +6552,23 @@ test = ["pytest"] [[package]] name = "setuptools" -version = "75.1.0" +version = "75.7.0" description = "Easily download, 
build, install, upgrade, and uninstall Python packages" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "setuptools-75.1.0-py3-none-any.whl", hash = "sha256:35ab7fd3bcd95e6b7fd704e4a1539513edad446c097797f2985e0e4b960772f2"}, - {file = "setuptools-75.1.0.tar.gz", hash = "sha256:d59a21b17a275fb872a9c3dae73963160ae079f1049ed956880cd7c09b120538"}, + {file = "setuptools-75.7.0-py3-none-any.whl", hash = "sha256:84fb203f278ebcf5cd08f97d3fb96d3fbed4b629d500b29ad60d11e00769b183"}, + {file = "setuptools-75.7.0.tar.gz", hash = "sha256:886ff7b16cd342f1d1defc16fc98c9ce3fde69e087a4e1983d7ab634e5f41f4f"}, ] [package.extras] -check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)", "ruff (>=0.5.2)"] -core = ["importlib-metadata (>=6)", "importlib-resources (>=5.10.2)", "jaraco.collections", "jaraco.functools", "jaraco.text (>=3.7)", "more-itertools", "more-itertools (>=8.8)", "packaging", "packaging (>=24)", "platformdirs (>=2.6.2)", "tomli (>=2.0.1)", "wheel (>=0.43.0)"] +check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)", "ruff (>=0.8.0)"] +core = ["importlib_metadata (>=6)", "jaraco.collections", "jaraco.functools (>=4)", "jaraco.text (>=3.7)", "more_itertools", "more_itertools (>=8.8)", "packaging", "packaging (>=24.2)", "platformdirs (>=4.2.2)", "tomli (>=2.0.1)", "wheel (>=0.43.0)"] cover = ["pytest-cov"] doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "pyproject-hooks (!=1.1)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier", "towncrier (<24.7)"] enabler = ["pytest-enabler (>=2.2)"] -test = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "ini2toml[lite] (>=0.14)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "jaraco.test", "packaging (>=23.2)", "pip (>=19.1)", "pyproject-hooks (!=1.1)", "pytest (>=6,!=8.1.*)", "pytest-home (>=0.5)", "pytest-perf", "pytest-subprocess", "pytest-timeout", "pytest-xdist (>=3)", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel (>=0.44.0)"] -type = ["importlib-metadata (>=7.0.2)", "jaraco.develop (>=7.21)", "mypy (==1.11.*)", "pytest-mypy"] +test = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "ini2toml[lite] (>=0.14)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.7.2)", "jaraco.test (>=5.5)", "packaging (>=24.2)", "pip (>=19.1)", "pyproject-hooks (!=1.1)", "pytest (>=6,!=8.1.*)", "pytest-home (>=0.5)", "pytest-perf", "pytest-subprocess", "pytest-timeout", "pytest-xdist (>=3)", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel (>=0.44.0)"] +type = ["importlib_metadata (>=7.0.2)", "jaraco.develop (>=7.21)", "mypy (==1.14.*)", "pytest-mypy"] [[package]] name = "sh" @@ -6631,35 +6641,35 @@ test = ["pytest", "pytest-cov"] [[package]] name = "six" -version = "1.16.0" +version = "1.17.0" description = "Python 2 and 3 compatibility utilities" optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" files = [ - {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, - {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, + {file = "six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274"}, + {file = "six-1.17.0.tar.gz", 
hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81"}, ] [[package]] name = "smmap" -version = "5.0.1" +version = "5.0.2" description = "A pure Python implementation of a sliding window memory map manager" optional = false python-versions = ">=3.7" files = [ - {file = "smmap-5.0.1-py3-none-any.whl", hash = "sha256:e6d8668fa5f93e706934a62d7b4db19c8d9eb8cf2adbb75ef1b675aa332b69da"}, - {file = "smmap-5.0.1.tar.gz", hash = "sha256:dceeb6c0028fdb6734471eb07c0cd2aae706ccaecab45965ee83f11c8d3b1f62"}, + {file = "smmap-5.0.2-py3-none-any.whl", hash = "sha256:b30115f0def7d7531d22a0fb6502488d879e75b260a9db4d0819cfb25403af5e"}, + {file = "smmap-5.0.2.tar.gz", hash = "sha256:26ea65a03958fa0c8a1c7e8c7a58fdc77221b8910f6be2131affade476898ad5"}, ] [[package]] name = "snakeviz" -version = "2.2.0" +version = "2.2.2" description = "A web-based viewer for Python profiler output" optional = true -python-versions = ">=3.7" +python-versions = ">=3.9" files = [ - {file = "snakeviz-2.2.0-py2.py3-none-any.whl", hash = "sha256:569e2d71c47f80a886aa6e70d6405cb6d30aa3520969ad956b06f824c5f02b8e"}, - {file = "snakeviz-2.2.0.tar.gz", hash = "sha256:7bfd00be7ae147eb4a170a471578e1cd3f41f803238958b6b8efcf2c698a6aa9"}, + {file = "snakeviz-2.2.2-py3-none-any.whl", hash = "sha256:77e7b9c82f6152edc330040319b97612351cd9b48c706434c535c2df31d10ac5"}, + {file = "snakeviz-2.2.2.tar.gz", hash = "sha256:08028c6f8e34a032ff14757a38424770abb8662fb2818985aeea0d9bc13a7d83"}, ] [package.dependencies] @@ -6689,22 +6699,26 @@ files = [ [[package]] name = "speechrecognition" -version = "3.10.4" +version = "3.13.0" description = "Library for performing speech recognition, with support for several engines and APIs, online and offline." optional = true -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "SpeechRecognition-3.10.4-py2.py3-none-any.whl", hash = "sha256:723b8155692a8ed11a30013f15f89a3e57c5dc8bc73c8cb024bf9bd14c21fba5"}, - {file = "speechrecognition-3.10.4.tar.gz", hash = "sha256:986bafcf61f14625c2f3cea6a471838edd379ed68aeed7b8f3c0fb41e21f1125"}, + {file = "SpeechRecognition-3.13.0-py3-none-any.whl", hash = "sha256:44ca68ffbc55910ec0d26f009632a67367e2c81934934babe52456a896e983e1"}, + {file = "speechrecognition-3.13.0.tar.gz", hash = "sha256:fdf42f4ad3cc05df7dd4a651ae090f313e34a5f6389f09134594e4ada9d28b19"}, ] [package.dependencies] -requests = ">=2.26.0" typing-extensions = "*" [package.extras] -dev = ["flake8", "rstcheck"] -whisper-api = ["openai"] +assemblyai = ["requests"] +audio = ["PyAudio (>=0.2.11)"] +dev = ["pytest", "pytest-randomly", "respx"] +google-cloud = ["google-cloud-speech"] +groq = ["groq", "httpx (<0.28)"] +openai = ["httpx (<0.28)", "openai"] +pocketsphinx = ["pocketsphinx (<5)"] whisper-local = ["openai-whisper", "soundfile"] [[package]] @@ -6820,35 +6834,35 @@ typing = ["mypy (>=1.6,<2.0)", "traitlets (>=5.11.1)"] [[package]] name = "tifffile" -version = "2024.9.20" +version = "2024.12.12" description = "Read and write TIFF files" optional = true python-versions = ">=3.10" files = [ - {file = "tifffile-2024.9.20-py3-none-any.whl", hash = "sha256:c54dc85bc1065d972cb8a6ffb3181389d597876aa80177933459733e4ed243dd"}, - {file = "tifffile-2024.9.20.tar.gz", hash = "sha256:3fbf3be2f995a7051a8ae05a4be70c96fc0789f22ed6f1c4104c973cf68a640b"}, + {file = "tifffile-2024.12.12-py3-none-any.whl", hash = "sha256:6ff0f196a46a75c8c0661c70995e06ea4d08a81fe343193e69f1673f4807d508"}, + {file = "tifffile-2024.12.12.tar.gz", hash = 
"sha256:c38e929bf74c04b6c8708d87f16b32c85c6d7c2514b99559ea3db8003ba4edda"}, ] [package.dependencies] numpy = "*" [package.extras] -all = ["defusedxml", "fsspec", "imagecodecs (>=2023.8.12)", "lxml", "matplotlib", "zarr"] +all = ["defusedxml", "fsspec", "imagecodecs (>=2023.8.12)", "lxml", "matplotlib", "zarr (<3)"] codecs = ["imagecodecs (>=2023.8.12)"] plot = ["matplotlib"] -test = ["cmapfile", "czifile", "dask", "defusedxml", "fsspec", "imagecodecs", "lfdfiles", "lxml", "ndtiff", "oiffile", "psdtags", "pytest", "roifile", "xarray", "zarr"] +test = ["cmapfile", "czifile", "dask", "defusedxml", "fsspec", "imagecodecs", "lfdfiles", "lxml", "ndtiff", "oiffile", "psdtags", "pytest", "roifile", "xarray", "zarr (<3)"] xml = ["defusedxml", "lxml"] -zarr = ["fsspec", "zarr"] +zarr = ["fsspec", "zarr (<3)"] [[package]] name = "tinycss2" -version = "1.3.0" +version = "1.4.0" description = "A tiny CSS parser" optional = true python-versions = ">=3.8" files = [ - {file = "tinycss2-1.3.0-py3-none-any.whl", hash = "sha256:54a8dbdffb334d536851be0226030e9505965bb2f30f21a4a82c55fb2a80fae7"}, - {file = "tinycss2-1.3.0.tar.gz", hash = "sha256:152f9acabd296a8375fbca5b84c961ff95971fcfc32e79550c8df8e29118c54d"}, + {file = "tinycss2-1.4.0-py3-none-any.whl", hash = "sha256:3a49cf47b7675da0b15d0c6e1df8df4ebd96e9394bb905a5775adb0d884c5289"}, + {file = "tinycss2-1.4.0.tar.gz", hash = "sha256:10c0972f6fc0fbee87c3edb76549357415e94548c1ae10ebccdea16fb404a9b7"}, ] [package.dependencies] @@ -6871,13 +6885,43 @@ files = [ [[package]] name = "tomli" -version = "2.0.2" +version = "2.2.1" description = "A lil' TOML parser" optional = true python-versions = ">=3.8" files = [ - {file = "tomli-2.0.2-py3-none-any.whl", hash = "sha256:2ebe24485c53d303f690b0ec092806a085f07af5a5aa1464f3931eec36caaa38"}, - {file = "tomli-2.0.2.tar.gz", hash = "sha256:d46d457a85337051c36524bc5349dd91b1877838e2979ac5ced3e710ed8a60ed"}, + {file = "tomli-2.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678e4fa69e4575eb77d103de3df8a895e1591b48e740211bd1067378c69e8249"}, + {file = "tomli-2.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:023aa114dd824ade0100497eb2318602af309e5a55595f76b626d6d9f3b7b0a6"}, + {file = "tomli-2.2.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ece47d672db52ac607a3d9599a9d48dcb2f2f735c6c2d1f34130085bb12b112a"}, + {file = "tomli-2.2.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6972ca9c9cc9f0acaa56a8ca1ff51e7af152a9f87fb64623e31d5c83700080ee"}, + {file = "tomli-2.2.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c954d2250168d28797dd4e3ac5cf812a406cd5a92674ee4c8f123c889786aa8e"}, + {file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8dd28b3e155b80f4d54beb40a441d366adcfe740969820caf156c019fb5c7ec4"}, + {file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:e59e304978767a54663af13c07b3d1af22ddee3bb2fb0618ca1593e4f593a106"}, + {file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:33580bccab0338d00994d7f16f4c4ec25b776af3ffaac1ed74e0b3fc95e885a8"}, + {file = "tomli-2.2.1-cp311-cp311-win32.whl", hash = "sha256:465af0e0875402f1d226519c9904f37254b3045fc5084697cefb9bdde1ff99ff"}, + {file = "tomli-2.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:2d0f2fdd22b02c6d81637a3c95f8cd77f995846af7414c5c4b8d0545afa1bc4b"}, + {file = "tomli-2.2.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = 
"sha256:4a8f6e44de52d5e6c657c9fe83b562f5f4256d8ebbfe4ff922c495620a7f6cea"}, + {file = "tomli-2.2.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8d57ca8095a641b8237d5b079147646153d22552f1c637fd3ba7f4b0b29167a8"}, + {file = "tomli-2.2.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e340144ad7ae1533cb897d406382b4b6fede8890a03738ff1683af800d54192"}, + {file = "tomli-2.2.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db2b95f9de79181805df90bedc5a5ab4c165e6ec3fe99f970d0e302f384ad222"}, + {file = "tomli-2.2.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:40741994320b232529c802f8bc86da4e1aa9f413db394617b9a256ae0f9a7f77"}, + {file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:400e720fe168c0f8521520190686ef8ef033fb19fc493da09779e592861b78c6"}, + {file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:02abe224de6ae62c19f090f68da4e27b10af2b93213d36cf44e6e1c5abd19fdd"}, + {file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b82ebccc8c8a36f2094e969560a1b836758481f3dc360ce9a3277c65f374285e"}, + {file = "tomli-2.2.1-cp312-cp312-win32.whl", hash = "sha256:889f80ef92701b9dbb224e49ec87c645ce5df3fa2cc548664eb8a25e03127a98"}, + {file = "tomli-2.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:7fc04e92e1d624a4a63c76474610238576942d6b8950a2d7f908a340494e67e4"}, + {file = "tomli-2.2.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f4039b9cbc3048b2416cc57ab3bda989a6fcf9b36cf8937f01a6e731b64f80d7"}, + {file = "tomli-2.2.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:286f0ca2ffeeb5b9bd4fcc8d6c330534323ec51b2f52da063b11c502da16f30c"}, + {file = "tomli-2.2.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a92ef1a44547e894e2a17d24e7557a5e85a9e1d0048b0b5e7541f76c5032cb13"}, + {file = "tomli-2.2.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9316dc65bed1684c9a98ee68759ceaed29d229e985297003e494aa825ebb0281"}, + {file = "tomli-2.2.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e85e99945e688e32d5a35c1ff38ed0b3f41f43fad8df0bdf79f72b2ba7bc5272"}, + {file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ac065718db92ca818f8d6141b5f66369833d4a80a9d74435a268c52bdfa73140"}, + {file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:d920f33822747519673ee656a4b6ac33e382eca9d331c87770faa3eef562aeb2"}, + {file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a198f10c4d1b1375d7687bc25294306e551bf1abfa4eace6650070a5c1ae2744"}, + {file = "tomli-2.2.1-cp313-cp313-win32.whl", hash = "sha256:d3f5614314d758649ab2ab3a62d4f2004c825922f9e370b29416484086b264ec"}, + {file = "tomli-2.2.1-cp313-cp313-win_amd64.whl", hash = "sha256:a38aa0308e754b0e3c67e344754dff64999ff9b513e691d0e786265c93583c69"}, + {file = "tomli-2.2.1-py3-none-any.whl", hash = "sha256:cb55c73c5f4408779d0cf3eef9f762b9c9f147a77de7b258bef0a5628adc85cc"}, + {file = "tomli-2.2.1.tar.gz", hash = "sha256:cd45e1dc79c835ce60f7404ec8119f2eb06d38b1deba146f07ced3bbc44505ff"}, ] [[package]] @@ -6974,40 +7018,41 @@ scipy = ["scipy"] [[package]] name = "tornado" -version = "6.4.1" +version = "6.4.2" description = "Tornado is a Python web framework and asynchronous networking library, originally developed at FriendFeed." 
optional = true python-versions = ">=3.8" files = [ - {file = "tornado-6.4.1-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:163b0aafc8e23d8cdc3c9dfb24c5368af84a81e3364745ccb4427669bf84aec8"}, - {file = "tornado-6.4.1-cp38-abi3-macosx_10_9_x86_64.whl", hash = "sha256:6d5ce3437e18a2b66fbadb183c1d3364fb03f2be71299e7d10dbeeb69f4b2a14"}, - {file = "tornado-6.4.1-cp38-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e2e20b9113cd7293f164dc46fffb13535266e713cdb87bd2d15ddb336e96cfc4"}, - {file = "tornado-6.4.1-cp38-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8ae50a504a740365267b2a8d1a90c9fbc86b780a39170feca9bcc1787ff80842"}, - {file = "tornado-6.4.1-cp38-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:613bf4ddf5c7a95509218b149b555621497a6cc0d46ac341b30bd9ec19eac7f3"}, - {file = "tornado-6.4.1-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:25486eb223babe3eed4b8aecbac33b37e3dd6d776bc730ca14e1bf93888b979f"}, - {file = "tornado-6.4.1-cp38-abi3-musllinux_1_2_i686.whl", hash = "sha256:454db8a7ecfcf2ff6042dde58404164d969b6f5d58b926da15e6b23817950fc4"}, - {file = "tornado-6.4.1-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:a02a08cc7a9314b006f653ce40483b9b3c12cda222d6a46d4ac63bb6c9057698"}, - {file = "tornado-6.4.1-cp38-abi3-win32.whl", hash = "sha256:d9a566c40b89757c9aa8e6f032bcdb8ca8795d7c1a9762910c722b1635c9de4d"}, - {file = "tornado-6.4.1-cp38-abi3-win_amd64.whl", hash = "sha256:b24b8982ed444378d7f21d563f4180a2de31ced9d8d84443907a0a64da2072e7"}, - {file = "tornado-6.4.1.tar.gz", hash = "sha256:92d3ab53183d8c50f8204a51e6f91d18a15d5ef261e84d452800d4ff6fc504e9"}, + {file = "tornado-6.4.2-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:e828cce1123e9e44ae2a50a9de3055497ab1d0aeb440c5ac23064d9e44880da1"}, + {file = "tornado-6.4.2-cp38-abi3-macosx_10_9_x86_64.whl", hash = "sha256:072ce12ada169c5b00b7d92a99ba089447ccc993ea2143c9ede887e0937aa803"}, + {file = "tornado-6.4.2-cp38-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a017d239bd1bb0919f72af256a970624241f070496635784d9bf0db640d3fec"}, + {file = "tornado-6.4.2-cp38-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c36e62ce8f63409301537222faffcef7dfc5284f27eec227389f2ad11b09d946"}, + {file = "tornado-6.4.2-cp38-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bca9eb02196e789c9cb5c3c7c0f04fb447dc2adffd95265b2c7223a8a615ccbf"}, + {file = "tornado-6.4.2-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:304463bd0772442ff4d0f5149c6f1c2135a1fae045adf070821c6cdc76980634"}, + {file = "tornado-6.4.2-cp38-abi3-musllinux_1_2_i686.whl", hash = "sha256:c82c46813ba483a385ab2a99caeaedf92585a1f90defb5693351fa7e4ea0bf73"}, + {file = "tornado-6.4.2-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:932d195ca9015956fa502c6b56af9eb06106140d844a335590c1ec7f5277d10c"}, + {file = "tornado-6.4.2-cp38-abi3-win32.whl", hash = "sha256:2876cef82e6c5978fde1e0d5b1f919d756968d5b4282418f3146b79b58556482"}, + {file = "tornado-6.4.2-cp38-abi3-win_amd64.whl", hash = "sha256:908b71bf3ff37d81073356a5fadcc660eb10c1476ee6e2725588626ce7e5ca38"}, + {file = "tornado-6.4.2.tar.gz", hash = "sha256:92bad5b4746e9879fd7bf1eb21dce4e3fc5128d71601f80005afa39237ad620b"}, ] [[package]] name = "tqdm" -version = "4.66.5" +version = "4.67.1" description = "Fast, Extensible Progress Meter" optional = false 
python-versions = ">=3.7" files = [ - {file = "tqdm-4.66.5-py3-none-any.whl", hash = "sha256:90279a3770753eafc9194a0364852159802111925aa30eb3f9d85b0e805ac7cd"}, - {file = "tqdm-4.66.5.tar.gz", hash = "sha256:e1020aef2e5096702d8a025ac7d16b1577279c9d63f8375b63083e9a5f0fcbad"}, + {file = "tqdm-4.67.1-py3-none-any.whl", hash = "sha256:26445eca388f82e72884e0d580d5464cd801a3ea01e63e5601bdff9ba6a48de2"}, + {file = "tqdm-4.67.1.tar.gz", hash = "sha256:f8aef9c52c08c13a65f30ea34f4e5aac3fd1a34959879d7e59e63027286627f2"}, ] [package.dependencies] colorama = {version = "*", markers = "platform_system == \"Windows\""} [package.extras] -dev = ["pytest (>=6)", "pytest-cov", "pytest-timeout", "pytest-xdist"] +dev = ["nbval", "pytest (>=6)", "pytest-asyncio (>=0.24)", "pytest-cov", "pytest-timeout"] +discord = ["requests"] notebook = ["ipywidgets (>=6)"] slack = ["slack-sdk"] telegram = ["requests"] @@ -7086,13 +7131,13 @@ tutorials = ["matplotlib", "pandas", "tabulate"] [[package]] name = "types-python-dateutil" -version = "2.9.0.20241003" +version = "2.9.0.20241206" description = "Typing stubs for python-dateutil" optional = true python-versions = ">=3.8" files = [ - {file = "types-python-dateutil-2.9.0.20241003.tar.gz", hash = "sha256:58cb85449b2a56d6684e41aeefb4c4280631246a0da1a719bdbe6f3fb0317446"}, - {file = "types_python_dateutil-2.9.0.20241003-py3-none-any.whl", hash = "sha256:250e1d8e80e7bbc3a6c99b907762711d1a1cdd00e978ad39cb5940f6f0a87f3d"}, + {file = "types_python_dateutil-2.9.0.20241206-py3-none-any.whl", hash = "sha256:e248a4bc70a486d3e3ec84d0dc30eec3a5f979d6e7ee4123ae043eedbb987f53"}, + {file = "types_python_dateutil-2.9.0.20241206.tar.gz", hash = "sha256:18f493414c26ffba692a72369fea7a154c502646301ebfe3d56a04b3767284cb"}, ] [[package]] @@ -7134,13 +7179,13 @@ files = [ [[package]] name = "urchin" -version = "0.0.27" +version = "0.0.29" description = "URDF parser and manipulator for Python" optional = true python-versions = "*" files = [ - {file = "urchin-0.0.27-py3-none-any.whl", hash = "sha256:e4cf43c8f52a44e0075e1778b76c203922085dd1fb9340cd703bf54188208611"}, - {file = "urchin-0.0.27.tar.gz", hash = "sha256:bda308ed7d2b80eb1e097dc3963fabe9e00a6cbd89a1f6be6f063c2a065d3671"}, + {file = "urchin-0.0.29-py3-none-any.whl", hash = "sha256:5ea42c921b432e686adfd12270396d47ee18a853562a7c9c0c896cf6f2ef90fa"}, + {file = "urchin-0.0.29.tar.gz", hash = "sha256:61c5c4276b4ab3768767e6162f9a950c8bc35f0f162cf85396c4a4ac8a79028a"}, ] [package.dependencies] @@ -7188,13 +7233,13 @@ dev = ["flake8", "flake8-annotations", "flake8-bandit", "flake8-bugbear", "flake [[package]] name = "urllib3" -version = "2.2.3" +version = "2.3.0" description = "HTTP library with thread-safe connection pooling, file post, and more." 
optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "urllib3-2.2.3-py3-none-any.whl", hash = "sha256:ca899ca043dcb1bafa3e262d73aa25c465bfb49e0bd9dd5d59f1d0acba2f8fac"}, - {file = "urllib3-2.2.3.tar.gz", hash = "sha256:e7d814a81dad81e6caf2ec9fdedb284ecc9c73076b62654547cc64ccdcae26e9"}, + {file = "urllib3-2.3.0-py3-none-any.whl", hash = "sha256:1cee9ad369867bfdbbb48b7dd50374c0967a0bb7710050facf0dd6911440e3df"}, + {file = "urllib3-2.3.0.tar.gz", hash = "sha256:f8c5449b3cf0861679ce7e0503c7b44b5ec981bec0d1d3795a07f1ba96f0204d"}, ] [package.extras] @@ -7205,13 +7250,13 @@ zstd = ["zstandard (>=0.18.0)"] [[package]] name = "virtualenv" -version = "20.26.6" +version = "20.28.1" description = "Virtual Python Environment builder" optional = true -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "virtualenv-20.26.6-py3-none-any.whl", hash = "sha256:7345cc5b25405607a624d8418154577459c3e0277f5466dd79c49d5e492995f2"}, - {file = "virtualenv-20.26.6.tar.gz", hash = "sha256:280aede09a2a5c317e409a00102e7077c6432c5a38f0ef938e643805a7ad2c48"}, + {file = "virtualenv-20.28.1-py3-none-any.whl", hash = "sha256:412773c85d4dab0409b83ec36f7a6499e72eaf08c80e81e9576bca61831c71cb"}, + {file = "virtualenv-20.28.1.tar.gz", hash = "sha256:5d34ab240fdb5d21549b76f9e8ff3af28252f5499fb6d6f031adac4e5a8c5329"}, ] [package.dependencies] @@ -7225,21 +7270,22 @@ test = ["covdefaults (>=2.3)", "coverage (>=7.2.7)", "coverage-enable-subprocess [[package]] name = "wandb" -version = "0.18.3" +version = "0.19.1" description = "A CLI and library for interacting with the Weights & Biases API." optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "wandb-0.18.3-py3-none-any.whl", hash = "sha256:7da64f7da0ff7572439de10bfd45534e8811e71e78ac2ccc3b818f1c0f3a9aef"}, - {file = "wandb-0.18.3-py3-none-macosx_10_13_x86_64.whl", hash = "sha256:6674d8a5c40c79065b9c7eb765136756d5ebc9457a5f9abc820a660fb23f8b67"}, - {file = "wandb-0.18.3-py3-none-macosx_11_0_arm64.whl", hash = "sha256:741f566e409a2684d3047e4cc25e8e914d78196b901190937b24b6abb8b052e5"}, - {file = "wandb-0.18.3-py3-none-macosx_11_0_x86_64.whl", hash = "sha256:8be5e877570b693001c52dcc2089e48e6a4dcbf15f3adf5c9349f95148b59d58"}, - {file = "wandb-0.18.3-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d788852bd4739fa18de3918f309c3a955b5cef3247fae1c40df3a63af637e1a0"}, - {file = "wandb-0.18.3-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ab81424eb207d78239a8d69c90521a70074fb81e3709055484e43c76fe44dc08"}, - {file = "wandb-0.18.3-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:2c91315b8b62423eae18577d66a4b4bb8e4341a7d5c849cb2963e3b3dff0bf6d"}, - {file = "wandb-0.18.3-py3-none-win32.whl", hash = "sha256:92a647dab783938ec87776a9fae8a13e72e6dad939c53e357cdea9d2570f0ad8"}, - {file = "wandb-0.18.3-py3-none-win_amd64.whl", hash = "sha256:29cac2cfa3124241fed22cfedc9a52e1500275ee9bbb0b428ce4bf63c4723bf0"}, - {file = "wandb-0.18.3.tar.gz", hash = "sha256:eb2574cea72bc908c6ce1b37edf7a889619e6e06e1b4714eecfe0662ded43c06"}, + {file = "wandb-0.19.1-py3-none-any.whl", hash = "sha256:b3195b3fe4d1b8131f64b956e6a5de7486cecfec179570986dbd6c64cd29b3c5"}, + {file = "wandb-0.19.1-py3-none-macosx_10_13_x86_64.whl", hash = "sha256:788c20d8c3dabe490b50961dc91298886853dd8a0276a09ef3fc5c7f1f137c1d"}, + {file = "wandb-0.19.1-py3-none-macosx_11_0_arm64.whl", hash = "sha256:343d46c59aba3c30cf98ce8e0b9a2e1e52986a0ac0433d092de9aa856aeece98"}, + {file = 
"wandb-0.19.1-py3-none-macosx_11_0_x86_64.whl", hash = "sha256:7541efa8ffab715ba932fcb5117c4255a47cadebf0365d1dc1eb684a94744573"}, + {file = "wandb-0.19.1-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ec64a859478b9f5bcf894aedd2bcccaf6917abe7b8adbd722b2a43b7063d33db"}, + {file = "wandb-0.19.1-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:405000bc3d2e369934ff1266fcc55ff968e4a0f24c2fdaa9a0585b170c01b60c"}, + {file = "wandb-0.19.1-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:809b5ae83ed314b97db1077490f37d6c926c7c96fad9b6b5a2476534d54defb4"}, + {file = "wandb-0.19.1-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:e01e9176b5ca9660226edcbfd9323019aa9bd5789a4b384d23ba53e062d3966e"}, + {file = "wandb-0.19.1-py3-none-win32.whl", hash = "sha256:093cc5c39ce629390c4f465b1ae89bab2ee9b29c2a46c8b5143858dd8c73264b"}, + {file = "wandb-0.19.1-py3-none-win_amd64.whl", hash = "sha256:1fc9d403fffb84e37f4e56a075b26b639e9f489899c9b9db9f46e3a7b7d93c64"}, + {file = "wandb-0.19.1.tar.gz", hash = "sha256:a9b4bf790c468e7b350eeaba2de002672a5cbaa3049899ab060940e2388f429a"}, ] [package.dependencies] @@ -7249,11 +7295,13 @@ gitpython = ">=1.0.0,<3.1.29 || >3.1.29" platformdirs = "*" protobuf = {version = ">=3.19.0,<4.21.0 || >4.21.0,<5.28.0 || >5.28.0,<6", markers = "python_version > \"3.9\" or sys_platform != \"linux\""} psutil = ">=5.0.0" +pydantic = ">=2.6,<3" pyyaml = "*" requests = ">=2.0.0,<3" -sentry-sdk = ">=1.0.0" +sentry-sdk = ">=2.0.0" setproctitle = "*" setuptools = "*" +typing-extensions = {version = ">=4.4,<5", markers = "python_version < \"3.12\""} [package.extras] aws = ["boto3"] @@ -7281,19 +7329,15 @@ files = [ [[package]] name = "webcolors" -version = "24.8.0" +version = "24.11.1" description = "A library for working with the color formats defined by HTML and CSS." optional = true -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "webcolors-24.8.0-py3-none-any.whl", hash = "sha256:fc4c3b59358ada164552084a8ebee637c221e4059267d0f8325b3b560f6c7f0a"}, - {file = "webcolors-24.8.0.tar.gz", hash = "sha256:08b07af286a01bcd30d583a7acadf629583d1f79bfef27dd2c2c5c263817277d"}, + {file = "webcolors-24.11.1-py3-none-any.whl", hash = "sha256:515291393b4cdf0eb19c155749a096f779f7d909f7cceea072791cb9095b92e9"}, + {file = "webcolors-24.11.1.tar.gz", hash = "sha256:ecb3d768f32202af770477b8b65f318fa4f566c22948673a977b00d589dd80f6"}, ] -[package.extras] -docs = ["furo", "sphinx", "sphinx-copybutton", "sphinx-inline-tabs", "sphinx-notfound-page", "sphinxext-opengraph"] -tests = ["coverage[toml]"] - [[package]] name = "webencodings" version = "0.5.1" @@ -7323,13 +7367,13 @@ test = ["websockets"] [[package]] name = "werkzeug" -version = "3.1.1" +version = "3.1.3" description = "The comprehensive WSGI web application library." 
optional = false python-versions = ">=3.9" files = [ - {file = "werkzeug-3.1.1-py3-none-any.whl", hash = "sha256:a71124d1ef06008baafa3d266c02f56e1836a5984afd6dd6c9230669d60d9fb5"}, - {file = "werkzeug-3.1.1.tar.gz", hash = "sha256:8cd39dfbdfc1e051965f156163e2974e52c210f130810e9ad36858f0fd3edad4"}, + {file = "werkzeug-3.1.3-py3-none-any.whl", hash = "sha256:54b78bf3716d19a65be4fceccc0d1d7b89e608834989dfae50ea87564639213e"}, + {file = "werkzeug-3.1.3.tar.gz", hash = "sha256:60723ce945c19328679790e3282cc758aa4a6040e4bb330f53d30fa546d44746"}, ] [package.dependencies] @@ -7351,13 +7395,13 @@ files = [ [[package]] name = "xmltodict" -version = "0.14.1" +version = "0.14.2" description = "Makes working with XML feel like you are working with JSON" optional = true python-versions = ">=3.6" files = [ - {file = "xmltodict-0.14.1-py2.py3-none-any.whl", hash = "sha256:3ef4a7b71c08f19047fcbea572e1d7f4207ab269da1565b5d40e9823d3894e63"}, - {file = "xmltodict-0.14.1.tar.gz", hash = "sha256:338c8431e4fc554517651972d62f06958718f6262b04316917008e8fd677a6b0"}, + {file = "xmltodict-0.14.2-py2.py3-none-any.whl", hash = "sha256:20cc7d723ed729276e808f26fb6b3599f786cbc37e06c65e192ba77c40f20aac"}, + {file = "xmltodict-0.14.2.tar.gz", hash = "sha256:201e7c28bb210e374999d1dde6382923ab0ed1a8a5faeece48ab525b7810a553"}, ] [[package]] @@ -7494,103 +7538,93 @@ files = [ [[package]] name = "yarl" -version = "1.14.0" +version = "1.18.3" description = "Yet another URL library" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "yarl-1.14.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:1bfc25aa6a7c99cf86564210f79a0b7d4484159c67e01232b116e445b3036547"}, - {file = "yarl-1.14.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0cf21f46a15d445417de8fc89f2568852cf57fe8ca1ab3d19ddb24d45c0383ae"}, - {file = "yarl-1.14.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1dda53508df0de87b6e6b0a52d6718ff6c62a5aca8f5552748404963df639269"}, - {file = "yarl-1.14.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:587c3cc59bc148a9b1c07a019346eda2549bc9f468acd2f9824d185749acf0a6"}, - {file = "yarl-1.14.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3007a5b75cb50140708420fe688c393e71139324df599434633019314ceb8b59"}, - {file = "yarl-1.14.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:06ff23462398333c78b6f4f8d3d70410d657a471c2c5bbe6086133be43fc8f1a"}, - {file = "yarl-1.14.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:689a99a42ee4583fcb0d3a67a0204664aa1539684aed72bdafcbd505197a91c4"}, - {file = "yarl-1.14.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b0547ab1e9345dc468cac8368d88ea4c5bd473ebc1d8d755347d7401982b5dd8"}, - {file = "yarl-1.14.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:742aef0a99844faaac200564ea6f5e08facb285d37ea18bd1a5acf2771f3255a"}, - {file = "yarl-1.14.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:176110bff341b6730f64a1eb3a7070e12b373cf1c910a9337e7c3240497db76f"}, - {file = "yarl-1.14.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:46a9772a1efa93f9cd170ad33101c1817c77e0e9914d4fe33e2da299d7cf0f9b"}, - {file = "yarl-1.14.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:ee2c68e4f2dd1b1c15b849ba1c96fac105fca6ffdb7c1e8be51da6fabbdeafb9"}, - {file = "yarl-1.14.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = 
"sha256:047b258e00b99091b6f90355521f026238c63bd76dcf996d93527bb13320eefd"}, - {file = "yarl-1.14.0-cp310-cp310-win32.whl", hash = "sha256:0aa92e3e30a04f9462a25077db689c4ac5ea9ab6cc68a2e563881b987d42f16d"}, - {file = "yarl-1.14.0-cp310-cp310-win_amd64.whl", hash = "sha256:d9baec588f015d0ee564057aa7574313c53a530662ffad930b7886becc85abdf"}, - {file = "yarl-1.14.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:07f9eaf57719d6721ab15805d85f4b01a5b509a0868d7320134371bcb652152d"}, - {file = "yarl-1.14.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c14b504a74e58e2deb0378b3eca10f3d076635c100f45b113c18c770b4a47a50"}, - {file = "yarl-1.14.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:16a682a127930f3fc4e42583becca6049e1d7214bcad23520c590edd741d2114"}, - {file = "yarl-1.14.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:73bedd2be05f48af19f0f2e9e1353921ce0c83f4a1c9e8556ecdcf1f1eae4892"}, - {file = "yarl-1.14.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f3ab950f8814f3b7b5e3eebc117986f817ec933676f68f0a6c5b2137dd7c9c69"}, - {file = "yarl-1.14.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b693c63e7e64b524f54aa4888403c680342d1ad0d97be1707c531584d6aeeb4f"}, - {file = "yarl-1.14.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:85cb3e40eaa98489f1e2e8b29f5ad02ee1ee40d6ce6b88d50cf0f205de1d9d2c"}, - {file = "yarl-1.14.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4f24f08b6c9b9818fd80612c97857d28f9779f0d1211653ece9844fc7b414df2"}, - {file = "yarl-1.14.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:29a84a46ec3ebae7a1c024c055612b11e9363a8a23238b3e905552d77a2bc51b"}, - {file = "yarl-1.14.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:5cd5dad8366e0168e0fd23d10705a603790484a6dbb9eb272b33673b8f2cce72"}, - {file = "yarl-1.14.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:a152751af7ef7b5d5fa6d215756e508dd05eb07d0cf2ba51f3e740076aa74373"}, - {file = "yarl-1.14.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:3d569f877ed9a708e4c71a2d13d2940cb0791da309f70bd970ac1a5c088a0a92"}, - {file = "yarl-1.14.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:6a615cad11ec3428020fb3c5a88d85ce1b5c69fd66e9fcb91a7daa5e855325dd"}, - {file = "yarl-1.14.0-cp311-cp311-win32.whl", hash = "sha256:bab03192091681d54e8225c53f270b0517637915d9297028409a2a5114ff4634"}, - {file = "yarl-1.14.0-cp311-cp311-win_amd64.whl", hash = "sha256:985623575e5c4ea763056ffe0e2d63836f771a8c294b3de06d09480538316b13"}, - {file = "yarl-1.14.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:fc2c80bc87fba076e6cbb926216c27fba274dae7100a7b9a0983b53132dd99f2"}, - {file = "yarl-1.14.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:55c144d363ad4626ca744556c049c94e2b95096041ac87098bb363dcc8635e8d"}, - {file = "yarl-1.14.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b03384eed107dbeb5f625a99dc3a7de8be04fc8480c9ad42fccbc73434170b20"}, - {file = "yarl-1.14.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f72a0d746d38cb299b79ce3d4d60ba0892c84bbc905d0d49c13df5bace1b65f8"}, - {file = "yarl-1.14.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8648180b34faaea4aa5b5ca7e871d9eb1277033fa439693855cf0ea9195f85f1"}, - {file = "yarl-1.14.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:9557c9322aaa33174d285b0c1961fb32499d65ad1866155b7845edc876c3c835"}, - {file = "yarl-1.14.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f50eb3837012a937a2b649ec872b66ba9541ad9d6f103ddcafb8231cfcafd22"}, - {file = "yarl-1.14.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8892fa575ac9b1b25fae7b221bc4792a273877b9b56a99ee2d8d03eeb3dbb1d2"}, - {file = "yarl-1.14.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:e6a2c5c5bb2556dfbfffffc2bcfb9c235fd2b566d5006dfb2a37afc7e3278a07"}, - {file = "yarl-1.14.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:ab3abc0b78a5dfaa4795a6afbe7b282b6aa88d81cf8c1bb5e394993d7cae3457"}, - {file = "yarl-1.14.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:47eede5d11d669ab3759b63afb70d28d5328c14744b8edba3323e27dc52d298d"}, - {file = "yarl-1.14.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:fe4d2536c827f508348d7b40c08767e8c7071614250927233bf0c92170451c0a"}, - {file = "yarl-1.14.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:0fd7b941dd1b00b5f0acb97455fea2c4b7aac2dd31ea43fb9d155e9bc7b78664"}, - {file = "yarl-1.14.0-cp312-cp312-win32.whl", hash = "sha256:99ff3744f5fe48288be6bc402533b38e89749623a43208e1d57091fc96b783b9"}, - {file = "yarl-1.14.0-cp312-cp312-win_amd64.whl", hash = "sha256:1ca3894e9e9f72da93544f64988d9c052254a338a9f855165f37f51edb6591de"}, - {file = "yarl-1.14.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:5d02d700705d67e09e1f57681f758f0b9d4412eeb70b2eb8d96ca6200b486db3"}, - {file = "yarl-1.14.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:30600ba5db60f7c0820ef38a2568bb7379e1418ecc947a0f76fd8b2ff4257a97"}, - {file = "yarl-1.14.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:e85d86527baebb41a214cc3b45c17177177d900a2ad5783dbe6f291642d4906f"}, - {file = "yarl-1.14.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:37001e5d4621cef710c8dc1429ca04e189e572f128ab12312eab4e04cf007132"}, - {file = "yarl-1.14.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f4f4547944d4f5cfcdc03f3f097d6f05bbbc915eaaf80a2ee120d0e756de377d"}, - {file = "yarl-1.14.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:75ff4c819757f9bdb35de049a509814d6ce851fe26f06eb95a392a5640052482"}, - {file = "yarl-1.14.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:68ac1a09392ed6e3fd14be880d39b951d7b981fd135416db7d18a6208c536561"}, - {file = "yarl-1.14.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:96952f642ac69075e44c7d0284528938fdff39422a1d90d3e45ce40b72e5e2d9"}, - {file = "yarl-1.14.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a56fbe3d7f3bce1d060ea18d2413a2ca9ca814eea7cedc4d247b5f338d54844e"}, - {file = "yarl-1.14.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:7e2637d75e92763d1322cb5041573279ec43a80c0f7fbbd2d64f5aee98447b17"}, - {file = "yarl-1.14.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:9abe80ae2c9d37c17599557b712e6515f4100a80efb2cda15f5f070306477cd2"}, - {file = "yarl-1.14.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:217a782020b875538eebf3948fac3a7f9bbbd0fd9bf8538f7c2ad7489e80f4e8"}, - {file = "yarl-1.14.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:b9cfef3f14f75bf6aba73a76caf61f9d00865912a04a4393c468a7ce0981b519"}, - {file = "yarl-1.14.0-cp313-cp313-win32.whl", hash = 
"sha256:d8361c7d04e6a264481f0b802e395f647cd3f8bbe27acfa7c12049efea675bd1"}, - {file = "yarl-1.14.0-cp313-cp313-win_amd64.whl", hash = "sha256:bc24f968b82455f336b79bf37dbb243b7d76cd40897489888d663d4e028f5069"}, - {file = "yarl-1.14.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:91d875f75fabf76b3018c5f196bf3d308ed2b49ddcb46c1576d6b075754a1393"}, - {file = "yarl-1.14.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:4009def9be3a7e5175db20aa2d7307ecd00bbf50f7f0f989300710eee1d0b0b9"}, - {file = "yarl-1.14.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:582cedde49603f139be572252a318b30dc41039bc0b8165f070f279e5d12187f"}, - {file = "yarl-1.14.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dbd9ff43a04f8ffe8a959a944c2dca10d22f5f99fc6a459f49c3ebfb409309d9"}, - {file = "yarl-1.14.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b9f805e37ed16cc212fdc538a608422d7517e7faf539bedea4fe69425bc55d76"}, - {file = "yarl-1.14.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:95e16e9eaa2d7f5d87421b8fe694dd71606aa61d74b824c8d17fc85cc51983d1"}, - {file = "yarl-1.14.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:816d24f584edefcc5ca63428f0b38fee00b39fe64e3c5e558f895a18983efe96"}, - {file = "yarl-1.14.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cd2660c01367eb3ef081b8fa0a5da7fe767f9427aa82023a961a5f28f0d4af6c"}, - {file = "yarl-1.14.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:94b2bb9bcfd5be9d27004ea4398fb640373dd0c1a9e219084f42c08f77a720ab"}, - {file = "yarl-1.14.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:c2089a9afef887664115f7fa6d3c0edd6454adaca5488dba836ca91f60401075"}, - {file = "yarl-1.14.0-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:2192f718db4a8509f63dd6d950f143279211fa7e6a2c612edc17d85bf043d36e"}, - {file = "yarl-1.14.0-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:8385ab36bf812e9d37cf7613999a87715f27ef67a53f0687d28c44b819df7cb0"}, - {file = "yarl-1.14.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:b4c1ecba93e7826dc71ddba75fb7740cdb52e7bd0be9f03136b83f54e6a1f511"}, - {file = "yarl-1.14.0-cp38-cp38-win32.whl", hash = "sha256:e749af6c912a7bb441d105c50c1a3da720474e8acb91c89350080dd600228f0e"}, - {file = "yarl-1.14.0-cp38-cp38-win_amd64.whl", hash = "sha256:147e36331f6f63e08a14640acf12369e041e0751bb70d9362df68c2d9dcf0c87"}, - {file = "yarl-1.14.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:7a9f917966d27f7ce30039fe8d900f913c5304134096554fd9bea0774bcda6d1"}, - {file = "yarl-1.14.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:8a2f8fb7f944bcdfecd4e8d855f84c703804a594da5123dd206f75036e536d4d"}, - {file = "yarl-1.14.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:8f4e475f29a9122f908d0f1f706e1f2fc3656536ffd21014ff8a6f2e1b14d1d8"}, - {file = "yarl-1.14.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8089d4634d8fa2b1806ce44fefa4979b1ab2c12c0bc7ef3dfa45c8a374811348"}, - {file = "yarl-1.14.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1b16f6c75cffc2dc0616ea295abb0e1967601bd1fb1e0af6a1de1c6c887f3439"}, - {file = "yarl-1.14.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:498b3c55087b9d762636bca9b45f60d37e51d24341786dc01b81253f9552a607"}, - {file = "yarl-1.14.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:e3f8bfc1db82589ef965ed234b87de30d140db8b6dc50ada9e33951ccd8ec07a"}, - {file = "yarl-1.14.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:625f207b1799e95e7c823f42f473c1e9dbfb6192bd56bba8695656d92be4535f"}, - {file = "yarl-1.14.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:781e2495e408a81e4eaeedeb41ba32b63b1980dddf8b60dbbeff6036bcd35049"}, - {file = "yarl-1.14.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:659603d26d40dd4463200df9bfbc339fbfaed3fe32e5c432fe1dc2b5d4aa94b4"}, - {file = "yarl-1.14.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:4e0d45ebf975634468682c8bec021618b3ad52c37619e5c938f8f831fa1ac5c0"}, - {file = "yarl-1.14.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:a2e4725a08cb2b4794db09e350c86dee18202bb8286527210e13a1514dc9a59a"}, - {file = "yarl-1.14.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:19268b4fec1d7760134f2de46ef2608c2920134fb1fa61e451f679e41356dc55"}, - {file = "yarl-1.14.0-cp39-cp39-win32.whl", hash = "sha256:337912bcdcf193ade64b9aae5a4017a0a1950caf8ca140362e361543c6773f21"}, - {file = "yarl-1.14.0-cp39-cp39-win_amd64.whl", hash = "sha256:b6d0147574ce2e7b812c989e50fa72bbc5338045411a836bd066ce5fc8ac0bce"}, - {file = "yarl-1.14.0-py3-none-any.whl", hash = "sha256:c8ed4034f0765f8861620c1f2f2364d2e58520ea288497084dae880424fc0d9f"}, - {file = "yarl-1.14.0.tar.gz", hash = "sha256:88c7d9d58aab0724b979ab5617330acb1c7030b79379c8138c1c8c94e121d1b3"}, + {file = "yarl-1.18.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7df647e8edd71f000a5208fe6ff8c382a1de8edfbccdbbfe649d263de07d8c34"}, + {file = "yarl-1.18.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c69697d3adff5aa4f874b19c0e4ed65180ceed6318ec856ebc423aa5850d84f7"}, + {file = "yarl-1.18.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:602d98f2c2d929f8e697ed274fbadc09902c4025c5a9963bf4e9edfc3ab6f7ed"}, + {file = "yarl-1.18.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c654d5207c78e0bd6d749f6dae1dcbbfde3403ad3a4b11f3c5544d9906969dde"}, + {file = "yarl-1.18.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5094d9206c64181d0f6e76ebd8fb2f8fe274950a63890ee9e0ebfd58bf9d787b"}, + {file = "yarl-1.18.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:35098b24e0327fc4ebdc8ffe336cee0a87a700c24ffed13161af80124b7dc8e5"}, + {file = "yarl-1.18.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3236da9272872443f81fedc389bace88408f64f89f75d1bdb2256069a8730ccc"}, + {file = "yarl-1.18.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e2c08cc9b16f4f4bc522771d96734c7901e7ebef70c6c5c35dd0f10845270bcd"}, + {file = "yarl-1.18.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:80316a8bd5109320d38eef8833ccf5f89608c9107d02d2a7f985f98ed6876990"}, + {file = "yarl-1.18.3-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:c1e1cc06da1491e6734f0ea1e6294ce00792193c463350626571c287c9a704db"}, + {file = "yarl-1.18.3-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:fea09ca13323376a2fdfb353a5fa2e59f90cd18d7ca4eaa1fd31f0a8b4f91e62"}, + {file = "yarl-1.18.3-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:e3b9fd71836999aad54084906f8663dffcd2a7fb5cdafd6c37713b2e72be1760"}, + {file = "yarl-1.18.3-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:757e81cae69244257d125ff31663249b3013b5dc0a8520d73694aed497fb195b"}, + {file = 
"yarl-1.18.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:b1771de9944d875f1b98a745bc547e684b863abf8f8287da8466cf470ef52690"}, + {file = "yarl-1.18.3-cp310-cp310-win32.whl", hash = "sha256:8874027a53e3aea659a6d62751800cf6e63314c160fd607489ba5c2edd753cf6"}, + {file = "yarl-1.18.3-cp310-cp310-win_amd64.whl", hash = "sha256:93b2e109287f93db79210f86deb6b9bbb81ac32fc97236b16f7433db7fc437d8"}, + {file = "yarl-1.18.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:8503ad47387b8ebd39cbbbdf0bf113e17330ffd339ba1144074da24c545f0069"}, + {file = "yarl-1.18.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:02ddb6756f8f4517a2d5e99d8b2f272488e18dd0bfbc802f31c16c6c20f22193"}, + {file = "yarl-1.18.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:67a283dd2882ac98cc6318384f565bffc751ab564605959df4752d42483ad889"}, + {file = "yarl-1.18.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d980e0325b6eddc81331d3f4551e2a333999fb176fd153e075c6d1c2530aa8a8"}, + {file = "yarl-1.18.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b643562c12680b01e17239be267bc306bbc6aac1f34f6444d1bded0c5ce438ca"}, + {file = "yarl-1.18.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c017a3b6df3a1bd45b9fa49a0f54005e53fbcad16633870104b66fa1a30a29d8"}, + {file = "yarl-1.18.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75674776d96d7b851b6498f17824ba17849d790a44d282929c42dbb77d4f17ae"}, + {file = "yarl-1.18.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ccaa3a4b521b780a7e771cc336a2dba389a0861592bbce09a476190bb0c8b4b3"}, + {file = "yarl-1.18.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:2d06d3005e668744e11ed80812e61efd77d70bb7f03e33c1598c301eea20efbb"}, + {file = "yarl-1.18.3-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:9d41beda9dc97ca9ab0b9888cb71f7539124bc05df02c0cff6e5acc5a19dcc6e"}, + {file = "yarl-1.18.3-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:ba23302c0c61a9999784e73809427c9dbedd79f66a13d84ad1b1943802eaaf59"}, + {file = "yarl-1.18.3-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:6748dbf9bfa5ba1afcc7556b71cda0d7ce5f24768043a02a58846e4a443d808d"}, + {file = "yarl-1.18.3-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:0b0cad37311123211dc91eadcb322ef4d4a66008d3e1bdc404808992260e1a0e"}, + {file = "yarl-1.18.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:0fb2171a4486bb075316ee754c6d8382ea6eb8b399d4ec62fde2b591f879778a"}, + {file = "yarl-1.18.3-cp311-cp311-win32.whl", hash = "sha256:61b1a825a13bef4a5f10b1885245377d3cd0bf87cba068e1d9a88c2ae36880e1"}, + {file = "yarl-1.18.3-cp311-cp311-win_amd64.whl", hash = "sha256:b9d60031cf568c627d028239693fd718025719c02c9f55df0a53e587aab951b5"}, + {file = "yarl-1.18.3-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:1dd4bdd05407ced96fed3d7f25dbbf88d2ffb045a0db60dbc247f5b3c5c25d50"}, + {file = "yarl-1.18.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:7c33dd1931a95e5d9a772d0ac5e44cac8957eaf58e3c8da8c1414de7dd27c576"}, + {file = "yarl-1.18.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:25b411eddcfd56a2f0cd6a384e9f4f7aa3efee14b188de13048c25b5e91f1640"}, + {file = "yarl-1.18.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:436c4fc0a4d66b2badc6c5fc5ef4e47bb10e4fd9bf0c79524ac719a01f3607c2"}, + {file = "yarl-1.18.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:e35ef8683211db69ffe129a25d5634319a677570ab6b2eba4afa860f54eeaf75"}, + {file = "yarl-1.18.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:84b2deecba4a3f1a398df819151eb72d29bfeb3b69abb145a00ddc8d30094512"}, + {file = "yarl-1.18.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:00e5a1fea0fd4f5bfa7440a47eff01d9822a65b4488f7cff83155a0f31a2ecba"}, + {file = "yarl-1.18.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d0e883008013c0e4aef84dcfe2a0b172c4d23c2669412cf5b3371003941f72bb"}, + {file = "yarl-1.18.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:5a3f356548e34a70b0172d8890006c37be92995f62d95a07b4a42e90fba54272"}, + {file = "yarl-1.18.3-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:ccd17349166b1bee6e529b4add61727d3f55edb7babbe4069b5764c9587a8cc6"}, + {file = "yarl-1.18.3-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:b958ddd075ddba5b09bb0be8a6d9906d2ce933aee81100db289badbeb966f54e"}, + {file = "yarl-1.18.3-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:c7d79f7d9aabd6011004e33b22bc13056a3e3fb54794d138af57f5ee9d9032cb"}, + {file = "yarl-1.18.3-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:4891ed92157e5430874dad17b15eb1fda57627710756c27422200c52d8a4e393"}, + {file = "yarl-1.18.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ce1af883b94304f493698b00d0f006d56aea98aeb49d75ec7d98cd4a777e9285"}, + {file = "yarl-1.18.3-cp312-cp312-win32.whl", hash = "sha256:f91c4803173928a25e1a55b943c81f55b8872f0018be83e3ad4938adffb77dd2"}, + {file = "yarl-1.18.3-cp312-cp312-win_amd64.whl", hash = "sha256:7e2ee16578af3b52ac2f334c3b1f92262f47e02cc6193c598502bd46f5cd1477"}, + {file = "yarl-1.18.3-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:90adb47ad432332d4f0bc28f83a5963f426ce9a1a8809f5e584e704b82685dcb"}, + {file = "yarl-1.18.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:913829534200eb0f789d45349e55203a091f45c37a2674678744ae52fae23efa"}, + {file = "yarl-1.18.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:ef9f7768395923c3039055c14334ba4d926f3baf7b776c923c93d80195624782"}, + {file = "yarl-1.18.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:88a19f62ff30117e706ebc9090b8ecc79aeb77d0b1f5ec10d2d27a12bc9f66d0"}, + {file = "yarl-1.18.3-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e17c9361d46a4d5addf777c6dd5eab0715a7684c2f11b88c67ac37edfba6c482"}, + {file = "yarl-1.18.3-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1a74a13a4c857a84a845505fd2d68e54826a2cd01935a96efb1e9d86c728e186"}, + {file = "yarl-1.18.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:41f7ce59d6ee7741af71d82020346af364949314ed3d87553763a2df1829cc58"}, + {file = "yarl-1.18.3-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f52a265001d830bc425f82ca9eabda94a64a4d753b07d623a9f2863fde532b53"}, + {file = "yarl-1.18.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:82123d0c954dc58db301f5021a01854a85bf1f3bb7d12ae0c01afc414a882ca2"}, + {file = "yarl-1.18.3-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:2ec9bbba33b2d00999af4631a3397d1fd78290c48e2a3e52d8dd72db3a067ac8"}, + {file = "yarl-1.18.3-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:fbd6748e8ab9b41171bb95c6142faf068f5ef1511935a0aa07025438dd9a9bc1"}, + {file = "yarl-1.18.3-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = 
"sha256:877d209b6aebeb5b16c42cbb377f5f94d9e556626b1bfff66d7b0d115be88d0a"}, + {file = "yarl-1.18.3-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:b464c4ab4bfcb41e3bfd3f1c26600d038376c2de3297760dfe064d2cb7ea8e10"}, + {file = "yarl-1.18.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:8d39d351e7faf01483cc7ff7c0213c412e38e5a340238826be7e0e4da450fdc8"}, + {file = "yarl-1.18.3-cp313-cp313-win32.whl", hash = "sha256:61ee62ead9b68b9123ec24bc866cbef297dd266175d53296e2db5e7f797f902d"}, + {file = "yarl-1.18.3-cp313-cp313-win_amd64.whl", hash = "sha256:578e281c393af575879990861823ef19d66e2b1d0098414855dd367e234f5b3c"}, + {file = "yarl-1.18.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:61e5e68cb65ac8f547f6b5ef933f510134a6bf31bb178be428994b0cb46c2a04"}, + {file = "yarl-1.18.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:fe57328fbc1bfd0bd0514470ac692630f3901c0ee39052ae47acd1d90a436719"}, + {file = "yarl-1.18.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a440a2a624683108a1b454705ecd7afc1c3438a08e890a1513d468671d90a04e"}, + {file = "yarl-1.18.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:09c7907c8548bcd6ab860e5f513e727c53b4a714f459b084f6580b49fa1b9cee"}, + {file = "yarl-1.18.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b4f6450109834af88cb4cc5ecddfc5380ebb9c228695afc11915a0bf82116789"}, + {file = "yarl-1.18.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a9ca04806f3be0ac6d558fffc2fdf8fcef767e0489d2684a21912cc4ed0cd1b8"}, + {file = "yarl-1.18.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:77a6e85b90a7641d2e07184df5557132a337f136250caafc9ccaa4a2a998ca2c"}, + {file = "yarl-1.18.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6333c5a377c8e2f5fae35e7b8f145c617b02c939d04110c76f29ee3676b5f9a5"}, + {file = "yarl-1.18.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:0b3c92fa08759dbf12b3a59579a4096ba9af8dd344d9a813fc7f5070d86bbab1"}, + {file = "yarl-1.18.3-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:4ac515b860c36becb81bb84b667466885096b5fc85596948548b667da3bf9f24"}, + {file = "yarl-1.18.3-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:045b8482ce9483ada4f3f23b3774f4e1bf4f23a2d5c912ed5170f68efb053318"}, + {file = "yarl-1.18.3-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:a4bb030cf46a434ec0225bddbebd4b89e6471814ca851abb8696170adb163985"}, + {file = "yarl-1.18.3-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:54d6921f07555713b9300bee9c50fb46e57e2e639027089b1d795ecd9f7fa910"}, + {file = "yarl-1.18.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:1d407181cfa6e70077df3377938c08012d18893f9f20e92f7d2f314a437c30b1"}, + {file = "yarl-1.18.3-cp39-cp39-win32.whl", hash = "sha256:ac36703a585e0929b032fbaab0707b75dc12703766d0b53486eabd5139ebadd5"}, + {file = "yarl-1.18.3-cp39-cp39-win_amd64.whl", hash = "sha256:ba87babd629f8af77f557b61e49e7c7cac36f22f871156b91e10a6e9d4f829e9"}, + {file = "yarl-1.18.3-py3-none-any.whl", hash = "sha256:b57f4f58099328dfb26c6a771d09fb20dbbae81d20cfb66141251ea063bd101b"}, + {file = "yarl-1.18.3.tar.gz", hash = "sha256:ac1801c45cbf77b6c99242eeff4fffb5e4e73a800b5c4ad4fc0be5def634d2e1"}, ] [package.dependencies] @@ -7621,13 +7655,13 @@ jupyter = ["ipytree (>=0.2.2)", "ipywidgets (>=8.0.0)", "notebook"] [[package]] name = "zipp" -version = "3.20.2" +version = "3.21.0" description = "Backport of pathlib-compatible object wrapper for zip files" optional = false 
-python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "zipp-3.20.2-py3-none-any.whl", hash = "sha256:a817ac80d6cf4b23bf7f2828b7cabf326f15a001bea8b1f9b49631780ba28350"}, - {file = "zipp-3.20.2.tar.gz", hash = "sha256:bc9eb26f4506fda01b81bcde0ca78103b6e62f991b381fec825435c836edbc29"}, + {file = "zipp-3.21.0-py3-none-any.whl", hash = "sha256:ac1bbe05fd2991f160ebce24ffbac5f6d11d83dc90891255885223d42b3cd931"}, + {file = "zipp-3.21.0.tar.gz", hash = "sha256:2c9958f6430a2040341a52eb608ed6dd93ef4392e02ffe219417c1b28b5dd1f4"}, ] [package.extras] @@ -7655,4 +7689,4 @@ xarm = ["gym-xarm"] [metadata] lock-version = "2.0" python-versions = ">=3.10,<3.13" -content-hash = "5350e7ec386ad3a610c0cb546a1681762cc8c6d37eef47fd833ab958b815b260" +content-hash = "6286d7134f712555abe029b9350f4d37a4a443235477fa1e0f91fa10a7e69789" diff --git a/pyproject.toml b/pyproject.toml index 5fd6084e88..5af59cceb2 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -43,7 +43,7 @@ opencv-python = ">=4.9.0" diffusers = ">=0.27.2" torchvision = ">=0.17.1" h5py = ">=3.10.0" -huggingface-hub = {extras = ["hf-transfer", "cli"], version = ">=0.25.2"} +huggingface-hub = {extras = ["hf-transfer", "cli"], version = ">=0.27.1"} gymnasium = "==0.29.1" # TODO(rcadene, aliberts): Make gym 1.0.0 work cmake = ">=3.29.0.1" gym-dora = { git = "https://github.com/dora-rs/dora-lerobot.git", subdirectory = "gym_dora", optional = true } @@ -71,7 +71,9 @@ pyrender = {git = "https://github.com/mmatl/pyrender.git", markers = "sys_platfo hello-robot-stretch-body = {version = ">=0.7.27", markers = "sys_platform == 'linux'", optional = true} pyserial = {version = ">=3.5", optional = true} jsonlines = ">=4.0.0" -draccus = ">=0.9.3" +# draccus = ">=0.9.4" # replace when https://github.com/dlwh/draccus/pull/29 is in release +draccus = {git = "https://github.com/dlwh/draccus.git", rev = "55e456a"} +# draccus = { path = "../draccus/", develop = true} [tool.poetry.extras] From 4a4ef9b15e954701ce029381acfecf190f425ac6 Mon Sep 17 00:00:00 2001 From: Simon Alibert Date: Mon, 6 Jan 2025 22:09:25 +0100 Subject: [PATCH 14/94] Fix pretrained_path --- lerobot/configs/eval.py | 2 +- lerobot/configs/training.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lerobot/configs/eval.py b/lerobot/configs/eval.py index 97156f1ca0..a5b80bced9 100644 --- a/lerobot/configs/eval.py +++ b/lerobot/configs/eval.py @@ -50,7 +50,7 @@ def __post_init__(self): if policy_path: cli_overrides = parser.get_cli_overrides("policy") self.policy = PretrainedConfig.from_pretrained(policy_path, cli_overrides=cli_overrides) - self.policy.set_pretrained_path(policy_path) + self.policy.pretrained_path = policy_path if not self.job_name: self.job_name = f"{self.env.type}_{self.policy.type}" diff --git a/lerobot/configs/training.py b/lerobot/configs/training.py index ca0b6656e7..b489cd7cec 100644 --- a/lerobot/configs/training.py +++ b/lerobot/configs/training.py @@ -115,7 +115,7 @@ def __post_init__(self): if policy_path: cli_overrides = parser.get_cli_overrides("policy") self.policy = PretrainedConfig.from_pretrained(policy_path, cli_overrides=cli_overrides) - self.policy.set_pretrained_path(policy_path) + self.policy.pretrained_path = policy_path if not self.job_name: self.job_name = f"{self.env.type}_{self.policy.type}" From 68463a344153f09928acd895aeec48d66a711a05 Mon Sep 17 00:00:00 2001 From: Simon Alibert Date: Tue, 7 Jan 2025 18:28:49 +0100 Subject: [PATCH 15/94] Refactor envs, remove RealEnv --- lerobot/common/envs/__init__.py | 2 +- 
lerobot/common/envs/configs.py | 71 +++++++++++++++++++++------------ lerobot/common/envs/factory.py | 21 +++------- lerobot/configs/eval.py | 8 +++- lerobot/configs/training.py | 10 ++++- 5 files changed, 68 insertions(+), 44 deletions(-) diff --git a/lerobot/common/envs/__init__.py b/lerobot/common/envs/__init__.py index 490b487c0d..a583ffc5be 100644 --- a/lerobot/common/envs/__init__.py +++ b/lerobot/common/envs/__init__.py @@ -1 +1 @@ -from .configs import AlohaEnv, EnvConfig, RealEnv # noqa: F401 +from .configs import AlohaEnv, EnvConfig, PushtEnv, XarmEnv # noqa: F401 diff --git a/lerobot/common/envs/configs.py b/lerobot/common/envs/configs.py index 1c4042e049..340682a692 100644 --- a/lerobot/common/envs/configs.py +++ b/lerobot/common/envs/configs.py @@ -1,3 +1,4 @@ +import abc from dataclasses import dataclass, field import draccus @@ -6,7 +7,7 @@ @dataclass -class EnvConfig(draccus.ChoiceRegistry): +class EnvConfig(draccus.ChoiceRegistry, abc.ABC): n_envs: int | None = None task: str | None = None fps: int = 30 @@ -16,11 +17,9 @@ class EnvConfig(draccus.ChoiceRegistry): def type(self) -> str: return self.get_choice_name(self.__class__) - -@EnvConfig.register_subclass("real_world") -@dataclass -class RealEnv(EnvConfig): - pass + @abc.abstractproperty + def gym_kwargs(self) -> dict: + raise NotImplementedError() @EnvConfig.register_subclass("aloha") @@ -38,12 +37,16 @@ class AlohaEnv(EnvConfig): "action": FeatureType.ACTION, } ) - gym: dict = field( - default_factory=lambda: { - "obs_type": "pixels_agent_pos", - "render_mode": "rgb_array", + obs_type: str = "pixels_agent_pos" + render_mode: str = "rgb_array" + + @property + def gym_kwargs(self) -> dict: + return { + "obs_type": self.obs_type, + "render_mode": self.render_mode, + "max_episode_steps": self.episode_length, } - ) @EnvConfig.register_subclass("pusht") @@ -59,14 +62,27 @@ class PushtEnv(EnvConfig): "action": FeatureType.ACTION, } ) - gym: dict = field( - default_factory=lambda: { - "obs_type": "pixels_agent_pos", - "render_mode": "rgb_array", - "visualization_width": 384, - "visualization_height": 384, + obs_type: str = "pixels_agent_pos" + render_mode: str = "rgb_array" + visualization_width: int = 384 + visualization_height: int = 384 + + def __post_init__(self): + if self.obs_type == "environment_state_agent_pos": + self.feature_types = { + "agent_pos": FeatureType.STATE, + "environment_state": FeatureType.ENV, + "action": FeatureType.ACTION, + } + + @property + def gym_kwargs(self) -> dict: + return { + "obs_type": self.obs_type, + "render_mode": self.render_mode, + "visualization_width": self.visualization_width, + "visualization_height": self.visualization_height, } - ) @EnvConfig.register_subclass("xarm") @@ -82,11 +98,16 @@ class XarmEnv(EnvConfig): "action": FeatureType.ACTION, } ) - gym: dict = field( - default_factory=lambda: { - "obs_type": "pixels_agent_pos", - "render_mode": "rgb_array", - "visualization_width": 384, - "visualization_height": 384, + obs_type: str = "pixels_agent_pos" + render_mode: str = "rgb_array" + visualization_width: int = 384 + visualization_height: int = 384 + + @property + def gym_kwargs(self) -> dict: + return { + "obs_type": self.obs_type, + "render_mode": self.render_mode, + "visualization_width": self.visualization_width, + "visualization_height": self.visualization_height, } - ) diff --git a/lerobot/common/envs/factory.py b/lerobot/common/envs/factory.py index 4cf96f8ced..f9a4dfcecf 100644 --- a/lerobot/common/envs/factory.py +++ b/lerobot/common/envs/factory.py @@ -20,19 +20,17 
@@ from lerobot.common.envs.configs import EnvConfig -def make_env( - cfg: EnvConfig, n_envs: int | None = None, use_async_envs: bool = False -) -> gym.vector.VectorEnv | None: +def make_env(cfg: EnvConfig, n_envs: int = 1, use_async_envs: bool = False) -> gym.vector.VectorEnv | None: """Makes a gym vector environment according to the evaluation config. n_envs can be used to override eval.batch_size in the configuration. Must be at least 1. """ - if n_envs is not None and n_envs < 1: - raise ValueError("`n_envs must be at least 1") - - if cfg.type == "real_world": + if cfg is None: return + if n_envs < 1: + raise ValueError("`n_envs must be at least 1") + package_name = f"gym_{cfg.type}" try: @@ -42,18 +40,11 @@ def make_env( raise e gym_handle = f"{package_name}/{cfg.task}" - gym_kwgs = getattr(cfg, "gym", {}) - - if getattr(cfg, "episode_length", None): - gym_kwgs["max_episode_steps"] = cfg.episode_length # batched version of the env that returns an observation of shape (b, c) env_cls = gym.vector.AsyncVectorEnv if use_async_envs else gym.vector.SyncVectorEnv env = env_cls( - [ - lambda: gym.make(gym_handle, disable_env_checker=True, **gym_kwgs) - for _ in range(n_envs if n_envs is not None else cfg.eval.batch_size) - ] + [lambda: gym.make(gym_handle, disable_env_checker=True, **cfg.gym_kwargs) for _ in range(n_envs)] ) return env diff --git a/lerobot/configs/eval.py b/lerobot/configs/eval.py index a5b80bced9..caba3f5a2b 100644 --- a/lerobot/configs/eval.py +++ b/lerobot/configs/eval.py @@ -2,7 +2,7 @@ from dataclasses import Field, dataclass, fields from pathlib import Path -from lerobot.common import envs # F401: noqa +from lerobot.common import envs, policies # noqa: F401 from lerobot.configs import parser from lerobot.configs.policies import PretrainedConfig @@ -45,6 +45,12 @@ class EvalPipelineConfig: seed: int | None = 1000 def __post_init__(self): + if self.use_amp and self.device not in ["cuda", "cpu"]: + raise NotImplementedError( + "Automatic Mixed Precision (amp) is only available for 'cuda' and 'cpu' devices. " + f"Selected device: {self.device}" + ) + # HACK: We parse again the cli args here to get the pretrained path if there was one. policy_path = parser.get_path_arg("policy") if policy_path: diff --git a/lerobot/configs/training.py b/lerobot/configs/training.py index b489cd7cec..c62fa42163 100644 --- a/lerobot/configs/training.py +++ b/lerobot/configs/training.py @@ -65,7 +65,7 @@ class OnlineConfig: @dataclass class TrainPipelineConfig: dataset: DatasetConfig - env: envs.EnvConfig = field(default_factory=envs.RealEnv) + env: envs.EnvConfig | None = None policy: PretrainedConfig | None = None # Set `dir` to where you would like to save all of the run outputs. If you run another training session # with the same value for `dir` its contents will be overwritten unless you set `resume` to true. @@ -102,6 +102,12 @@ class TrainPipelineConfig: def __post_init__(self): self.checkpoint_path = None + if self.use_amp and self.device not in ["cuda", "cpu"]: + raise NotImplementedError( + "Automatic Mixed Precision (amp) is only available for 'cuda' and 'cpu' devices. " + f"Selected device: {self.device}" + ) + # HACK: We parse again the cli args here to get the pretrained paths if there was some. 
if self.resume: # The entire train config is already loaded, we just need to get the checkpoint dir @@ -110,9 +116,9 @@ def __post_init__(self): self.policy.pretrained_path = policy_path self.checkpoint_path = policy_path.parent else: - # Only load the policy config policy_path = parser.get_path_arg("policy") if policy_path: + # Only load the policy config cli_overrides = parser.get_cli_overrides("policy") self.policy = PretrainedConfig.from_pretrained(policy_path, cli_overrides=cli_overrides) self.policy.pretrained_path = policy_path From 2bdf1d23b0786e697ab6aef2b1291731cf2b4d83 Mon Sep 17 00:00:00 2001 From: Simon Alibert Date: Tue, 7 Jan 2025 18:29:16 +0100 Subject: [PATCH 16/94] Fix typo --- lerobot/scripts/train.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lerobot/scripts/train.py b/lerobot/scripts/train.py index 0dbb098ca4..cc24301b9a 100644 --- a/lerobot/scripts/train.py +++ b/lerobot/scripts/train.py @@ -339,7 +339,7 @@ def evaluate_and_checkpoint_if_needed(step: int, is_online: bool): # Create an env dedicated to online episodes collection from policy rollout. online_env = make_env(cfg.env, n_envs=cfg.online.rollout_batch_size) - detla_timestamps = resolve_delta_timestamps(cfg.policy, offline_dataset.meta) + delta_timestamps = resolve_delta_timestamps(cfg.policy, offline_dataset.meta) online_buffer_path = logger.log_dir / "online_buffer" if cfg.resume and not online_buffer_path.exists(): # If we are resuming a run, we default to the data shapes and buffer capacity from the saved online @@ -367,7 +367,7 @@ def evaluate_and_checkpoint_if_needed(step: int, is_online: bool): }, buffer_capacity=cfg.online.buffer_capacity, fps=online_env.unwrapped.metadata["render_fps"], - delta_timestamps=detla_timestamps, + delta_timestamps=delta_timestamps, ) # If we are doing online rollouts asynchronously, deepcopy the policy to use for online rollouts (this From 9c6edc2c4ae301acc17ab8f5ca5a2f2e3659c19f Mon Sep 17 00:00:00 2001 From: Simon Alibert Date: Tue, 7 Jan 2025 18:29:50 +0100 Subject: [PATCH 17/94] Enable end-to-end tests --- .github/workflows/test.yml | 71 +++++---- Makefile | 287 ++++++++++++++++++++----------------- 2 files changed, 188 insertions(+), 170 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 53b37466a2..3a79c60fc5 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -101,39 +101,38 @@ jobs: -W ignore::UserWarning:gymnasium.utils.env_checker:247 \ && rm -rf tests/outputs outputs - # TODO(aliberts, rcadene): redesign after v2 migration / removing hydra - # end-to-end: - # name: End-to-end - # runs-on: ubuntu-latest - # env: - # MUJOCO_GL: egl - # steps: - # - uses: actions/checkout@v4 - # with: - # lfs: true # Ensure LFS files are pulled - - # - name: Install apt dependencies - # # portaudio19-dev is needed to install pyaudio - # run: | - # sudo apt-get update && \ - # sudo apt-get install -y libegl1-mesa-dev portaudio19-dev - - # - name: Install poetry - # run: | - # pipx install poetry && poetry config virtualenvs.in-project true - # echo "${{ github.workspace }}/.venv/bin" >> $GITHUB_PATH - - # - name: Set up Python 3.10 - # uses: actions/setup-python@v5 - # with: - # python-version: "3.10" - # cache: "poetry" - - # - name: Install poetry dependencies - # run: | - # poetry install --all-extras - - # - name: Test end-to-end - # run: | - # make test-end-to-end \ - # && rm -rf outputs + end-to-end: + name: End-to-end + runs-on: ubuntu-latest + env: + MUJOCO_GL: egl + steps: + - uses: 
actions/checkout@v4 + with: + lfs: true # Ensure LFS files are pulled + + - name: Install apt dependencies + # portaudio19-dev is needed to install pyaudio + run: | + sudo apt-get update && \ + sudo apt-get install -y libegl1-mesa-dev portaudio19-dev + + - name: Install poetry + run: | + pipx install poetry && poetry config virtualenvs.in-project true + echo "${{ github.workspace }}/.venv/bin" >> $GITHUB_PATH + + - name: Set up Python 3.10 + uses: actions/setup-python@v5 + with: + python-version: "3.10" + cache: "poetry" + + - name: Install poetry dependencies + run: | + poetry install --all-extras + + - name: Test end-to-end + run: | + make test-end-to-end \ + && rm -rf outputs diff --git a/Makefile b/Makefile index f6517497e1..9f77728d42 100644 --- a/Makefile +++ b/Makefile @@ -32,159 +32,178 @@ test-end-to-end: ${MAKE} DEVICE=$(DEVICE) test-act-pusht-tutorial test-act-ete-train: + debugpython lerobot/scripts/train.py \ + --policy.type=act \ + --policy.dim_model=64 \ + --policy.n_action_steps=20 \ + --policy.chunk_size=20 \ + --env.type=aloha \ + --dataset.repo_id=lerobot/aloha_sim_transfer_cube_human \ + --dataset.image_transforms.enable=true \ + --dataset.episodes="[0]" \ + --batch_size=2 \ + --offline.steps=4 \ + --online.steps=0 \ + --eval.n_episodes=1 \ + --eval.batch_size=1 \ + --save_freq=2 \ + --save_checkpoint=true \ + --log_freq=1 \ + --wandb.enable=false \ + --device=$(DEVICE) \ + --output_dir=tests/outputs/act/ + +test-act-ete-train-resume: python lerobot/scripts/train.py \ - policy=act \ - policy.dim_model=64 \ - env=aloha \ - wandb.enable=False \ - training.offline_steps=2 \ - training.online_steps=0 \ - eval.n_episodes=1 \ - eval.batch_size=1 \ - device=$(DEVICE) \ - training.save_checkpoint=true \ - training.save_freq=2 \ - policy.n_action_steps=20 \ - policy.chunk_size=20 \ - training.batch_size=2 \ - training.image_transforms.enable=true \ - hydra.run.dir=tests/outputs/act/ + --config_path=tests/outputs/act/checkpoints/000002/pretrained_model/train_config.json \ + --resume=true test-act-ete-eval: python lerobot/scripts/eval.py \ - -p tests/outputs/act/checkpoints/000002/pretrained_model \ - eval.n_episodes=1 \ - eval.batch_size=1 \ - env.episode_length=8 \ - device=$(DEVICE) \ + --policy.path=tests/outputs/act/checkpoints/000004/pretrained_model \ + --env.type=aloha \ + --eval.n_episodes=1 \ + --eval.batch_size=1 \ + --eval.episode_length=8 \ + --device=$(DEVICE) test-act-ete-train-amp: python lerobot/scripts/train.py \ - policy=act \ - policy.dim_model=64 \ - env=aloha \ - wandb.enable=False \ - training.offline_steps=2 \ - training.online_steps=0 \ - eval.n_episodes=1 \ - eval.batch_size=1 \ - device=$(DEVICE) \ - training.save_checkpoint=true \ - training.save_freq=2 \ - policy.n_action_steps=20 \ - policy.chunk_size=20 \ - training.batch_size=2 \ - hydra.run.dir=tests/outputs/act_amp/ \ - training.image_transforms.enable=true \ - use_amp=true + --policy.type=act \ + --policy.dim_model=64 \ + --policy.n_action_steps=20 \ + --policy.chunk_size=20 \ + --env.type=aloha \ + --dataset.repo_id=lerobot/aloha_sim_transfer_cube_human \ + --dataset.image_transforms.enable=true \ + --dataset.episodes="[0]" \ + --offline.steps=2 \ + --online.steps=0 \ + --eval.n_episodes=1 \ + --eval.batch_size=1 \ + --save_checkpoint=true \ + --save_freq=2 \ + --log_freq=1 \ + --batch_size=2 \ + --wandb.enable=false \ + --device=$(DEVICE) \ + --use_amp=true \ + --output_dir=tests/outputs/act_amp/ test-act-ete-eval-amp: python lerobot/scripts/eval.py \ - -p 
tests/outputs/act_amp/checkpoints/000002/pretrained_model \ - eval.n_episodes=1 \ - eval.batch_size=1 \ - env.episode_length=8 \ - device=$(DEVICE) \ - use_amp=true + --policy.path=tests/outputs/act/checkpoints/000002/pretrained_model \ + --env.type=aloha \ + --eval.n_episodes=1 \ + --eval.batch_size=1 \ + --eval.episode_length=8 \ + --use_amp=true \ + --device=$(DEVICE) test-diffusion-ete-train: python lerobot/scripts/train.py \ - policy=diffusion \ - policy.down_dims=\[64,128,256\] \ - policy.diffusion_step_embed_dim=32 \ - policy.num_inference_steps=10 \ - env=pusht \ - wandb.enable=False \ - training.offline_steps=2 \ - training.online_steps=0 \ - eval.n_episodes=1 \ - eval.batch_size=1 \ - device=$(DEVICE) \ - training.save_checkpoint=true \ - training.save_freq=2 \ - training.batch_size=2 \ - training.image_transforms.enable=true \ - hydra.run.dir=tests/outputs/diffusion/ + --policy.type=diffusion \ + --policy.down_dims='[64,128,256]' \ + --policy.diffusion_step_embed_dim=32 \ + --policy.num_inference_steps=10 \ + --env.type=pusht \ + --dataset.repo_id=lerobot/pusht \ + --dataset.image_transforms.enable=true \ + --dataset.episodes="[0]" \ + --batch_size=2 \ + --offline.steps=2 \ + --online.steps=0 \ + --eval.n_episodes=1 \ + --eval.batch_size=1 \ + --save_checkpoint=true \ + --save_freq=2 \ + --log_freq=1 \ + --wandb.enable=false \ + --device=$(DEVICE) \ + --output_dir=tests/outputs/diffusion/ test-diffusion-ete-eval: python lerobot/scripts/eval.py \ - -p tests/outputs/diffusion/checkpoints/000002/pretrained_model \ - eval.n_episodes=1 \ - eval.batch_size=1 \ - env.episode_length=8 \ - device=$(DEVICE) \ + --policy.path=tests/outputs/diffusion/checkpoints/000002/pretrained_model \ + --env.type=aloha \ + --eval.n_episodes=1 \ + --eval.batch_size=1 \ + --eval.episode_length=8 \ + --device=$(DEVICE) test-tdmpc-ete-train: python lerobot/scripts/train.py \ - policy=tdmpc \ - env=xarm \ - env.task=XarmLift-v0 \ - dataset_repo_id=lerobot/xarm_lift_medium \ - wandb.enable=False \ - training.offline_steps=2 \ - training.online_steps=0 \ - eval.n_episodes=1 \ - eval.batch_size=1 \ - env.episode_length=2 \ - device=$(DEVICE) \ - training.save_checkpoint=true \ - training.save_freq=2 \ - training.batch_size=2 \ - training.image_transforms.enable=true \ - hydra.run.dir=tests/outputs/tdmpc/ - -test-tdmpc-ete-train-with-online: - python lerobot/scripts/train.py \ - env=pusht \ - env.gym.obs_type=environment_state_agent_pos \ - policy=tdmpc_pusht_keypoints \ - eval.n_episodes=1 \ - eval.batch_size=1 \ - env.episode_length=10 \ - device=$(DEVICE) \ - training.offline_steps=2 \ - training.online_steps=20 \ - training.save_checkpoint=false \ - training.save_freq=10 \ - training.batch_size=2 \ - training.online_rollout_n_episodes=2 \ - training.online_rollout_batch_size=2 \ - training.online_steps_between_rollouts=10 \ - training.online_buffer_capacity=15 \ - eval.use_async_envs=true \ - hydra.run.dir=tests/outputs/tdmpc_online/ - + --policy.type=tdmpc \ + --env.type=xarm \ + --env.task=XarmLift-v0 \ + --env.episode_length=2 \ + --dataset.repo_id=lerobot/xarm_lift_medium \ + --dataset.image_transforms.enable=true \ + --dataset.episodes='[0]' \ + --batch_size=2 \ + --offline.steps=2 \ + --online.steps=0 \ + --eval.n_episodes=1 \ + --eval.batch_size=1 \ + --save_checkpoint=true \ + --save_freq=2 \ + --log_freq=1 \ + --wandb.enable=false \ + --device=$(DEVICE) \ + --output_dir=tests/outputs/tdmpc/ test-tdmpc-ete-eval: python lerobot/scripts/eval.py \ - -p 
tests/outputs/tdmpc/checkpoints/000002/pretrained_model \ - eval.n_episodes=1 \ - eval.batch_size=1 \ - env.episode_length=8 \ - device=$(DEVICE) \ - -test-default-ete-eval: - python lerobot/scripts/eval.py \ - --config lerobot/configs/default.yaml \ - eval.n_episodes=1 \ - eval.batch_size=1 \ - env.episode_length=8 \ - device=$(DEVICE) \ - -test-act-pusht-tutorial: - cp examples/advanced/1_train_act_pusht/act_pusht.yaml lerobot/configs/policy/created_by_Makefile.yaml - python lerobot/scripts/train.py \ - policy=created_by_Makefile.yaml \ - env=pusht \ - wandb.enable=False \ - training.offline_steps=2 \ - eval.n_episodes=1 \ - eval.batch_size=1 \ - env.episode_length=2 \ - device=$(DEVICE) \ - training.save_model=true \ - training.save_freq=2 \ - training.batch_size=2 \ - training.image_transforms.enable=true \ - hydra.run.dir=tests/outputs/act_pusht/ - rm lerobot/configs/policy/created_by_Makefile.yaml + --policy.path=tests/outputs/tdmpc/checkpoints/000002/pretrained_model \ + --env.type=xarm \ + --env.task=XarmLift-v0 \ + --env.episode_length=2 \ + --eval.n_episodes=1 \ + --eval.batch_size=1 \ + --eval.episode_length=8 \ + --device=$(DEVICE) + +# FIXME: currently broken +# test-tdmpc-ete-train-with-online: +# python lerobot/scripts/train.py \ + --policy.type=tdmpc \ + --env.type=pusht \ + --env.obs_type=environment_state_agent_pos \ + --env.episode_length=10 \ + --dataset.repo_id=lerobot/pusht_keypoints \ + --dataset.image_transforms.enable=true \ + --dataset.episodes='[0]' \ + --batch_size=2 \ + --offline.steps=2 \ + --online.steps=20 \ + --online.rollout_n_episodes=2 \ + --online.rollout_batch_size=2 \ + --online.steps_between_rollouts=10 \ + --online.buffer_capacity=15 \ + --online.env_seed=10000 \ + --save_checkpoint=false \ + --save_freq=10 \ + --log_freq=1 \ + --eval.use_async_envs=true \ + --eval.n_episodes=1 \ + --eval.batch_size=1 \ + --device=$(DEVICE) \ + --output_dir=tests/outputs/tdmpc_online/ + +# test-act-pusht-tutorial: +# cp examples/advanced/1_train_act_pusht/act_pusht.yaml lerobot/configs/policy/created_by_Makefile.yaml +# python lerobot/scripts/train.py \ +# policy=created_by_Makefile.yaml \ +# env=pusht \ +# wandb.enable=False \ +# training.offline_steps=2 \ +# eval.n_episodes=1 \ +# eval.batch_size=1 \ +# env.episode_length=2 \ +# device=$(DEVICE) \ +# training.save_model=true \ +# training.save_freq=2 \ +# training.batch_size=2 \ +# training.image_transforms.enable=true \ +# hydra.run.dir=tests/outputs/act_pusht/ +# rm lerobot/configs/policy/created_by_Makefile.yaml From a29a1f17c48b386a8dff9e698de1e629b9bf51e2 Mon Sep 17 00:00:00 2001 From: Simon Alibert Date: Tue, 7 Jan 2025 18:34:31 +0100 Subject: [PATCH 18/94] Fix Makefile --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 9f77728d42..29a60b88cc 100644 --- a/Makefile +++ b/Makefile @@ -32,7 +32,7 @@ test-end-to-end: ${MAKE} DEVICE=$(DEVICE) test-act-pusht-tutorial test-act-ete-train: - debugpython lerobot/scripts/train.py \ + python lerobot/scripts/train.py \ --policy.type=act \ --policy.dim_model=64 \ --policy.n_action_steps=20 \ From d83a94c1ce43bbbd914b545f6c1317c05deaf54e Mon Sep 17 00:00:00 2001 From: Simon Alibert Date: Wed, 8 Jan 2025 09:59:20 +0100 Subject: [PATCH 19/94] Log eval config --- lerobot/scripts/eval.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/lerobot/scripts/eval.py b/lerobot/scripts/eval.py index 5d408a10a8..ad5db2478b 100644 --- a/lerobot/scripts/eval.py +++ b/lerobot/scripts/eval.py @@ -47,7 +47,9 @@ 
import time from contextlib import nullcontext from copy import deepcopy +from dataclasses import asdict from pathlib import Path +from pprint import pformat from typing import Callable import einops @@ -66,6 +68,7 @@ from lerobot.common.utils.io_utils import write_video from lerobot.common.utils.utils import ( get_safe_torch_device, + init_logging, inside_slurm, set_global_seed, ) @@ -438,6 +441,9 @@ def _compile_episode_data( @parser.wrap(pathable_args=["policy"]) def eval(cfg: EvalPipelineConfig): + init_logging() + logging.info(pformat(asdict(cfg))) + # Check device is available device = get_safe_torch_device(cfg.device, log=True) @@ -456,7 +462,6 @@ def eval(cfg: EvalPipelineConfig): device=device, env=env, env_cfg=cfg.env, - # pretrained_policy_name_or_path=str(cfg.policy_path), ) policy.eval() From 26eef6eac5907b433bff14a655bd108d19dcf85d Mon Sep 17 00:00:00 2001 From: Simon Alibert Date: Wed, 8 Jan 2025 13:58:42 +0100 Subject: [PATCH 20/94] Fix end-to-end tests --- Makefile | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/Makefile b/Makefile index 29a60b88cc..0136f39a4b 100644 --- a/Makefile +++ b/Makefile @@ -26,10 +26,11 @@ test-end-to-end: ${MAKE} DEVICE=$(DEVICE) test-diffusion-ete-train ${MAKE} DEVICE=$(DEVICE) test-diffusion-ete-eval ${MAKE} DEVICE=$(DEVICE) test-tdmpc-ete-train - ${MAKE} DEVICE=$(DEVICE) test-tdmpc-ete-train-with-online ${MAKE} DEVICE=$(DEVICE) test-tdmpc-ete-eval - ${MAKE} DEVICE=$(DEVICE) test-default-ete-eval - ${MAKE} DEVICE=$(DEVICE) test-act-pusht-tutorial + +# ${MAKE} DEVICE=$(DEVICE) test-default-ete-eval +# ${MAKE} DEVICE=$(DEVICE) test-tdmpc-ete-train-with-online +# ${MAKE} DEVICE=$(DEVICE) test-act-pusht-tutorial test-act-ete-train: python lerobot/scripts/train.py \ @@ -125,7 +126,7 @@ test-diffusion-ete-train: test-diffusion-ete-eval: python lerobot/scripts/eval.py \ --policy.path=tests/outputs/diffusion/checkpoints/000002/pretrained_model \ - --env.type=aloha \ + --env.type=pusht \ --eval.n_episodes=1 \ --eval.batch_size=1 \ --eval.episode_length=8 \ @@ -190,6 +191,7 @@ test-tdmpc-ete-eval: --device=$(DEVICE) \ --output_dir=tests/outputs/tdmpc_online/ +# TODO: do we keep this one? 
# test-act-pusht-tutorial: # cp examples/advanced/1_train_act_pusht/act_pusht.yaml lerobot/configs/policy/created_by_Makefile.yaml # python lerobot/scripts/train.py \ From b799e02afb759bf3a1f0f96ad26e2be625c665a7 Mon Sep 17 00:00:00 2001 From: Simon Alibert Date: Wed, 8 Jan 2025 14:13:52 +0100 Subject: [PATCH 21/94] Remove amp & add resume test --- Makefile | 36 +----------------------------------- 1 file changed, 1 insertion(+), 35 deletions(-) diff --git a/Makefile b/Makefile index 0136f39a4b..a99422d299 100644 --- a/Makefile +++ b/Makefile @@ -20,9 +20,8 @@ build-gpu: test-end-to-end: ${MAKE} DEVICE=$(DEVICE) test-act-ete-train + ${MAKE} DEVICE=$(DEVICE) test-act-ete-train-resume ${MAKE} DEVICE=$(DEVICE) test-act-ete-eval - ${MAKE} DEVICE=$(DEVICE) test-act-ete-train-amp - ${MAKE} DEVICE=$(DEVICE) test-act-ete-eval-amp ${MAKE} DEVICE=$(DEVICE) test-diffusion-ete-train ${MAKE} DEVICE=$(DEVICE) test-diffusion-ete-eval ${MAKE} DEVICE=$(DEVICE) test-tdmpc-ete-train @@ -68,39 +67,6 @@ test-act-ete-eval: --eval.episode_length=8 \ --device=$(DEVICE) -test-act-ete-train-amp: - python lerobot/scripts/train.py \ - --policy.type=act \ - --policy.dim_model=64 \ - --policy.n_action_steps=20 \ - --policy.chunk_size=20 \ - --env.type=aloha \ - --dataset.repo_id=lerobot/aloha_sim_transfer_cube_human \ - --dataset.image_transforms.enable=true \ - --dataset.episodes="[0]" \ - --offline.steps=2 \ - --online.steps=0 \ - --eval.n_episodes=1 \ - --eval.batch_size=1 \ - --save_checkpoint=true \ - --save_freq=2 \ - --log_freq=1 \ - --batch_size=2 \ - --wandb.enable=false \ - --device=$(DEVICE) \ - --use_amp=true \ - --output_dir=tests/outputs/act_amp/ - -test-act-ete-eval-amp: - python lerobot/scripts/eval.py \ - --policy.path=tests/outputs/act/checkpoints/000002/pretrained_model \ - --env.type=aloha \ - --eval.n_episodes=1 \ - --eval.batch_size=1 \ - --eval.episode_length=8 \ - --use_amp=true \ - --device=$(DEVICE) - test-diffusion-ete-train: python lerobot/scripts/train.py \ --policy.type=diffusion \ From 6c5667a732b06fdf1cd8b3102ee508231964eaff Mon Sep 17 00:00:00 2001 From: Simon Alibert Date: Wed, 8 Jan 2025 14:23:49 +0100 Subject: [PATCH 22/94] Speed-up tests --- Makefile | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/Makefile b/Makefile index a99422d299..39881874f8 100644 --- a/Makefile +++ b/Makefile @@ -38,6 +38,7 @@ test-act-ete-train: --policy.n_action_steps=20 \ --policy.chunk_size=20 \ --env.type=aloha \ + --env.episode_length=5 \ --dataset.repo_id=lerobot/aloha_sim_transfer_cube_human \ --dataset.image_transforms.enable=true \ --dataset.episodes="[0]" \ @@ -62,6 +63,7 @@ test-act-ete-eval: python lerobot/scripts/eval.py \ --policy.path=tests/outputs/act/checkpoints/000004/pretrained_model \ --env.type=aloha \ + --env.episode_length=5 \ --eval.n_episodes=1 \ --eval.batch_size=1 \ --eval.episode_length=8 \ @@ -74,6 +76,7 @@ test-diffusion-ete-train: --policy.diffusion_step_embed_dim=32 \ --policy.num_inference_steps=10 \ --env.type=pusht \ + --env.episode_length=5 \ --dataset.repo_id=lerobot/pusht \ --dataset.image_transforms.enable=true \ --dataset.episodes="[0]" \ @@ -93,6 +96,7 @@ test-diffusion-ete-eval: python lerobot/scripts/eval.py \ --policy.path=tests/outputs/diffusion/checkpoints/000002/pretrained_model \ --env.type=pusht \ + --env.episode_length=5 \ --eval.n_episodes=1 \ --eval.batch_size=1 \ --eval.episode_length=8 \ @@ -103,7 +107,7 @@ test-tdmpc-ete-train: --policy.type=tdmpc \ --env.type=xarm \ --env.task=XarmLift-v0 \ - --env.episode_length=2 \ + 
--env.episode_length=5 \ --dataset.repo_id=lerobot/xarm_lift_medium \ --dataset.image_transforms.enable=true \ --dataset.episodes='[0]' \ @@ -123,6 +127,7 @@ test-tdmpc-ete-eval: python lerobot/scripts/eval.py \ --policy.path=tests/outputs/tdmpc/checkpoints/000002/pretrained_model \ --env.type=xarm \ + --env.episode_length=5 \ --env.task=XarmLift-v0 \ --env.episode_length=2 \ --eval.n_episodes=1 \ From af96b046daac1dbec03a4beaecbe0fcf43f0c5ff Mon Sep 17 00:00:00 2001 From: Simon Alibert Date: Wed, 8 Jan 2025 14:39:00 +0100 Subject: [PATCH 23/94] Fix poetry relax --- poetry.lock | 16 ++++++++-------- pyproject.toml | 2 +- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/poetry.lock b/poetry.lock index 8d86d3ed02..33c4d7f151 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.4 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.5 and should not be changed by hand. [[package]] name = "absl-py" @@ -4260,10 +4260,10 @@ files = [ [package.dependencies] numpy = [ - {version = ">=1.26.0", markers = "python_version >= \"3.12\""}, - {version = ">=1.23.5", markers = "python_version >= \"3.11\" and python_version < \"3.12\""}, {version = ">=1.21.4", markers = "python_version >= \"3.10\" and platform_system == \"Darwin\" and python_version < \"3.11\""}, {version = ">=1.21.2", markers = "platform_system != \"Darwin\" and python_version >= \"3.10\" and python_version < \"3.11\""}, + {version = ">=1.23.5", markers = "python_version >= \"3.11\" and python_version < \"3.12\""}, + {version = ">=1.26.0", markers = "python_version >= \"3.12\""}, ] [[package]] @@ -4284,10 +4284,10 @@ files = [ [package.dependencies] numpy = [ - {version = ">=1.26.0", markers = "python_version >= \"3.12\""}, - {version = ">=1.23.5", markers = "python_version >= \"3.11\" and python_version < \"3.12\""}, {version = ">=1.21.4", markers = "python_version >= \"3.10\" and platform_system == \"Darwin\" and python_version < \"3.11\""}, {version = ">=1.21.2", markers = "platform_system != \"Darwin\" and python_version >= \"3.10\" and python_version < \"3.11\""}, + {version = ">=1.23.5", markers = "python_version >= \"3.11\" and python_version < \"3.12\""}, + {version = ">=1.26.0", markers = "python_version >= \"3.12\""}, ] [[package]] @@ -4376,9 +4376,9 @@ files = [ [package.dependencies] numpy = [ - {version = ">=1.26.0", markers = "python_version >= \"3.12\""}, - {version = ">=1.23.2", markers = "python_version == \"3.11\""}, {version = ">=1.22.4", markers = "python_version < \"3.11\""}, + {version = ">=1.23.2", markers = "python_version == \"3.11\""}, + {version = ">=1.26.0", markers = "python_version >= \"3.12\""}, ] python-dateutil = ">=2.8.2" pytz = ">=2020.1" @@ -7689,4 +7689,4 @@ xarm = ["gym-xarm"] [metadata] lock-version = "2.0" python-versions = ">=3.10,<3.13" -content-hash = "6286d7134f712555abe029b9350f4d37a4a443235477fa1e0f91fa10a7e69789" +content-hash = "a67ba1530855ce3ad03d8ea9629f2fae439d8c5b83702c64f73092ea4f9fa43b" diff --git a/pyproject.toml b/pyproject.toml index 5af59cceb2..30bc7eb96b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -72,7 +72,7 @@ hello-robot-stretch-body = {version = ">=0.7.27", markers = "sys_platform == 'li pyserial = {version = ">=3.5", optional = true} jsonlines = ">=4.0.0" # draccus = ">=0.9.4" # replace when https://github.com/dlwh/draccus/pull/29 is in release -draccus = {git = "https://github.com/dlwh/draccus.git", rev = "55e456a"} +draccus = {git = 
"https://github.com/dlwh/draccus.git"} # draccus = { path = "../draccus/", develop = true} From 4261c5a4b82ae23657a4c174d8f737b4f49d1511 Mon Sep 17 00:00:00 2001 From: Remi Date: Thu, 9 Jan 2025 11:40:29 +0100 Subject: [PATCH 24/94] Remove config yaml for robot devices (#594) Co-authored-by: Simon Alibert --- examples/2_evaluate_pretrained_policy.py | 13 +- examples/3_train_policy.py | 1 + examples/7_get_started_with_real_robot.md | 1 + .../advanced/2_calculate_validation_loss.py | 1 + examples/port_datasets/pusht_zarr.py | 2 +- lerobot/__init__.py | 19 - lerobot/common/datasets/factory.py | 25 - lerobot/common/datasets/transforms.py | 132 ++--- lerobot/common/datasets/utils.py | 2 +- .../v2/batch_convert_dataset_v1_to_v2.py | 8 +- .../datasets/v2/convert_dataset_v1_to_v2.py | 53 +- lerobot/common/envs/factory.py | 13 +- lerobot/common/logger.py | 25 +- .../common/policies/act/configuration_act.py | 4 +- lerobot/common/policies/factory.py | 17 + lerobot/common/policies/normalize.py | 8 +- .../policies/tdmpc/configuration_tdmpc.py | 9 +- .../common/policies/tdmpc/modeling_tdmpc.py | 4 +- lerobot/common/policies/utils.py | 30 - .../policies/vqbet/configuration_vqbet.py | 2 +- .../common/robot_devices/cameras/configs.py | 100 ++++ .../robot_devices/cameras/intelrealsense.py | 69 +-- .../common/robot_devices/cameras/opencv.py | 56 +- lerobot/common/robot_devices/cameras/utils.py | 36 ++ .../common/robot_devices/control_configs.py | 123 +++++ lerobot/common/robot_devices/control_utils.py | 43 +- .../common/robot_devices/motors/configs.py | 27 + .../common/robot_devices/motors/dynamixel.py | 15 +- .../common/robot_devices/motors/feetech.py | 15 +- lerobot/common/robot_devices/motors/utils.py | 37 ++ .../common/robot_devices/robots/configs.py | 516 ++++++++++++++++++ .../common/robot_devices/robots/factory.py | 9 - .../robot_devices/robots/manipulator.py | 141 ++--- .../common/robot_devices/robots/stretch.py | 22 +- lerobot/common/robot_devices/robots/utils.py | 44 ++ lerobot/common/utils/utils.py | 18 - lerobot/configs/default.py | 2 +- lerobot/configs/eval.py | 15 +- lerobot/configs/policies.py | 11 +- lerobot/configs/policy/act_aloha_real.yaml | 7 - lerobot/configs/training.py | 13 +- lerobot/scripts/control_robot.py | 461 +++++----------- lerobot/scripts/control_sim_robot.py | 6 +- lerobot/scripts/train.py | 4 +- lerobot/scripts/visualize_image_transforms.py | 171 +++--- tests/conftest.py | 10 +- tests/scripts/save_dataset_to_safetensors.py | 1 + .../save_image_transforms_to_safetensors.py | 57 +- tests/scripts/save_policy_to_safetensors.py | 101 ++-- tests/test_cameras.py | 18 +- tests/test_control_robot.py | 277 +++++----- tests/test_datasets.py | 55 +- tests/test_delta_timestamps.py | 22 +- tests/test_envs.py | 11 +- tests/test_image_transforms.py | 250 ++++++--- tests/test_policies.py | 307 ++++++----- tests/test_robots.py | 23 +- tests/test_utils.py | 9 - tests/utils.py | 80 +-- 59 files changed, 1956 insertions(+), 1595 deletions(-) create mode 100644 lerobot/common/robot_devices/cameras/configs.py create mode 100644 lerobot/common/robot_devices/control_configs.py create mode 100644 lerobot/common/robot_devices/motors/configs.py create mode 100644 lerobot/common/robot_devices/robots/configs.py delete mode 100644 lerobot/common/robot_devices/robots/factory.py diff --git a/examples/2_evaluate_pretrained_policy.py b/examples/2_evaluate_pretrained_policy.py index b2fe1dba18..5c3640682e 100644 --- a/examples/2_evaluate_pretrained_policy.py +++ b/examples/2_evaluate_pretrained_policy.py @@ 
-1,6 +1,11 @@ """ This scripts demonstrates how to evaluate a pretrained policy from the HuggingFace Hub or from your local training outputs directory. In the latter case, you might want to run examples/3_train_policy.py first. + +It requires the installation of the 'gym_pusht' simulation environment. Install it by running: +```bash +pip install -e ".[pusht]"` +``` """ from pathlib import Path @@ -10,7 +15,6 @@ import imageio import numpy import torch -from huggingface_hub import snapshot_download from lerobot.common.policies.diffusion.modeling_diffusion import DiffusionPolicy @@ -18,11 +22,12 @@ output_directory = Path("outputs/eval/example_pusht_diffusion") output_directory.mkdir(parents=True, exist_ok=True) -# Download the diffusion policy for pusht environment -pretrained_policy_path = Path(snapshot_download("lerobot/diffusion_pusht")) -# OR uncomment the following to evaluate a policy from the local outputs/train folder. +# Provide the [hugging face repo id](https://huggingface.co/lerobot/diffusion_pusht): +pretrained_policy_path = "lerobot/diffusion_pusht" +# OR a path to a local outputs/train folder. # pretrained_policy_path = Path("outputs/train/example_pusht_diffusion") +# TODO(alibert, rcadene): fix this file policy = DiffusionPolicy.from_pretrained(pretrained_policy_path) policy.eval() diff --git a/examples/3_train_policy.py b/examples/3_train_policy.py index 935ab2dbff..c52ee32191 100644 --- a/examples/3_train_policy.py +++ b/examples/3_train_policy.py @@ -40,6 +40,7 @@ # For this example, no arguments need to be passed because the defaults are set up for PushT. # If you're doing something different, you will likely need to change at least some of the defaults. cfg = DiffusionConfig() +cfg.parse_features_from_dataset(dataset.meta) policy = DiffusionPolicy(cfg, dataset_stats=dataset.meta.stats) policy.train() policy.to(device) diff --git a/examples/7_get_started_with_real_robot.md b/examples/7_get_started_with_real_robot.md index 1c5cd10476..c86508e7c7 100644 --- a/examples/7_get_started_with_real_robot.md +++ b/examples/7_get_started_with_real_robot.md @@ -946,6 +946,7 @@ fps = 30 device = "cuda" # TODO: On Mac, use "mps" or "cpu" ckpt_path = "outputs/train/act_koch_test/checkpoints/last/pretrained_model" +# TODO(alibert, rcadene): fix this file policy = ACTPolicy.from_pretrained(ckpt_path) policy.to(device) diff --git a/examples/advanced/2_calculate_validation_loss.py b/examples/advanced/2_calculate_validation_loss.py index 00ba9930fa..25587b2927 100644 --- a/examples/advanced/2_calculate_validation_loss.py +++ b/examples/advanced/2_calculate_validation_loss.py @@ -24,6 +24,7 @@ # OR uncomment the following to evaluate a policy from the local outputs/train folder. 
# pretrained_policy_path = Path("outputs/train/example_pusht_diffusion") +# TODO(alibert, rcadene): fix this file policy = DiffusionPolicy.from_pretrained(pretrained_policy_path) policy.eval() policy.to(device) diff --git a/examples/port_datasets/pusht_zarr.py b/examples/port_datasets/pusht_zarr.py index 60df984054..1506f42733 100644 --- a/examples/port_datasets/pusht_zarr.py +++ b/examples/port_datasets/pusht_zarr.py @@ -44,7 +44,7 @@ "dtype": None, "shape": (3, 96, 96), "names": [ - "channel", + "channels", "height", "width", ], diff --git a/lerobot/__init__.py b/lerobot/__init__.py index 3d5bb6aaa6..d61e4853e6 100644 --- a/lerobot/__init__.py +++ b/lerobot/__init__.py @@ -58,7 +58,6 @@ ], "pusht": ["PushT-v0"], "xarm": ["XarmLift-v0"], - "dora_aloha_real": ["DoraAloha-v0", "DoraKoch-v0", "DoraReachy2-v0"], } available_envs = list(available_tasks_per_env.keys()) @@ -86,23 +85,6 @@ "lerobot/xarm_push_medium_image", "lerobot/xarm_push_medium_replay_image", ], - "dora_aloha_real": [ - "lerobot/aloha_static_battery", - "lerobot/aloha_static_candy", - "lerobot/aloha_static_coffee", - "lerobot/aloha_static_coffee_new", - "lerobot/aloha_static_cups_open", - "lerobot/aloha_static_fork_pick_up", - "lerobot/aloha_static_pingpong_test", - "lerobot/aloha_static_pro_pencil", - "lerobot/aloha_static_screw_driver", - "lerobot/aloha_static_tape", - "lerobot/aloha_static_thread_velcro", - "lerobot/aloha_static_towel", - "lerobot/aloha_static_vinh_cup", - "lerobot/aloha_static_vinh_cup_left", - "lerobot/aloha_static_ziploc_slide", - ], } available_real_world_datasets = [ @@ -221,7 +203,6 @@ "xarm": ["tdmpc"], "koch_real": ["act_koch_real"], "aloha_real": ["act_aloha_real"], - "dora_aloha_real": ["act_aloha_real"], } env_task_pairs = [(env, task) for env, tasks in available_tasks_per_env.items() for task in tasks] diff --git a/lerobot/common/datasets/factory.py b/lerobot/common/datasets/factory.py index a11c4f8604..e8f047908c 100644 --- a/lerobot/common/datasets/factory.py +++ b/lerobot/common/datasets/factory.py @@ -70,31 +70,11 @@ def make_dataset(cfg: TrainPipelineConfig) -> LeRobotDataset | MultiLeRobotDatas Returns: The LeRobotDataset. """ - # A soft check to warn if the environment matches the dataset. Don't check if we are using a real world env (dora). - if cfg.env.type != "dora": - if isinstance(cfg.dataset.repo_id, str): - dataset_repo_ids = [cfg.dataset.repo_id] # single dataset - elif isinstance(cfg.dataset.repo_id, list): - dataset_repo_ids = cfg.dataset.repo_id # multiple datasets - else: - raise ValueError( - "Expected cfg.dataset.repo_id to be either a single string to load one dataset or a list of " - "strings to load multiple datasets." - ) - - for dataset_repo_id in dataset_repo_ids: - if cfg.env.type not in dataset_repo_id: - logging.warning( - f"There might be a mismatch between your training dataset ({dataset_repo_id=}) and your " - f"environment ({cfg.env.type=})." 
- ) - image_transforms = ( ImageTransforms(cfg.dataset.image_transforms) if cfg.dataset.image_transforms.enable else None ) if isinstance(cfg.dataset.repo_id, str): - # TODO (aliberts): add 'episodes' arg from config after removing hydra ds_meta = LeRobotDatasetMetadata(cfg.dataset.repo_id, local_files_only=cfg.dataset.local_files_only) delta_timestamps = resolve_delta_timestamps(cfg.policy, ds_meta) dataset = LeRobotDataset( @@ -122,10 +102,5 @@ def make_dataset(cfg: TrainPipelineConfig) -> LeRobotDataset | MultiLeRobotDatas for key in dataset.meta.camera_keys: for stats_type, stats in IMAGENET_STATS.items(): dataset.meta.stats[key][stats_type] = torch.tensor(stats, dtype=torch.float32) - # for key, stats_dict in cfg.override_dataset_stats.items(): - # for stats_type, listconfig in stats_dict.items(): - # # example of stats_type: min, max, mean, std - # stats = OmegaConf.to_container(listconfig, resolve=True) - # dataset.meta.stats[key][stats_type] = torch.tensor(stats, dtype=torch.float32) return dataset diff --git a/lerobot/common/datasets/transforms.py b/lerobot/common/datasets/transforms.py index 6c4304d25a..ef67085ead 100644 --- a/lerobot/common/datasets/transforms.py +++ b/lerobot/common/datasets/transforms.py @@ -66,6 +66,8 @@ def __init__( self.n_subset = n_subset self.random_order = random_order + self.selected_transforms = None + def forward(self, *inputs: Any) -> Any: needs_unpacking = len(inputs) > 1 @@ -73,9 +75,9 @@ def forward(self, *inputs: Any) -> Any: if not self.random_order: selected_indices = selected_indices.sort().values - selected_transforms = [self.transforms[i] for i in selected_indices] + self.selected_transforms = [self.transforms[i] for i in selected_indices] - for transform in selected_transforms: + for transform in self.selected_transforms: outputs = transform(*inputs) inputs = outputs if needs_unpacking else (outputs,) @@ -138,67 +140,6 @@ def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any: return self._call_kernel(F.adjust_sharpness, inpt, sharpness_factor=sharpness_factor) -# TODO(aliberts): Remove -def get_image_transforms( - brightness_weight: float = 1.0, - brightness_min_max: tuple[float, float] | None = None, - contrast_weight: float = 1.0, - contrast_min_max: tuple[float, float] | None = None, - saturation_weight: float = 1.0, - saturation_min_max: tuple[float, float] | None = None, - hue_weight: float = 1.0, - hue_min_max: tuple[float, float] | None = None, - sharpness_weight: float = 1.0, - sharpness_min_max: tuple[float, float] | None = None, - max_num_transforms: int | None = None, - random_order: bool = False, -): - def check_value(name, weight, min_max): - if min_max is not None: - if len(min_max) != 2: - raise ValueError( - f"`{name}_min_max` is expected to be a tuple of 2 dimensions, but {min_max} provided." - ) - if weight < 0.0: - raise ValueError( - f"`{name}_weight` is expected to be 0 or positive, but is negative ({weight})." 
- ) - - check_value("brightness", brightness_weight, brightness_min_max) - check_value("contrast", contrast_weight, contrast_min_max) - check_value("saturation", saturation_weight, saturation_min_max) - check_value("hue", hue_weight, hue_min_max) - check_value("sharpness", sharpness_weight, sharpness_min_max) - - weights = [] - transforms = [] - if brightness_min_max is not None and brightness_weight > 0.0: - weights.append(brightness_weight) - transforms.append(v2.ColorJitter(brightness=brightness_min_max)) - if contrast_min_max is not None and contrast_weight > 0.0: - weights.append(contrast_weight) - transforms.append(v2.ColorJitter(contrast=contrast_min_max)) - if saturation_min_max is not None and saturation_weight > 0.0: - weights.append(saturation_weight) - transforms.append(v2.ColorJitter(saturation=saturation_min_max)) - if hue_min_max is not None and hue_weight > 0.0: - weights.append(hue_weight) - transforms.append(v2.ColorJitter(hue=hue_min_max)) - if sharpness_min_max is not None and sharpness_weight > 0.0: - weights.append(sharpness_weight) - transforms.append(SharpnessJitter(sharpness=sharpness_min_max)) - - n_subset = len(transforms) - if max_num_transforms is not None: - n_subset = min(n_subset, max_num_transforms) - - if n_subset == 0: - return v2.Identity() - else: - # TODO(rcadene, aliberts): add v2.ToDtype float16? - return RandomSubsetApply(transforms, p=weights, n_subset=n_subset, random_order=random_order) - - @dataclass class ImageTransformConfig: """ @@ -234,79 +175,74 @@ class ImageTransformsConfig: # By default, transforms are applied in Torchvision's suggested order (shown below). # Set this to True to apply them in a random order. random_order: bool = False - tfs: list[ImageTransformConfig] = field( - default_factory=lambda: [ - ImageTransformConfig( + tfs: dict[str, ImageTransformConfig] = field( + default_factory=lambda: { + "brightness": ImageTransformConfig( weight=1.0, type="ColorJitter", kwargs={"brightness": (0.8, 1.2)}, ), - ImageTransformConfig( + "contrast": ImageTransformConfig( weight=1.0, type="ColorJitter", kwargs={"contrast": (0.8, 1.2)}, ), - ImageTransformConfig( + "saturation": ImageTransformConfig( weight=1.0, type="ColorJitter", kwargs={"saturation": (0.5, 1.5)}, ), - ImageTransformConfig( + "hue": ImageTransformConfig( weight=1.0, type="ColorJitter", kwargs={"hue": (-0.05, 0.05)}, ), - ImageTransformConfig( + "sharpness": ImageTransformConfig( weight=1.0, type="SharpnessJitter", kwargs={"sharpness": (0.5, 1.5)}, ), - ] + } ) +def make_transform_from_config(cfg: ImageTransformConfig): + if cfg.type == "Identity": + return v2.Identity(**cfg.kwargs) + elif cfg.type == "ColorJitter": + return v2.ColorJitter(**cfg.kwargs) + elif cfg.type == "SharpnessJitter": + return SharpnessJitter(**cfg.kwargs) + else: + raise ValueError(f"Transform '{cfg.type}' is not valid.") + + class ImageTransforms(Transform): """A class to compose image transforms based on configuration.""" - _registry = { - "Identity": v2.Identity, - "ColorJitter": v2.ColorJitter, - "SharpnessJitter": SharpnessJitter, - } - def __init__(self, cfg: ImageTransformsConfig) -> None: super().__init__() self._cfg = cfg - weights = [] - transforms = [] - for tf_cfg in cfg.tfs: + self.weights = [] + self.transforms = {} + for tf_name, tf_cfg in cfg.tfs.items(): if tf_cfg.weight <= 0.0: continue - transform_cls = self._registry.get(tf_cfg.type) - if transform_cls is None: - available_transforms = ", ".join(self._registry.keys()) - raise ValueError( - f"Transform '{tf_cfg.type}' not found in 
the registry. " - f"Available transforms are: {available_transforms}" - ) - - # Instantiate the transform - transform_instance = transform_cls(**tf_cfg.kwargs) - transforms.append(transform_instance) - weights.append(tf_cfg.weight) + self.transforms[tf_name] = make_transform_from_config(tf_cfg) + self.weights.append(tf_cfg.weight) - n_subset = min(len(transforms), cfg.max_num_transforms) + n_subset = min(len(self.transforms), cfg.max_num_transforms) if n_subset == 0 or not cfg.enable: - self.transform = v2.Identity() + self.tf = v2.Identity() else: - self.transform = RandomSubsetApply( - transforms=transforms, - p=weights, + self.tf = RandomSubsetApply( + transforms=list(self.transforms.values()), + p=self.weights, n_subset=n_subset, random_order=cfg.random_order, ) def forward(self, *inputs: Any) -> Any: - return self.transform(*inputs) + return self.tf(*inputs) diff --git a/lerobot/common/datasets/utils.py b/lerobot/common/datasets/utils.py index d1cd4269de..1850c8aa46 100644 --- a/lerobot/common/datasets/utils.py +++ b/lerobot/common/datasets/utils.py @@ -449,7 +449,7 @@ def check_delta_timestamps( def get_delta_indices(delta_timestamps: dict[str, list[float]], fps: int) -> dict[str, list[int]]: delta_indices = {} for key, delta_ts in delta_timestamps.items(): - delta_indices[key] = (torch.tensor(delta_ts) * fps).long().tolist() + delta_indices[key] = [round(d * fps) for d in delta_ts] return delta_indices diff --git a/lerobot/common/datasets/v2/batch_convert_dataset_v1_to_v2.py b/lerobot/common/datasets/v2/batch_convert_dataset_v1_to_v2.py index c8da2fe149..f126ffd20f 100644 --- a/lerobot/common/datasets/v2/batch_convert_dataset_v1_to_v2.py +++ b/lerobot/common/datasets/v2/batch_convert_dataset_v1_to_v2.py @@ -26,13 +26,13 @@ from textwrap import dedent from lerobot import available_datasets -from lerobot.common.datasets.v2.convert_dataset_v1_to_v2 import convert_dataset, parse_robot_config +from lerobot.common.datasets.v2.convert_dataset_v1_to_v2 import convert_dataset +from lerobot.common.robot_devices.robots.configs import AlohaRobotConfig LOCAL_DIR = Path("data/") -ALOHA_CONFIG = Path("lerobot/configs/robot/aloha.yaml") ALOHA_MOBILE_INFO = { - "robot_config": parse_robot_config(ALOHA_CONFIG), + "robot_config": AlohaRobotConfig(), "license": "mit", "url": "https://mobile-aloha.github.io/", "paper": "https://arxiv.org/abs/2401.02117", @@ -45,7 +45,7 @@ }""").lstrip(), } ALOHA_STATIC_INFO = { - "robot_config": parse_robot_config(ALOHA_CONFIG), + "robot_config": AlohaRobotConfig(), "license": "mit", "url": "https://tonyzhaozh.github.io/aloha/", "paper": "https://arxiv.org/abs/2304.13705", diff --git a/lerobot/common/datasets/v2/convert_dataset_v1_to_v2.py b/lerobot/common/datasets/v2/convert_dataset_v1_to_v2.py index bf135043b0..62ca9932c7 100644 --- a/lerobot/common/datasets/v2/convert_dataset_v1_to_v2.py +++ b/lerobot/common/datasets/v2/convert_dataset_v1_to_v2.py @@ -141,7 +141,8 @@ get_image_pixel_channels, get_video_info, ) -from lerobot.common.utils.utils import init_hydra_config +from lerobot.common.robot_devices.robots.configs import RobotConfig +from lerobot.common.robot_devices.robots.utils import make_robot_config V16 = "v1.6" V20 = "v2.0" @@ -152,19 +153,18 @@ V1_STATS_PATH = "meta_data/stats.safetensors" -def parse_robot_config(config_path: Path, config_overrides: list[str] | None = None) -> tuple[str, dict]: - robot_cfg = init_hydra_config(config_path, config_overrides) - if robot_cfg["robot_type"] in ["aloha", "koch"]: +def parse_robot_config(robot_cfg: RobotConfig) -> 
tuple[str, dict]: + if robot_cfg.type in ["aloha", "koch"]: state_names = [ - f"{arm}_{motor}" if len(robot_cfg["follower_arms"]) > 1 else motor - for arm in robot_cfg["follower_arms"] - for motor in robot_cfg["follower_arms"][arm]["motors"] + f"{arm}_{motor}" if len(robot_cfg.follower_arms) > 1 else motor + for arm in robot_cfg.follower_arms + for motor in robot_cfg.follower_arms[arm].motors ] action_names = [ # f"{arm}_{motor}" for arm in ["left", "right"] for motor in robot_cfg["leader_arms"][arm]["motors"] - f"{arm}_{motor}" if len(robot_cfg["leader_arms"]) > 1 else motor - for arm in robot_cfg["leader_arms"] - for motor in robot_cfg["leader_arms"][arm]["motors"] + f"{arm}_{motor}" if len(robot_cfg.leader_arms) > 1 else motor + for arm in robot_cfg.leader_arms + for motor in robot_cfg.leader_arms[arm].motors ] # elif robot_cfg["robot_type"] == "stretch3": TODO else: @@ -173,7 +173,7 @@ def parse_robot_config(config_path: Path, config_overrides: list[str] | None = N ) return { - "robot_type": robot_cfg["robot_type"], + "robot_type": robot_cfg.type, "names": { "observation.state": state_names, "observation.effort": state_names, @@ -203,7 +203,10 @@ def convert_stats_to_json(v1_dir: Path, v2_dir: Path) -> None: torch.testing.assert_close(stats_json[key], stats[key]) -def get_features_from_hf_dataset(dataset: Dataset, robot_config: dict | None = None) -> dict[str, list]: +def get_features_from_hf_dataset( + dataset: Dataset, robot_config: RobotConfig | None = None +) -> dict[str, list]: + robot_config = parse_robot_config(robot_config) features = {} for key, ft in dataset.features.items(): if isinstance(ft, datasets.Value): @@ -224,11 +227,11 @@ def get_features_from_hf_dataset(dataset: Dataset, robot_config: dict | None = N image = dataset[0][key] # Assuming first row channels = get_image_pixel_channels(image) shape = (image.height, image.width, channels) - names = ["height", "width", "channel"] + names = ["height", "width", "channels"] elif ft._type == "VideoFrame": dtype = "video" shape = None # Add shape later - names = ["height", "width", "channel"] + names = ["height", "width", "channels"] features[key] = { "dtype": dtype, @@ -436,7 +439,7 @@ def convert_dataset( single_task: str | None = None, tasks_path: Path | None = None, tasks_col: Path | None = None, - robot_config: dict | None = None, + robot_config: RobotConfig | None = None, test_branch: str | None = None, **card_kwargs, ): @@ -532,7 +535,7 @@ def convert_dataset( episode_lengths = split_parquet_by_episodes(dataset, total_episodes, total_chunks, v20_dir) if robot_config is not None: - robot_type = robot_config["robot_type"] + robot_type = robot_config.type repo_tags = [robot_type] else: robot_type = "unknown" @@ -621,16 +624,10 @@ def main(): help="The path to a .json file containing one language instruction for each episode_index", ) parser.add_argument( - "--robot-config", - type=Path, - default=None, - help="Path to the robot's config yaml the dataset during conversion.", - ) - parser.add_argument( - "--robot-overrides", + "--robot", type=str, - nargs="*", - help="Any key=value arguments to override the robot config values (use dots for.nested=overrides)", + default=None, + help="Robot config used for the dataset during conversion (e.g. 
'koch', 'aloha', 'so100', etc.)", ) parser.add_argument( "--local-dir", @@ -655,8 +652,10 @@ def main(): if not args.local_dir: args.local_dir = Path("/tmp/lerobot_dataset_v2") - robot_config = parse_robot_config(args.robot_config, args.robot_overrides) if args.robot_config else None - del args.robot_config, args.robot_overrides + if args.robot is not None: + robot_config = make_robot_config(args.robot) + + del args.robot convert_dataset(**vars(args), robot_config=robot_config) diff --git a/lerobot/common/envs/factory.py b/lerobot/common/envs/factory.py index f9a4dfcecf..f505f891a9 100644 --- a/lerobot/common/envs/factory.py +++ b/lerobot/common/envs/factory.py @@ -17,7 +17,18 @@ import gymnasium as gym -from lerobot.common.envs.configs import EnvConfig +from lerobot.common.envs.configs import AlohaEnv, EnvConfig, PushtEnv, XarmEnv + + +def make_env_config(env_type: str, **kwargs) -> EnvConfig: + if env_type == "aloha": + return AlohaEnv(**kwargs) + elif env_type == "pusht": + return PushtEnv(**kwargs) + elif env_type == "xarm": + return XarmEnv(**kwargs) + else: + raise ValueError(f"Policy type '{env_type}' is not available.") def make_env(cfg: EnvConfig, n_envs: int = 1, use_async_envs: bool = False) -> gym.vector.VectorEnv | None: diff --git a/lerobot/common/logger.py b/lerobot/common/logger.py index 31f90a85be..e39cdb216b 100644 --- a/lerobot/common/logger.py +++ b/lerobot/common/logger.py @@ -50,9 +50,10 @@ def cfg_to_group(cfg: DictConfig, return_list: bool = False) -> list[str] | str: lst = [ f"policy:{cfg.policy.type}", f"dataset:{cfg.dataset.repo_id}", - f"env:{cfg.env.type}", f"seed:{cfg.seed}", ] + if cfg.env is not None: + lst.append(f"env:{cfg.env.type}") return lst if return_list else "-".join(lst) @@ -158,7 +159,7 @@ def save_model(self, save_dir: Path, policy: Policy, wandb_artifact_name: str | """ self.checkpoints_dir.mkdir(parents=True, exist_ok=True) - # register_features_types() + register_features_types() policy.save_pretrained(save_dir) # Also save the full config for the env configuration. with open(save_dir / TRAIN_CONFIG_FILE, "w") as f: @@ -175,18 +176,18 @@ def save_training_state( self, save_dir: Path, train_step: int, - optimizer: Optimizer, - scheduler: LRScheduler | None, + optimizer: Optimizer | None = None, + scheduler: LRScheduler | None = None, ): """Checkpoint the global training_step, optimizer state, scheduler state, and random state. All of these are saved as "training_state.pth" under the checkpoint directory. 
""" - training_state = { - "step": train_step, - "optimizer": optimizer.state_dict(), - **get_global_random_state(), - } + training_state = {} + training_state["step"] = train_step + training_state.update(get_global_random_state()) + if optimizer is not None: + training_state["optimizer"] = optimizer.state_dict() if scheduler is not None: training_state["scheduler"] = scheduler.state_dict() torch.save(training_state, save_dir / self.training_state_file_name) @@ -194,10 +195,10 @@ def save_training_state( def save_checkpoint( self, train_step: int, - policy: Policy, - optimizer: Optimizer, - scheduler: LRScheduler | None, identifier: str, + policy: Policy, + optimizer: Optimizer | None = None, + scheduler: LRScheduler | None = None, ): """Checkpoint the model weights and the training state.""" checkpoint_dir = self.checkpoints_dir / str(identifier) diff --git a/lerobot/common/policies/act/configuration_act.py b/lerobot/common/policies/act/configuration_act.py index 7f177f6a5e..e26efa8a96 100644 --- a/lerobot/common/policies/act/configuration_act.py +++ b/lerobot/common/policies/act/configuration_act.py @@ -134,8 +134,8 @@ class ACTConfig(PretrainedConfig): kl_weight: float = 10.0 # Training preset - optimizer_lr = 1e-5 - optimizer_weight_decay = 1e-4 + optimizer_lr: float = 1e-5 + optimizer_weight_decay: float = 1e-4 optimizer_lr_backbone: float = 1e-5 def __post_init__(self): diff --git a/lerobot/common/policies/factory.py b/lerobot/common/policies/factory.py index 5036269111..6bc445685b 100644 --- a/lerobot/common/policies/factory.py +++ b/lerobot/common/policies/factory.py @@ -19,7 +19,11 @@ from lerobot.common.datasets.lerobot_dataset import LeRobotDatasetMetadata from lerobot.common.envs.configs import EnvConfig +from lerobot.common.policies.act.configuration_act import ACTConfig +from lerobot.common.policies.diffusion.configuration_diffusion import DiffusionConfig from lerobot.common.policies.policy_protocol import Policy +from lerobot.common.policies.tdmpc.configuration_tdmpc import TDMPCConfig +from lerobot.common.policies.vqbet.configuration_vqbet import VQBeTConfig from lerobot.configs.policies import PretrainedConfig @@ -45,6 +49,19 @@ def get_policy_class(name: str) -> Policy: raise NotImplementedError(f"Policy with name {name} is not implemented.") +def make_policy_config(policy_type: str, **kwargs) -> PretrainedConfig: + if policy_type == "tdmpc": + return TDMPCConfig(**kwargs) + elif policy_type == "diffusion": + return DiffusionConfig(**kwargs) + elif policy_type == "act": + return ACTConfig(**kwargs) + elif policy_type == "vqbet": + return VQBeTConfig(**kwargs) + else: + raise ValueError(f"Policy type '{policy_type}' is not available.") + + def make_policy( cfg: PretrainedConfig, device: str, diff --git a/lerobot/common/policies/normalize.py b/lerobot/common/policies/normalize.py index 59938ef9e7..d9bba38401 100644 --- a/lerobot/common/policies/normalize.py +++ b/lerobot/common/policies/normalize.py @@ -37,6 +37,9 @@ def create_stats_buffers( stats_buffers = {} for ft in features: + if ft.normalization_mode is None: + continue + assert isinstance(ft.normalization_mode, NormalizationMode) shape = tuple(ft.shape) @@ -73,7 +76,7 @@ def create_stats_buffers( } ) - if stats is not None: + if stats: # Note: The clone is needed to make sure that the logic in save_pretrained doesn't see duplicated # tensors anywhere (for example, when we use the same stats for normalization and # unnormalization). 
See the logic here @@ -134,6 +137,9 @@ def __init__( def forward(self, batch: dict[str, Tensor]) -> dict[str, Tensor]: batch = dict(batch) # shallow copy avoids mutating the input batch for ft in self.features: + if ft.normalization_mode is None: + continue + buffer = getattr(self, "buffer_" + ft.key.replace(".", "_")) if ft.normalization_mode is NormalizationMode.MEAN_STD: diff --git a/lerobot/common/policies/tdmpc/configuration_tdmpc.py b/lerobot/common/policies/tdmpc/configuration_tdmpc.py index 705a0e5e2d..a5d305019a 100644 --- a/lerobot/common/policies/tdmpc/configuration_tdmpc.py +++ b/lerobot/common/policies/tdmpc/configuration_tdmpc.py @@ -113,15 +113,14 @@ class TDMPCConfig(PretrainedConfig): horizon: int = 5 n_action_steps: int = 1 - normalization_mapping: dict[str, NormalizationMode] = field( + normalization_mapping: dict[str, NormalizationMode | None] = field( default_factory=lambda: { - "VISUAL": NormalizationMode.MIN_MAX, - "STATE": NormalizationMode.MIN_MAX, - "ENV": NormalizationMode.MIN_MAX, + "VISUAL": None, + "STATE": None, + "ENV": None, "ACTION": NormalizationMode.MIN_MAX, } ) - normalize_inputs: bool = True # Architecture / modeling. # Neural networks. diff --git a/lerobot/common/policies/tdmpc/modeling_tdmpc.py b/lerobot/common/policies/tdmpc/modeling_tdmpc.py index e124c25d88..f7f4c7b96f 100644 --- a/lerobot/common/policies/tdmpc/modeling_tdmpc.py +++ b/lerobot/common/policies/tdmpc/modeling_tdmpc.py @@ -79,9 +79,7 @@ def __init__(self, config: TDMPCConfig, dataset_stats: dict[str, dict[str, Tenso config.validate_features() self.config = config - self.normalize_inputs = ( - Normalize(config.input_features, dataset_stats) if config.normalize_inputs else nn.Identity() - ) + self.normalize_inputs = Normalize(config.input_features, dataset_stats) self.normalize_targets = Normalize(config.output_features, dataset_stats) self.unnormalize_outputs = Unnormalize(config.output_features, dataset_stats) diff --git a/lerobot/common/policies/utils.py b/lerobot/common/policies/utils.py index 799443a48a..c06e620ba1 100644 --- a/lerobot/common/policies/utils.py +++ b/lerobot/common/policies/utils.py @@ -13,13 +13,8 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -import logging -from pathlib import Path import torch -from huggingface_hub import snapshot_download -from huggingface_hub.errors import RepositoryNotFoundError -from huggingface_hub.utils._validators import HFValidationError from torch import nn @@ -70,28 +65,3 @@ def get_output_shape(module: nn.Module, input_shape: tuple) -> tuple: with torch.inference_mode(): output = module(dummy_input) return tuple(output.shape) - - -def get_pretrained_policy_path(pretrained_policy_name_or_path, revision=None): - try: - pretrained_policy_path = Path( - snapshot_download(str(pretrained_policy_name_or_path), revision=revision) - ) - except (HFValidationError, RepositoryNotFoundError) as e: - if isinstance(e, HFValidationError): - error_message = ( - "The provided pretrained_policy_name_or_path is not a valid Hugging Face Hub repo ID." - ) - else: - error_message = ( - "The provided pretrained_policy_name_or_path was not found on the Hugging Face Hub." 
- ) - - logging.warning(f"{error_message} Treating it as a local directory.") - pretrained_policy_path = Path(pretrained_policy_name_or_path) - if not pretrained_policy_path.is_dir() or not pretrained_policy_path.exists(): - raise ValueError( - "The provided pretrained_policy_name_or_path is not a valid/existing Hugging Face Hub " - "repo ID, nor is it an existing local directory." - ) - return pretrained_policy_path diff --git a/lerobot/common/policies/vqbet/configuration_vqbet.py b/lerobot/common/policies/vqbet/configuration_vqbet.py index c6f5d542fb..3f87572977 100644 --- a/lerobot/common/policies/vqbet/configuration_vqbet.py +++ b/lerobot/common/policies/vqbet/configuration_vqbet.py @@ -168,7 +168,7 @@ def validate_features(self) -> None: # Note: this check was previously performed inside VQBeTRgbEncoder in the form of # assert len(image_keys) == 1 if not len(self.image_features) == 1: - raise ValueError("You must provide at least one image among the inputs.") + raise ValueError("You must provide only one image among the inputs.") if self.crop_shape is not None: for image_ft in self.image_features: diff --git a/lerobot/common/robot_devices/cameras/configs.py b/lerobot/common/robot_devices/cameras/configs.py new file mode 100644 index 0000000000..6acdbd3eac --- /dev/null +++ b/lerobot/common/robot_devices/cameras/configs.py @@ -0,0 +1,100 @@ +import abc +from dataclasses import dataclass + +import draccus + + +@dataclass +class CameraConfig(draccus.ChoiceRegistry, abc.ABC): + @property + def type(self) -> str: + return self.get_choice_name(self.__class__) + + +@CameraConfig.register_subclass("opencv") +@dataclass +class OpenCVCameraConfig(CameraConfig): + """ + Example of tested options for Intel Real Sense D405: + + ```python + OpenCVCameraConfig(0, 30, 640, 480) + OpenCVCameraConfig(0, 60, 640, 480) + OpenCVCameraConfig(0, 90, 640, 480) + OpenCVCameraConfig(0, 30, 1280, 720) + ``` + """ + + camera_index: int + fps: int | None = None + width: int | None = None + height: int | None = None + color_mode: str = "rgb" + channels: int | None = None + rotation: int | None = None + mock: bool = False + + def __post_init__(self): + if self.color_mode not in ["rgb", "bgr"]: + raise ValueError( + f"`color_mode` is expected to be 'rgb' or 'bgr', but {self.color_mode} is provided." 
+ ) + + self.channels = 3 + + if self.rotation not in [-90, None, 90, 180]: + raise ValueError(f"`rotation` must be in [-90, None, 90, 180] (got {self.rotation})") + + +@CameraConfig.register_subclass("intelrealsense") +@dataclass +class IntelRealSenseCameraConfig(CameraConfig): + """ + Example of tested options for Intel Real Sense D405: + + ```python + IntelRealSenseCameraConfig(128422271347, 30, 640, 480) + IntelRealSenseCameraConfig(128422271347, 60, 640, 480) + IntelRealSenseCameraConfig(128422271347, 90, 640, 480) + IntelRealSenseCameraConfig(128422271347, 30, 1280, 720) + IntelRealSenseCameraConfig(128422271347, 30, 640, 480, use_depth=True) + IntelRealSenseCameraConfig(128422271347, 30, 640, 480, rotation=90) + ``` + """ + + name: str | None = None + serial_number: int | None = None + fps: int | None = None + width: int | None = None + height: int | None = None + color_mode: str = "rgb" + channels: int | None = None + use_depth: bool = False + force_hardware_reset: bool = True + rotation: int | None = None + mock: bool = False + + def __post_init__(self): + # bool is stronger than is None, since it works with empty strings + if bool(self.name) and bool(self.serial_number): + raise ValueError( + f"One of them must be set: name or serial_number, but {self.name=} and {self.serial_number=} provided." + ) + + if self.color_mode not in ["rgb", "bgr"]: + raise ValueError( + f"`color_mode` is expected to be 'rgb' or 'bgr', but {self.color_mode} is provided." + ) + + self.channels = 3 + + at_least_one_is_not_none = self.fps is not None or self.width is not None or self.height is not None + at_least_one_is_none = self.fps is None or self.width is None or self.height is None + if at_least_one_is_not_none and at_least_one_is_none: + raise ValueError( + "For `fps`, `width` and `height`, either all of them need to be set, or none of them, " + f"but {self.fps=}, {self.width=}, {self.height=} were provided." 
+ ) + + if self.rotation not in [-90, None, 90, 180]: + raise ValueError(f"`rotation` must be in [-90, None, 90, 180] (got {self.rotation})") diff --git a/lerobot/common/robot_devices/cameras/intelrealsense.py b/lerobot/common/robot_devices/cameras/intelrealsense.py index 84ac540f2c..affb5e6249 100644 --- a/lerobot/common/robot_devices/cameras/intelrealsense.py +++ b/lerobot/common/robot_devices/cameras/intelrealsense.py @@ -11,13 +11,14 @@ import time import traceback from collections import Counter -from dataclasses import dataclass, replace +from dataclasses import replace from pathlib import Path from threading import Thread import numpy as np from PIL import Image +from lerobot.common.robot_devices.cameras.configs import IntelRealSenseCameraConfig from lerobot.common.robot_devices.utils import ( RobotDeviceAlreadyConnectedError, RobotDeviceNotConnectedError, @@ -94,7 +95,7 @@ def save_images_from_cameras( cameras = [] for cam_sn in serial_numbers: print(f"{cam_sn=}") - camera = IntelRealSenseCamera(cam_sn, fps=fps, width=width, height=height, mock=mock) + camera = IntelRealSenseCamera(serial_number=cam_sn, fps=fps, width=width, height=height, mock=mock) camera.connect() print( f"IntelRealSenseCamera({camera.serial_number}, fps={camera.fps}, width={camera.width}, height={camera.height}, color_mode={camera.color_mode})" @@ -149,51 +150,6 @@ def save_images_from_cameras( camera.disconnect() -@dataclass -class IntelRealSenseCameraConfig: - """ - Example of tested options for Intel Real Sense D405: - - ```python - IntelRealSenseCameraConfig(30, 640, 480) - IntelRealSenseCameraConfig(60, 640, 480) - IntelRealSenseCameraConfig(90, 640, 480) - IntelRealSenseCameraConfig(30, 1280, 720) - IntelRealSenseCameraConfig(30, 640, 480, use_depth=True) - IntelRealSenseCameraConfig(30, 640, 480, rotation=90) - ``` - """ - - fps: int | None = None - width: int | None = None - height: int | None = None - color_mode: str = "rgb" - channels: int | None = None - use_depth: bool = False - force_hardware_reset: bool = True - rotation: int | None = None - mock: bool = False - - def __post_init__(self): - if self.color_mode not in ["rgb", "bgr"]: - raise ValueError( - f"`color_mode` is expected to be 'rgb' or 'bgr', but {self.color_mode} is provided." - ) - - self.channels = 3 - - at_least_one_is_not_none = self.fps is not None or self.width is not None or self.height is not None - at_least_one_is_none = self.fps is None or self.width is None or self.height is None - if at_least_one_is_not_none and at_least_one_is_none: - raise ValueError( - "For `fps`, `width` and `height`, either all of them need to be set, or none of them, " - f"but {self.fps=}, {self.width=}, {self.height=} were provided." 
- ) - - if self.rotation not in [-90, None, 90, 180]: - raise ValueError(f"`rotation` must be in [-90, None, 90, 180] (got {self.rotation})") - - class IntelRealSenseCamera: """ The IntelRealSenseCamera class is similar to OpenCVCamera class but adds additional features for Intel Real Sense cameras: @@ -212,7 +168,7 @@ class IntelRealSenseCamera: Example of usage: ```python # Instantiate with its serial number - camera = IntelRealSenseCamera(128422271347) + camera = IntelRealSenseCamera(serial_number=128422271347) # Or by its name if it's unique camera = IntelRealSenseCamera.init_from_name("Intel RealSense D405") camera.connect() @@ -223,19 +179,19 @@ class IntelRealSenseCamera: Example of changing default fps, width, height and color_mode: ```python - camera = IntelRealSenseCamera(serial_number, fps=30, width=1280, height=720) + camera = IntelRealSenseCamera(serial_number=128422271347, fps=30, width=1280, height=720) camera = connect() # applies the settings, might error out if these settings are not compatible with the camera - camera = IntelRealSenseCamera(serial_number, fps=90, width=640, height=480) + camera = IntelRealSenseCamera(serial_number=128422271347, fps=90, width=640, height=480) camera = connect() - camera = IntelRealSenseCamera(serial_number, fps=90, width=640, height=480, color_mode="bgr") + camera = IntelRealSenseCamera(serial_number=128422271347, fps=90, width=640, height=480, color_mode="bgr") camera = connect() ``` Example of returning depth: ```python - camera = IntelRealSenseCamera(serial_number, use_depth=True) + camera = IntelRealSenseCamera(serial_number=128422271347, use_depth=True) camera.connect() color_image, depth_map = camera.read() ``` @@ -243,17 +199,12 @@ class IntelRealSenseCamera: def __init__( self, - serial_number: int, config: IntelRealSenseCameraConfig | None = None, **kwargs, ): - if config is None: - config = IntelRealSenseCameraConfig() - - # Overwrite the config arguments using kwargs - config = replace(config, **kwargs) + config = IntelRealSenseCameraConfig(**kwargs) if config is None else replace(config, **kwargs) - self.serial_number = serial_number + self.serial_number = config.serial_number self.fps = config.fps self.width = config.width self.height = config.height diff --git a/lerobot/common/robot_devices/cameras/opencv.py b/lerobot/common/robot_devices/cameras/opencv.py index d284cf55a9..99f1ec7821 100644 --- a/lerobot/common/robot_devices/cameras/opencv.py +++ b/lerobot/common/robot_devices/cameras/opencv.py @@ -9,13 +9,14 @@ import shutil import threading import time -from dataclasses import dataclass, replace +from dataclasses import replace from pathlib import Path from threading import Thread import numpy as np from PIL import Image +from lerobot.common.robot_devices.cameras.configs import OpenCVCameraConfig from lerobot.common.robot_devices.utils import ( RobotDeviceAlreadyConnectedError, RobotDeviceNotConnectedError, @@ -126,7 +127,7 @@ def save_images_from_cameras( print("Connecting cameras") cameras = [] for cam_idx in camera_ids: - camera = OpenCVCamera(cam_idx, fps=fps, width=width, height=height, mock=mock) + camera = OpenCVCamera(camera_index=cam_idx, fps=fps, width=width, height=height, mock=mock) camera.connect() print( f"OpenCVCamera({camera.camera_index}, fps={camera.fps}, width={camera.width}, " @@ -175,39 +176,6 @@ def save_images_from_cameras( print(f"Images have been saved to {images_dir}") -@dataclass -class OpenCVCameraConfig: - """ - Example of tested options for Intel Real Sense D405: - - ```python - 
OpenCVCameraConfig(30, 640, 480) - OpenCVCameraConfig(60, 640, 480) - OpenCVCameraConfig(90, 640, 480) - OpenCVCameraConfig(30, 1280, 720) - ``` - """ - - fps: int | None = None - width: int | None = None - height: int | None = None - color_mode: str = "rgb" - channels: int | None = None - rotation: int | None = None - mock: bool = False - - def __post_init__(self): - if self.color_mode not in ["rgb", "bgr"]: - raise ValueError( - f"`color_mode` is expected to be 'rgb' or 'bgr', but {self.color_mode} is provided." - ) - - self.channels = 3 - - if self.rotation not in [-90, None, 90, 180]: - raise ValueError(f"`rotation` must be in [-90, None, 90, 180] (got {self.rotation})") - - class OpenCVCamera: """ The OpenCVCamera class allows to efficiently record images from cameras. It relies on opencv2 to communicate @@ -236,25 +204,21 @@ class OpenCVCamera: Example of changing default fps, width, height and color_mode: ```python - camera = OpenCVCamera(0, fps=30, width=1280, height=720) + camera = OpenCVCamera(camera_index=0, fps=30, width=1280, height=720) camera = connect() # applies the settings, might error out if these settings are not compatible with the camera - camera = OpenCVCamera(0, fps=90, width=640, height=480) + camera = OpenCVCamera(camera_index=0, fps=90, width=640, height=480) camera = connect() - camera = OpenCVCamera(0, fps=90, width=640, height=480, color_mode="bgr") + camera = OpenCVCamera(camera_index=0, fps=90, width=640, height=480, color_mode="bgr") camera = connect() ``` """ - def __init__(self, camera_index: int | str, config: OpenCVCameraConfig | None = None, **kwargs): - if config is None: - config = OpenCVCameraConfig() - - # Overwrite config arguments using kwargs - config = replace(config, **kwargs) + def __init__(self, config: OpenCVCameraConfig | None = None, **kwargs): + config = OpenCVCameraConfig(**kwargs) if config is None else replace(config, **kwargs) - self.camera_index = camera_index + self.camera_index = config.camera_index self.port = None # Linux uses ports for connecting to cameras @@ -266,7 +230,7 @@ def __init__(self, camera_index: int | str, config: OpenCVCameraConfig | None = # Retrieve the camera index from a potentially symlinked path self.camera_index = get_camera_index_from_unix_port(self.port) else: - raise ValueError(f"Please check the provided camera_index: {camera_index}") + raise ValueError(f"Please check the provided camera_index: {self.camera_index}") self.fps = config.fps self.width = config.width diff --git a/lerobot/common/robot_devices/cameras/utils.py b/lerobot/common/robot_devices/cameras/utils.py index 7904a57a5c..73675fc250 100644 --- a/lerobot/common/robot_devices/cameras/utils.py +++ b/lerobot/common/robot_devices/cameras/utils.py @@ -2,6 +2,8 @@ import numpy as np +from lerobot.common.robot_devices.cameras.configs import CameraConfig + # Defines a camera type class Camera(Protocol): @@ -9,3 +11,37 @@ def connect(self): ... def read(self, temporary_color: str | None = None) -> np.ndarray: ... def async_read(self) -> np.ndarray: ... def disconnect(self): ... 
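
For context, a rough usage sketch of the camera factory helper added just below: the dict keys, camera names, and parameter values here are illustrative, only the config classes (from the new `cameras/configs.py` above) and `make_cameras_from_configs` come from this patch.

```python
# Illustrative camera setup; names and values are made up. The config classes and the
# make_cameras_from_configs() helper are the ones introduced in this patch.
from lerobot.common.robot_devices.cameras.configs import (
    IntelRealSenseCameraConfig,
    OpenCVCameraConfig,
)
from lerobot.common.robot_devices.cameras.utils import make_cameras_from_configs

camera_configs = {
    "laptop": OpenCVCameraConfig(camera_index=0, fps=30, width=640, height=480),
    "wrist": IntelRealSenseCameraConfig(name="Intel RealSense D405", fps=30, width=640, height=480),
}

# Builds one (not yet connected) camera object per entry, dispatching on cfg.type.
cameras = make_cameras_from_configs(camera_configs)
```
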
+ + +def make_cameras_from_configs(camera_configs: dict[str, CameraConfig]) -> list[Camera]: + cameras = {} + + for key, cfg in camera_configs.items(): + if cfg.type == "opencv": + from lerobot.common.robot_devices.cameras.opencv import OpenCVCamera + + cameras[key] = OpenCVCamera(cfg) + + elif cfg.type == "intelrealsense": + from lerobot.common.robot_devices.cameras.intelrealsense import IntelRealSenseCamera + + cameras[key] = IntelRealSenseCamera(cfg) + else: + raise ValueError(f"The motor type '{cfg.type}' is not valid.") + + return cameras + + +def make_camera(camera_type, **kwargs) -> Camera: + if camera_type == "opencv": + from lerobot.common.robot_devices.cameras.opencv import OpenCVCamera + + return OpenCVCamera(**kwargs) + + elif camera_type == "intelrealsense": + from lerobot.common.robot_devices.cameras.intelrealsense import IntelRealSenseCamera + + return IntelRealSenseCamera(**kwargs) + + else: + raise ValueError(f"The camera type '{camera_type}' is not valid.") diff --git a/lerobot/common/robot_devices/control_configs.py b/lerobot/common/robot_devices/control_configs.py new file mode 100644 index 0000000000..30c4984901 --- /dev/null +++ b/lerobot/common/robot_devices/control_configs.py @@ -0,0 +1,123 @@ +from dataclasses import dataclass +from pathlib import Path + +import draccus + +from lerobot.common.robot_devices.robots.configs import RobotConfig +from lerobot.configs import parser +from lerobot.configs.policies import PretrainedConfig + + +@dataclass +class ControlConfig(draccus.ChoiceRegistry): + pass + + +@ControlConfig.register_subclass("calibrate") +@dataclass +class CalibrateControlConfig(ControlConfig): + # List of arms to calibrate (e.g. `--arms left_follower right_follower left_leader`) + arms: list[str] | None = None + + +@ControlConfig.register_subclass("teleoperate") +@dataclass +class TeleoperateControlConfig(ControlConfig): + # Limit the maximum frames per second. By default, no limit. + fps: int | None = None + teleop_time_s: float | None = None + # Display all cameras on screen + display_cameras: bool = False + + +@ControlConfig.register_subclass("record") +@dataclass +class RecordControlConfig(ControlConfig): + # Dataset identifier. By convention it should match '{hf_username}/{dataset_name}' (e.g. `lerobot/test`). + repo_id: str + # A short but accurate description of the task performed during the recording (e.g. "Pick the Lego block and drop it in the box on the right.") + single_task: str + # Root directory where the dataset will be stored (e.g. 'dataset/path'). + root: str | Path | None = None + policy: PretrainedConfig | None = None + # TODO(rcadene, aliberts): By default, use device and use_amp values from policy checkpoint. + device: str | None = None # cuda | cpu | mps + # `use_amp` determines whether to use Automatic Mixed Precision (AMP) for training and evaluation. With AMP, + # automatic gradient scaling is used. + use_amp: bool | None = None + # Limit the frames per second. By default, uses the policy fps. + fps: int | None = None + # Number of seconds before starting data collection. It allows the robot devices to warmup and synchronize. + warmup_time_s: int | float = 10 + # Number of seconds for data recording for each episode. + episode_time_s: int | float = 60 + # Number of seconds for resetting the environment after each episode. + reset_time_s: int | float = 60 + # Number of episodes to record. 
+ num_episodes: int = 50 + # Encode frames in the dataset into video + video: bool = True + # By default, run the computation of the data statistics at the end of data collection. Compute intensive and not required to just replay an episode. + run_compute_stats: bool = True + # Upload dataset to Hugging Face hub. + push_to_hub: bool = True + # Upload on private repository on the Hugging Face hub. + private: bool = False + # Add tags to your dataset on the hub. + tags: list[str] | None = None + # Number of subprocesses handling the saving of frames as PNGs. Set to 0 to use threads only; + # set to ≥1 to use subprocesses, each using threads to write images. The best number of processes + # and threads depends on your system. We recommend 4 threads per camera with 0 processes. + # If fps is unstable, adjust the thread count. If still unstable, try using 1 or more subprocesses. + num_image_writer_processes: int = 0 + # Number of threads writing the frames as png images on disk, per camera. + # Too many threads might cause unstable teleoperation fps due to main thread being blocked. + # Not enough threads might cause low camera fps. + num_image_writer_threads_per_camera: int = 4 + # Display all cameras on screen + display_cameras: bool = True + # Use vocal synthesis to read events. + play_sounds: bool = True + # Resume recording on an existing dataset. + resume: bool = False + # TODO(rcadene, aliberts): remove local_files_only when refactor with dataset as argument + # Use local files only. By default, this script will try to fetch the dataset from the hub if it exists. + local_files_only: bool = False + + def __post_init__(self): + # HACK: We parse again the cli args here to get the pretrained path if there was one. + policy_path = parser.get_path_arg("policy") + if policy_path: + cli_overrides = parser.get_cli_overrides("policy") + self.policy = PretrainedConfig.from_pretrained(policy_path, cli_overrides=cli_overrides) + self.policy.pretrained_path = policy_path + + if self.policy is not None: + if self.device is None: + raise ValueError("Set one of the following device: cuda, cpu or mps") + elif self.device == "cuda" and self.use_amp is None: + raise ValueError("Set 'use_amp' to True or False.") + + +@ControlConfig.register_subclass("replay") +@dataclass +class ReplayControlConfig(ControlConfig): + # Dataset identifier. By convention it should match '{hf_username}/{dataset_name}' (e.g. `lerobot/test`). + repo_id: str + # Index of the episode to replay. + episode: int + # Root directory where the dataset will be stored (e.g. 'dataset/path'). + root: str | Path | None = None + # Limit the frames per second. By default, uses the dataset fps. + fps: int | None = None + # Use vocal synthesis to read events. + play_sounds: bool = True + # TODO(rcadene, aliberts): remove local_files_only when refactor with dataset as argument + # Use local files only. By default, this script will try to fetch the dataset from the hub if it exists. 
+ local_files_only: bool = False + + +@dataclass +class ControlPipelineConfig: + robot: RobotConfig + control: ControlConfig diff --git a/lerobot/common/robot_devices/control_utils.py b/lerobot/common/robot_devices/control_utils.py index 0e609ed5c3..413a897095 100644 --- a/lerobot/common/robot_devices/control_utils.py +++ b/lerobot/common/robot_devices/control_utils.py @@ -19,11 +19,9 @@ from lerobot.common.datasets.image_writer import safe_stop_image_writer from lerobot.common.datasets.lerobot_dataset import LeRobotDataset from lerobot.common.datasets.utils import get_features_from_robot -from lerobot.common.policies.factory import make_policy -from lerobot.common.policies.utils import get_pretrained_policy_path from lerobot.common.robot_devices.robots.utils import Robot from lerobot.common.robot_devices.utils import busy_wait -from lerobot.common.utils.utils import get_safe_torch_device, init_hydra_config, set_global_seed +from lerobot.common.utils.utils import get_safe_torch_device def log_control_info(robot: Robot, dt_s, episode_index=None, frame_index=None, fps=None): @@ -161,26 +159,6 @@ def on_press(key): return listener, events -def init_policy(pretrained_policy_name_or_path, policy_overrides): - """Instantiate the policy and load fps, device and use_amp from config yaml""" - pretrained_policy_path = get_pretrained_policy_path(pretrained_policy_name_or_path) - hydra_cfg = init_hydra_config(pretrained_policy_path / "config.yaml", policy_overrides) - policy = make_policy(cfg=hydra_cfg, pretrained_policy_name_or_path=pretrained_policy_path) - - # Check device is available - device = get_safe_torch_device(hydra_cfg.device, log=True) - use_amp = hydra_cfg.use_amp - policy_fps = hydra_cfg.env.fps - - policy.eval() - policy.to(device) - - torch.backends.cudnn.benchmark = True - torch.backends.cuda.matmul.allow_tf32 = True - set_global_seed(hydra_cfg.seed) - return policy, policy_fps, device, use_amp - - def warmup_record( robot, events, @@ -233,9 +211,9 @@ def control_loop( dataset: LeRobotDataset | None = None, events=None, policy=None, - device=None, - use_amp=None, - fps=None, + device: torch.device | str | None = None, + use_amp: bool | None = None, + fps: int | None = None, ): # TODO(rcadene): Add option to record logs if not robot.is_connected: @@ -253,6 +231,9 @@ def control_loop( if dataset is not None and fps is not None and dataset.fps != fps: raise ValueError(f"The dataset fps should be equal to requested fps ({dataset['fps']} != {fps}).") + if isinstance(device, str): + device = get_safe_torch_device(device) + timestamp = 0 start_episode_t = time.perf_counter() while timestamp < control_time_s: @@ -324,21 +305,21 @@ def stop_recording(robot, listener, display_cameras): cv2.destroyAllWindows() -def sanity_check_dataset_name(repo_id, policy): +def sanity_check_dataset_name(repo_id, policy_cfg): _, dataset_name = repo_id.split("/") # either repo_id doesnt start with "eval_" and there is no policy # or repo_id starts with "eval_" and there is a policy # Check if dataset_name starts with "eval_" but policy is missing - if dataset_name.startswith("eval_") and policy is None: + if dataset_name.startswith("eval_") and policy_cfg is None: raise ValueError( - f"Your dataset name begins with 'eval_' ({dataset_name}), but no policy is provided." + f"Your dataset name begins with 'eval_' ({dataset_name}), but no policy is provided ({policy_cfg.type})." 
) # Check if dataset_name does not start with "eval_" but policy is provided - if not dataset_name.startswith("eval_") and policy is not None: + if not dataset_name.startswith("eval_") and policy_cfg is not None: raise ValueError( - f"Your dataset name does not begin with 'eval_' ({dataset_name}), but a policy is provided ({policy})." + f"Your dataset name does not begin with 'eval_' ({dataset_name}), but a policy is provided ({policy_cfg.type})." ) diff --git a/lerobot/common/robot_devices/motors/configs.py b/lerobot/common/robot_devices/motors/configs.py new file mode 100644 index 0000000000..37b781f9e2 --- /dev/null +++ b/lerobot/common/robot_devices/motors/configs.py @@ -0,0 +1,27 @@ +import abc +from dataclasses import dataclass + +import draccus + + +@dataclass +class MotorsBusConfig(draccus.ChoiceRegistry, abc.ABC): + @property + def type(self) -> str: + return self.get_choice_name(self.__class__) + + +@MotorsBusConfig.register_subclass("dynamixel") +@dataclass +class DynamixelMotorsBusConfig(MotorsBusConfig): + port: str + motors: dict[str, tuple[int, str]] + mock: bool = False + + +@MotorsBusConfig.register_subclass("feetech") +@dataclass +class FeetechMotorsBusConfig(MotorsBusConfig): + port: str + motors: dict[str, tuple[int, str]] + mock: bool = False diff --git a/lerobot/common/robot_devices/motors/dynamixel.py b/lerobot/common/robot_devices/motors/dynamixel.py index 1e1396f766..bbadf2ba70 100644 --- a/lerobot/common/robot_devices/motors/dynamixel.py +++ b/lerobot/common/robot_devices/motors/dynamixel.py @@ -4,10 +4,12 @@ import time import traceback from copy import deepcopy +from dataclasses import replace import numpy as np import tqdm +from lerobot.common.robot_devices.motors.configs import DynamixelMotorsBusConfig from lerobot.common.robot_devices.utils import RobotDeviceAlreadyConnectedError, RobotDeviceNotConnectedError from lerobot.common.utils.utils import capture_timestamp_utc @@ -293,15 +295,16 @@ class DynamixelMotorsBus: def __init__( self, - port: str, - motors: dict[str, tuple[int, str]], + config: DynamixelMotorsBusConfig | None = None, extra_model_control_table: dict[str, list[tuple]] | None = None, extra_model_resolution: dict[str, int] | None = None, - mock=False, + **kwargs, ): - self.port = port - self.motors = motors - self.mock = mock + config = DynamixelMotorsBusConfig(**kwargs) if config is None else replace(config, **kwargs) + + self.port = config.port + self.motors = config.motors + self.mock = config.mock self.model_ctrl_table = deepcopy(MODEL_CONTROL_TABLE) if extra_model_control_table: diff --git a/lerobot/common/robot_devices/motors/feetech.py b/lerobot/common/robot_devices/motors/feetech.py index 0d5480f7a8..b915f47e26 100644 --- a/lerobot/common/robot_devices/motors/feetech.py +++ b/lerobot/common/robot_devices/motors/feetech.py @@ -4,10 +4,12 @@ import time import traceback from copy import deepcopy +from dataclasses import replace import numpy as np import tqdm +from lerobot.common.robot_devices.motors.configs import FeetechMotorsBusConfig from lerobot.common.robot_devices.utils import RobotDeviceAlreadyConnectedError, RobotDeviceNotConnectedError from lerobot.common.utils.utils import capture_timestamp_utc @@ -271,15 +273,16 @@ class FeetechMotorsBus: def __init__( self, - port: str, - motors: dict[str, tuple[int, str]], + config: FeetechMotorsBusConfig | None = None, extra_model_control_table: dict[str, list[tuple]] | None = None, extra_model_resolution: dict[str, int] | None = None, - mock=False, + **kwargs, ): - self.port = port - 
self.motors = motors - self.mock = mock + config = FeetechMotorsBusConfig(**kwargs) if config is None else replace(config, **kwargs) + + self.port = config.port + self.motors = config.motors + self.mock = config.mock self.model_ctrl_table = deepcopy(MODEL_CONTROL_TABLE) if extra_model_control_table: diff --git a/lerobot/common/robot_devices/motors/utils.py b/lerobot/common/robot_devices/motors/utils.py index 9ba314cea1..fb625339fd 100644 --- a/lerobot/common/robot_devices/motors/utils.py +++ b/lerobot/common/robot_devices/motors/utils.py @@ -1,5 +1,7 @@ from typing import Protocol +from lerobot.common.robot_devices.motors.configs import MotorsBusConfig + class MotorsBus(Protocol): def motor_names(self): ... @@ -8,3 +10,38 @@ def apply_calibration(self): ... def revert_calibration(self): ... def read(self): ... def write(self): ... + + +def make_motors_buses_from_configs(motors_bus_configs: dict[str, MotorsBusConfig]) -> list[MotorsBus]: + motors_buses = {} + + for key, cfg in motors_bus_configs.items(): + if cfg.type == "dynamixel": + from lerobot.common.robot_devices.motors.dynamixel import DynamixelMotorsBus + + motors_buses[key] = DynamixelMotorsBus(cfg) + + elif cfg.type == "feetech": + from lerobot.common.robot_devices.motors.feetech import FeetechMotorsBus + + motors_buses[key] = FeetechMotorsBus(cfg) + + else: + raise ValueError(f"The motor type '{cfg.type}' is not valid.") + + return motors_buses + + +def make_motors_bus(motor_type: str, **kwargs) -> MotorsBus: + if motor_type == "dynamixel": + from lerobot.common.robot_devices.motors.dynamixel import DynamixelMotorsBus + + return DynamixelMotorsBus(**kwargs) + + elif motor_type == "feetech": + from lerobot.common.robot_devices.motors.feetech import FeetechMotorsBus + + return FeetechMotorsBus(**kwargs) + + else: + raise ValueError(f"The motor type '{motor_type}' is not valid.") diff --git a/lerobot/common/robot_devices/robots/configs.py b/lerobot/common/robot_devices/robots/configs.py new file mode 100644 index 0000000000..6d615b2879 --- /dev/null +++ b/lerobot/common/robot_devices/robots/configs.py @@ -0,0 +1,516 @@ +import abc +from dataclasses import dataclass, field +from typing import Sequence + +import draccus + +from lerobot.common.robot_devices.cameras.configs import ( + CameraConfig, + IntelRealSenseCameraConfig, + OpenCVCameraConfig, +) +from lerobot.common.robot_devices.motors.configs import ( + DynamixelMotorsBusConfig, + FeetechMotorsBusConfig, + MotorsBusConfig, +) + + +@dataclass +class RobotConfig(draccus.ChoiceRegistry, abc.ABC): + @property + def type(self) -> str: + return self.get_choice_name(self.__class__) + + +# TODO(rcadene, aliberts): remove ManipulatorRobotConfig abstraction +@dataclass +class ManipulatorRobotConfig(RobotConfig): + leader_arms: dict[str, MotorsBusConfig] = field(default_factory=lambda: {}) + follower_arms: dict[str, MotorsBusConfig] = field(default_factory=lambda: {}) + cameras: dict[str, CameraConfig] = field(default_factory=lambda: {}) + + # Optionally limit the magnitude of the relative positional target vector for safety purposes. + # Set this to a positive scalar to have the same value for all motors, or a list that is the same length + # as the number of motors in your follower arms (assumes all follower arms have the same number of + # motors). + max_relative_target: list[float] | float | None = None + + # Optionally set the leader arm in torque mode with the gripper motor set to this angle. 
This makes it + # possible to squeeze the gripper and have it spring back to an open position on its own. If None, the + # gripper is not put in torque mode. + gripper_open_degree: float | None = None + + mock: bool = False + + def __post_init__(self): + if self.mock: + for arm in self.leader_arms.values(): + if not arm.mock: + arm.mock = True + for arm in self.follower_arms.values(): + if not arm.mock: + arm.mock = True + for cam in self.cameras.values(): + if not cam.mock: + cam.mock = True + + if self.max_relative_target is not None and isinstance(self.max_relative_target, Sequence): + for name in self.follower_arms: + if len(self.follower_arms[name].motors) != len(self.max_relative_target): + raise ValueError( + f"len(max_relative_target)={len(self.max_relative_target)} but the follower arm with name {name} has " + f"{len(self.follower_arms[name].motors)} motors. Please make sure that the " + f"`max_relative_target` list has as many parameters as there are motors per arm. " + "Note: This feature does not yet work with robots where different follower arms have " + "different numbers of motors." + ) + + +@RobotConfig.register_subclass("aloha") +@dataclass +class AlohaRobotConfig(ManipulatorRobotConfig): + # Specific to Aloha, LeRobot comes with default calibration files. Assuming the motors have been + # properly assembled, no manual calibration step is expected. If you need to run manual calibration, + # simply update this path to ".cache/calibration/aloha" + calibration_dir: str = ".cache/calibration/aloha_default" + + # /!\ FOR SAFETY, READ THIS /!\ + # `max_relative_target` limits the magnitude of the relative positional target vector for safety purposes. + # Set this to a positive scalar to have the same value for all motors, or a list that is the same length as + # the number of motors in your follower arms. + # For Aloha, for every goal position request, motor rotations are capped at 5 degrees by default. + # When you feel more confident with teleoperation or running the policy, you can extend + # this safety limit and even removing it by setting it to `null`. 
+ # Also, everything is expected to work safely out-of-the-box, but we highly advise to + # first try to teleoperate the grippers only (by commenting out the rest of the motors in this yaml), + # then to gradually add more motors (by uncommenting), until you can teleoperate both arms fully + max_relative_target: int = 5 + + leader_arms: dict[str, MotorsBusConfig] = field( + default_factory=lambda: { + "left": DynamixelMotorsBusConfig( + # window_x + port="/dev/ttyDXL_leader_left", + motors={ + # name: (index, model) + "waist": [1, "xm430-w350"], + "shoulder": [2, "xm430-w350"], + "shoulder_shadow": [3, "xm430-w350"], + "elbow": [4, "xm430-w350"], + "elbow_shadow": [5, "xm430-w350"], + "forearm_roll": [6, "xm430-w350"], + "wrist_angle": [7, "xm430-w350"], + "wrist_rotate": [8, "xl430-w250"], + "gripper": [9, "xc430-w150"], + }, + ), + "right": DynamixelMotorsBusConfig( + # window_x + port="/dev/ttyDXL_leader_right", + motors={ + # name: (index, model) + "waist": [1, "xm430-w350"], + "shoulder": [2, "xm430-w350"], + "shoulder_shadow": [3, "xm430-w350"], + "elbow": [4, "xm430-w350"], + "elbow_shadow": [5, "xm430-w350"], + "forearm_roll": [6, "xm430-w350"], + "wrist_angle": [7, "xm430-w350"], + "wrist_rotate": [8, "xl430-w250"], + "gripper": [9, "xc430-w150"], + }, + ), + } + ) + + follower_arms: dict[str, MotorsBusConfig] = field( + default_factory=lambda: { + "left": DynamixelMotorsBusConfig( + port="/dev/ttyDXL_follower_left", + motors={ + # name: (index, model) + "waist": [1, "xm540-w270"], + "shoulder": [2, "xm540-w270"], + "shoulder_shadow": [3, "xm540-w270"], + "elbow": [4, "xm540-w270"], + "elbow_shadow": [5, "xm540-w270"], + "forearm_roll": [6, "xm540-w270"], + "wrist_angle": [7, "xm540-w270"], + "wrist_rotate": [8, "xm430-w350"], + "gripper": [9, "xm430-w350"], + }, + ), + "right": DynamixelMotorsBusConfig( + port="/dev/ttyDXL_follower_right", + motors={ + # name: (index, model) + "waist": [1, "xm540-w270"], + "shoulder": [2, "xm540-w270"], + "shoulder_shadow": [3, "xm540-w270"], + "elbow": [4, "xm540-w270"], + "elbow_shadow": [5, "xm540-w270"], + "forearm_roll": [6, "xm540-w270"], + "wrist_angle": [7, "xm540-w270"], + "wrist_rotate": [8, "xm430-w350"], + "gripper": [9, "xm430-w350"], + }, + ), + } + ) + + # Troubleshooting: If one of your IntelRealSense cameras freeze during + # data recording due to bandwidth limit, you might need to plug the camera + # on another USB hub or PCIe card. + cameras: dict[str, CameraConfig] = field( + default_factory=lambda: { + "cam_high": IntelRealSenseCameraConfig( + serial_number=128422271347, + fps=30, + width=640, + height=480, + ), + "cam_low": IntelRealSenseCameraConfig( + serial_number=130322270656, + fps=30, + width=640, + height=480, + ), + "cam_left_wrist": IntelRealSenseCameraConfig( + serial_number=218622272670, + fps=30, + width=640, + height=480, + ), + "cam_right_wrist": IntelRealSenseCameraConfig( + serial_number=130322272300, + fps=30, + width=640, + height=480, + ), + } + ) + + mock: bool = False + + +@RobotConfig.register_subclass("koch") +@dataclass +class KochRobotConfig(ManipulatorRobotConfig): + calibration_dir: str = ".cache/calibration/koch" + # `max_relative_target` limits the magnitude of the relative positional target vector for safety purposes. + # Set this to a positive scalar to have the same value for all motors, or a list that is the same length as + # the number of motors in your follower arms. 
+ max_relative_target: int | None = None + + leader_arms: dict[str, MotorsBusConfig] = field( + default_factory=lambda: { + "main": DynamixelMotorsBusConfig( + port="/dev/tty.usbmodem585A0085511", + motors={ + # name: (index, model) + "shoulder_pan": [1, "xl330-m077"], + "shoulder_lift": [2, "xl330-m077"], + "elbow_flex": [3, "xl330-m077"], + "wrist_flex": [4, "xl330-m077"], + "wrist_roll": [5, "xl330-m077"], + "gripper": [6, "xl330-m077"], + }, + ), + } + ) + + follower_arms: dict[str, MotorsBusConfig] = field( + default_factory=lambda: { + "main": DynamixelMotorsBusConfig( + port="/dev/tty.usbmodem585A0076891", + motors={ + # name: (index, model) + "shoulder_pan": [1, "xl430-w250"], + "shoulder_lift": [2, "xl430-w250"], + "elbow_flex": [3, "xl330-m288"], + "wrist_flex": [4, "xl330-m288"], + "wrist_roll": [5, "xl330-m288"], + "gripper": [6, "xl330-m288"], + }, + ), + } + ) + + cameras: dict[str, CameraConfig] = field( + default_factory=lambda: { + "laptop": OpenCVCameraConfig( + camera_index=0, + fps=30, + width=640, + height=480, + ), + "phone": OpenCVCameraConfig( + camera_index=1, + fps=30, + width=640, + height=480, + ), + } + ) + + # ~ Koch specific settings ~ + # Sets the leader arm in torque mode with the gripper motor set to this angle. This makes it possible + # to squeeze the gripper and have it spring back to an open position on its own. + gripper_open_degree: float = 35.156 + + mock: bool = False + + +@RobotConfig.register_subclass("koch_bimanual") +@dataclass +class KochBimanualRobotConfig(ManipulatorRobotConfig): + calibration_dir: str = ".cache/calibration/koch_bimanual" + # `max_relative_target` limits the magnitude of the relative positional target vector for safety purposes. + # Set this to a positive scalar to have the same value for all motors, or a list that is the same length as + # the number of motors in your follower arms. 
+ max_relative_target: int | None = None + + leader_arms: dict[str, MotorsBusConfig] = field( + default_factory=lambda: { + "left": DynamixelMotorsBusConfig( + port="/dev/tty.usbmodem585A0085511", + motors={ + # name: (index, model) + "shoulder_pan": [1, "xl330-m077"], + "shoulder_lift": [2, "xl330-m077"], + "elbow_flex": [3, "xl330-m077"], + "wrist_flex": [4, "xl330-m077"], + "wrist_roll": [5, "xl330-m077"], + "gripper": [6, "xl330-m077"], + }, + ), + "right": DynamixelMotorsBusConfig( + port="/dev/tty.usbmodem575E0031751", + motors={ + # name: (index, model) + "shoulder_pan": [1, "xl330-m077"], + "shoulder_lift": [2, "xl330-m077"], + "elbow_flex": [3, "xl330-m077"], + "wrist_flex": [4, "xl330-m077"], + "wrist_roll": [5, "xl330-m077"], + "gripper": [6, "xl330-m077"], + }, + ), + } + ) + + follower_arms: dict[str, MotorsBusConfig] = field( + default_factory=lambda: { + "left": DynamixelMotorsBusConfig( + port="/dev/tty.usbmodem585A0076891", + motors={ + # name: (index, model) + "shoulder_pan": [1, "xl430-w250"], + "shoulder_lift": [2, "xl430-w250"], + "elbow_flex": [3, "xl330-m288"], + "wrist_flex": [4, "xl330-m288"], + "wrist_roll": [5, "xl330-m288"], + "gripper": [6, "xl330-m288"], + }, + ), + "right": DynamixelMotorsBusConfig( + port="/dev/tty.usbmodem575E0032081", + motors={ + # name: (index, model) + "shoulder_pan": [1, "xl430-w250"], + "shoulder_lift": [2, "xl430-w250"], + "elbow_flex": [3, "xl330-m288"], + "wrist_flex": [4, "xl330-m288"], + "wrist_roll": [5, "xl330-m288"], + "gripper": [6, "xl330-m288"], + }, + ), + } + ) + + cameras: dict[str, CameraConfig] = field( + default_factory=lambda: { + "laptop": OpenCVCameraConfig( + camera_index=0, + fps=30, + width=640, + height=480, + ), + "phone": OpenCVCameraConfig( + camera_index=1, + fps=30, + width=640, + height=480, + ), + } + ) + + # ~ Koch specific settings ~ + # Sets the leader arm in torque mode with the gripper motor set to this angle. This makes it possible + # to squeeze the gripper and have it spring back to an open position on its own. + gripper_open_degree: float = 35.156 + + mock: bool = False + + +@RobotConfig.register_subclass("moss") +@dataclass +class MossRobotConfig(ManipulatorRobotConfig): + calibration_dir: str = ".cache/calibration/moss" + # `max_relative_target` limits the magnitude of the relative positional target vector for safety purposes. + # Set this to a positive scalar to have the same value for all motors, or a list that is the same length as + # the number of motors in your follower arms. 
+ max_relative_target: int | None = None + + leader_arms: dict[str, MotorsBusConfig] = field( + default_factory=lambda: { + "main": FeetechMotorsBusConfig( + port="/dev/tty.usbmodem58760431091", + motors={ + # name: (index, model) + "shoulder_pan": [1, "sts3215"], + "shoulder_lift": [2, "sts3215"], + "elbow_flex": [3, "sts3215"], + "wrist_flex": [4, "sts3215"], + "wrist_roll": [5, "sts3215"], + "gripper": [6, "sts3215"], + }, + ), + } + ) + + follower_arms: dict[str, MotorsBusConfig] = field( + default_factory=lambda: { + "main": FeetechMotorsBusConfig( + port="/dev/tty.usbmodem585A0076891", + motors={ + # name: (index, model) + "shoulder_pan": [1, "sts3215"], + "shoulder_lift": [2, "sts3215"], + "elbow_flex": [3, "sts3215"], + "wrist_flex": [4, "sts3215"], + "wrist_roll": [5, "sts3215"], + "gripper": [6, "sts3215"], + }, + ), + } + ) + + cameras: dict[str, CameraConfig] = field( + default_factory=lambda: { + "laptop": OpenCVCameraConfig( + camera_index=0, + fps=30, + width=640, + height=480, + ), + "phone": OpenCVCameraConfig( + camera_index=1, + fps=30, + width=640, + height=480, + ), + } + ) + + mock: bool = False + + +@RobotConfig.register_subclass("so100") +@dataclass +class So100RobotConfig(ManipulatorRobotConfig): + calibration_dir: str = ".cache/calibration/so100" + # `max_relative_target` limits the magnitude of the relative positional target vector for safety purposes. + # Set this to a positive scalar to have the same value for all motors, or a list that is the same length as + # the number of motors in your follower arms. + max_relative_target: int | None = None + + leader_arms: dict[str, MotorsBusConfig] = field( + default_factory=lambda: { + "main": FeetechMotorsBusConfig( + port="/dev/tty.usbmodem58760431091", + motors={ + # name: (index, model) + "shoulder_pan": [1, "sts3215"], + "shoulder_lift": [2, "sts3215"], + "elbow_flex": [3, "sts3215"], + "wrist_flex": [4, "sts3215"], + "wrist_roll": [5, "sts3215"], + "gripper": [6, "sts3215"], + }, + ), + } + ) + + follower_arms: dict[str, MotorsBusConfig] = field( + default_factory=lambda: { + "main": FeetechMotorsBusConfig( + port="/dev/tty.usbmodem585A0076891", + motors={ + # name: (index, model) + "shoulder_pan": [1, "sts3215"], + "shoulder_lift": [2, "sts3215"], + "elbow_flex": [3, "sts3215"], + "wrist_flex": [4, "sts3215"], + "wrist_roll": [5, "sts3215"], + "gripper": [6, "sts3215"], + }, + ), + } + ) + + cameras: dict[str, CameraConfig] = field( + default_factory=lambda: { + "laptop": OpenCVCameraConfig( + camera_index=0, + fps=30, + width=640, + height=480, + ), + "phone": OpenCVCameraConfig( + camera_index=1, + fps=30, + width=640, + height=480, + ), + } + ) + + mock: bool = False + + +@RobotConfig.register_subclass("stretch") +@dataclass +class StretchRobotConfig(RobotConfig): + # `max_relative_target` limits the magnitude of the relative positional target vector for safety purposes. + # Set this to a positive scalar to have the same value for all motors, or a list that is the same length as + # the number of motors in your follower arms. 
+ max_relative_target: int | None = None + + cameras: dict[str, CameraConfig] = field( + default_factory=lambda: { + "navigation": OpenCVCameraConfig( + camera_index="/dev/hello-nav-head-camera", + fps=10, + width=1280, + height=720, + rotation=-90, + ), + "head": IntelRealSenseCameraConfig( + name="Intel RealSense D435I", + fps=30, + width=640, + height=480, + rotation=90, + ), + "wrist": IntelRealSenseCameraConfig( + name="Intel RealSense D405", + fps=30, + width=640, + height=480, + ), + } + ) + + mock: bool = False diff --git a/lerobot/common/robot_devices/robots/factory.py b/lerobot/common/robot_devices/robots/factory.py deleted file mode 100644 index 17e8e5e6a9..0000000000 --- a/lerobot/common/robot_devices/robots/factory.py +++ /dev/null @@ -1,9 +0,0 @@ -import hydra -from omegaconf import DictConfig - -from lerobot.common.robot_devices.robots.utils import Robot - - -def make_robot(cfg: DictConfig) -> Robot: - robot = hydra.utils.instantiate(cfg) - return robot diff --git a/lerobot/common/robot_devices/robots/manipulator.py b/lerobot/common/robot_devices/robots/manipulator.py index 618105064a..a46d7dc058 100644 --- a/lerobot/common/robot_devices/robots/manipulator.py +++ b/lerobot/common/robot_devices/robots/manipulator.py @@ -8,15 +8,15 @@ import logging import time import warnings -from dataclasses import dataclass, field, replace +from dataclasses import replace from pathlib import Path -from typing import Sequence import numpy as np import torch -from lerobot.common.robot_devices.cameras.utils import Camera -from lerobot.common.robot_devices.motors.utils import MotorsBus +from lerobot.common.robot_devices.cameras.utils import make_cameras_from_configs +from lerobot.common.robot_devices.motors.utils import MotorsBus, make_motors_buses_from_configs +from lerobot.common.robot_devices.robots.configs import ManipulatorRobotConfig from lerobot.common.robot_devices.robots.utils import get_arm_id from lerobot.common.robot_devices.utils import RobotDeviceAlreadyConnectedError, RobotDeviceNotConnectedError @@ -41,50 +41,6 @@ def ensure_safe_goal_position( return safe_goal_pos -@dataclass -class ManipulatorRobotConfig: - """ - Example of usage: - ```python - ManipulatorRobotConfig() - ``` - """ - - # Define all components of the robot - robot_type: str = "koch" - leader_arms: dict[str, MotorsBus] = field(default_factory=lambda: {}) - follower_arms: dict[str, MotorsBus] = field(default_factory=lambda: {}) - cameras: dict[str, Camera] = field(default_factory=lambda: {}) - - # Optionally limit the magnitude of the relative positional target vector for safety purposes. - # Set this to a positive scalar to have the same value for all motors, or a list that is the same length - # as the number of motors in your follower arms (assumes all follower arms have the same number of - # motors). - max_relative_target: list[float] | float | None = None - - # Optionally set the leader arm in torque mode with the gripper motor set to this angle. This makes it - # possible to squeeze the gripper and have it spring back to an open position on its own. If None, the - # gripper is not put in torque mode. 
- gripper_open_degree: float | None = None - - def __setattr__(self, prop: str, val): - if prop == "max_relative_target" and val is not None and isinstance(val, Sequence): - for name in self.follower_arms: - if len(self.follower_arms[name].motors) != len(val): - raise ValueError( - f"len(max_relative_target)={len(val)} but the follower arm with name {name} has " - f"{len(self.follower_arms[name].motors)} motors. Please make sure that the " - f"`max_relative_target` list has as many parameters as there are motors per arm. " - "Note: This feature does not yet work with robots where different follower arms have " - "different numbers of motors." - ) - super().__setattr__(prop, val) - - def __post_init__(self): - if self.robot_type not in ["koch", "koch_bimanual", "aloha", "so100", "moss"]: - raise ValueError(f"Provided robot type ({self.robot_type}) is not supported.") - - class ManipulatorRobot: # TODO(rcadene): Implement force feedback """This class allows to control any manipulator robot of various number of motors. @@ -95,11 +51,16 @@ class ManipulatorRobot: - [Koch v1.1](https://github.com/jess-moss/koch-v1-1) developed by Jess Moss - [Aloha](https://www.trossenrobotics.com/aloha-kits) developed by Trossen Robotics - Example of highest frequency teleoperation without camera: + Example of instantiation, a pre-defined robot config is required: + ```python + robot = ManipulatorRobot(KochRobotConfig()) + ``` + + Example of overwritting motors during instantiation: ```python # Defines how to communicate with the motors of the leader and follower arms leader_arms = { - "main": DynamixelMotorsBus( + "main": DynamixelMotorsBusConfig( port="/dev/tty.usbmodem575E0031751", motors={ # name: (index, model) @@ -113,7 +74,7 @@ class ManipulatorRobot: ), } follower_arms = { - "main": DynamixelMotorsBus( + "main": DynamixelMotorsBusConfig( port="/dev/tty.usbmodem575E0032081", motors={ # name: (index, model) @@ -126,35 +87,11 @@ class ManipulatorRobot: }, ), } - robot = ManipulatorRobot( - robot_type="koch", - calibration_dir=".cache/calibration/koch", - leader_arms=leader_arms, - follower_arms=follower_arms, - ) - - # Connect motors buses and cameras if any (Required) - robot.connect() - - while True: - robot.teleop_step() + robot_config = KochRobotConfig(leader_arms=leader_arms, follower_arms=follower_arms) + robot = ManipulatorRobot(robot_config) ``` - Example of highest frequency data collection without camera: - ```python - # Assumes leader and follower arms have been instantiated already (see first example) - robot = ManipulatorRobot( - robot_type="koch", - calibration_dir=".cache/calibration/koch", - leader_arms=leader_arms, - follower_arms=follower_arms, - ) - robot.connect() - while True: - observation, action = robot.teleop_step(record_data=True) - ``` - - Example of highest frequency data collection with cameras: + Example of overwritting cameras during instantiation: ```python # Defines how to communicate with 2 cameras connected to the computer. 
# Here, the webcam of the laptop and the phone (connected in USB to the laptop) @@ -164,31 +101,28 @@ class ManipulatorRobot: "laptop": OpenCVCamera(camera_index=0, fps=30, width=640, height=480), "phone": OpenCVCamera(camera_index=1, fps=30, width=640, height=480), } + robot = ManipulatorRobot(KochRobotConfig(cameras=cameras)) + ``` - # Assumes leader and follower arms have been instantiated already (see first example) - robot = ManipulatorRobot( - robot_type="koch", - calibration_dir=".cache/calibration/koch", - leader_arms=leader_arms, - follower_arms=follower_arms, - cameras=cameras, - ) + Once the robot is instantiated, connect motors buses and cameras if any (Required): + ```python robot.connect() + ``` + + Example of highest frequency teleoperation, which doesn't require cameras: + ``` + while True: + robot.teleop_step() + ``` + + Example of highest frequency data collection from motors and cameras (if any): + ```python while True: observation, action = robot.teleop_step(record_data=True) ``` - Example of controlling the robot with a policy (without running multiple policies in parallel to ensure highest frequency): + Example of controlling the robot with a policy: ```python - # Assumes leader and follower arms + cameras have been instantiated already (see previous example) - robot = ManipulatorRobot( - robot_type="koch", - calibration_dir=".cache/calibration/koch", - leader_arms=leader_arms, - follower_arms=follower_arms, - cameras=cameras, - ) - robot.connect() while True: # Uses the follower arms and cameras to capture an observation observation = robot.capture_observation() @@ -209,20 +143,17 @@ class ManipulatorRobot: def __init__( self, - config: ManipulatorRobotConfig | None = None, - calibration_dir: Path = ".cache/calibration/koch", + config: ManipulatorRobotConfig, **kwargs, ): - if config is None: - config = ManipulatorRobotConfig() # Overwrite config arguments using kwargs self.config = replace(config, **kwargs) - self.calibration_dir = Path(calibration_dir) - self.robot_type = self.config.robot_type - self.leader_arms = self.config.leader_arms - self.follower_arms = self.config.follower_arms - self.cameras = self.config.cameras + self.robot_type = self.config.type + self.calibration_dir = Path(self.config.calibration_dir) + self.leader_arms = make_motors_buses_from_configs(self.config.leader_arms) + self.follower_arms = make_motors_buses_from_configs(self.config.follower_arms) + self.cameras = make_cameras_from_configs(self.config.cameras) self.is_connected = False self.logs = {} diff --git a/lerobot/common/robot_devices/robots/stretch.py b/lerobot/common/robot_devices/robots/stretch.py index ff86b6d809..b63bf94172 100644 --- a/lerobot/common/robot_devices/robots/stretch.py +++ b/lerobot/common/robot_devices/robots/stretch.py @@ -15,23 +15,14 @@ # limitations under the License. 
import time -from dataclasses import dataclass, field, replace +from dataclasses import replace import torch from stretch_body.gamepad_teleop import GamePadTeleop from stretch_body.robot import Robot as StretchAPI from stretch_body.robot_params import RobotParams -from lerobot.common.robot_devices.cameras.utils import Camera - - -@dataclass -class StretchRobotConfig: - robot_type: str | None = "stretch" - cameras: dict[str, Camera] = field(default_factory=lambda: {}) - # TODO(aliberts): add feature with max_relative target - # TODO(aliberts): add comment on max_relative target - max_relative_target: list[float] | float | None = None +from lerobot.common.robot_devices.robots.configs import StretchRobotConfig class StretchRobot(StretchAPI): @@ -40,11 +31,12 @@ class StretchRobot(StretchAPI): def __init__(self, config: StretchRobotConfig | None = None, **kwargs): super().__init__() if config is None: - config = StretchRobotConfig() - # Overwrite config arguments using kwargs - self.config = replace(config, **kwargs) + self.config = StretchRobotConfig(**kwargs) + else: + # Overwrite config arguments using kwargs + self.config = replace(config, **kwargs) - self.robot_type = self.config.robot_type + self.robot_type = self.config.type self.cameras = self.config.cameras self.is_connected = False self.teleop = None diff --git a/lerobot/common/robot_devices/robots/utils.py b/lerobot/common/robot_devices/robots/utils.py index a40db13124..86ff64732b 100644 --- a/lerobot/common/robot_devices/robots/utils.py +++ b/lerobot/common/robot_devices/robots/utils.py @@ -1,5 +1,16 @@ from typing import Protocol +from lerobot.common.robot_devices.robots.configs import ( + AlohaRobotConfig, + KochBimanualRobotConfig, + KochRobotConfig, + ManipulatorRobotConfig, + MossRobotConfig, + RobotConfig, + So100RobotConfig, + StretchRobotConfig, +) + def get_arm_id(name, arm_type): """Returns the string identifier of a robot arm. For instance, for a bimanual manipulator @@ -19,3 +30,36 @@ def teleop_step(self, record_data=False): ... def capture_observation(self): ... def send_action(self, action): ... def disconnect(self): ... 
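To tie the pieces together, here is a small sketch, under the same assumptions as the docstring examples above, of composing one of the registered robot configs and driving it through the `Robot` protocol. With this refactor the `cameras` dict holds camera config objects, which `ManipulatorRobot` turns into actual cameras via `make_cameras_from_configs`; the camera index below is a placeholder.

```python
from lerobot.common.robot_devices.cameras.configs import OpenCVCameraConfig
from lerobot.common.robot_devices.robots.configs import KochRobotConfig
from lerobot.common.robot_devices.robots.manipulator import ManipulatorRobot

# Override only what differs from the defaults declared in KochRobotConfig;
# the leader/follower arms keep their default ports and motor layouts.
config = KochRobotConfig(
    cameras={
        "laptop": OpenCVCameraConfig(camera_index=0, fps=30, width=640, height=480),
    },
)

robot = ManipulatorRobot(config)
robot.connect()  # builds motor buses and cameras from the config, then connects them
observation, action = robot.teleop_step(record_data=True)
robot.disconnect()
```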
+ + +def make_robot_config(robot_type: str, **kwargs) -> RobotConfig: + if robot_type == "aloha": + return AlohaRobotConfig(**kwargs) + elif robot_type == "koch": + return KochRobotConfig(**kwargs) + elif robot_type == "koch_bimanual": + return KochBimanualRobotConfig(**kwargs) + elif robot_type == "moss": + return MossRobotConfig(**kwargs) + elif robot_type == "so100": + return So100RobotConfig(**kwargs) + elif robot_type == "stretch": + return StretchRobotConfig(**kwargs) + else: + raise ValueError(f"Robot type '{robot_type}' is not available.") + + +def make_robot_from_config(config: RobotConfig): + if isinstance(config, ManipulatorRobotConfig): + from lerobot.common.robot_devices.robots.manipulator import ManipulatorRobot + + return ManipulatorRobot(config) + else: + from lerobot.common.robot_devices.robots.stretch import StretchRobot + + return StretchRobot(config) + + +def make_robot(robot_type: str, **kwargs) -> Robot: + config = make_robot_config(robot_type, **kwargs) + return make_robot_from_config(config) diff --git a/lerobot/common/utils/utils.py b/lerobot/common/utils/utils.py index 8adcc2d62a..45af5907f6 100644 --- a/lerobot/common/utils/utils.py +++ b/lerobot/common/utils/utils.py @@ -23,10 +23,8 @@ from pathlib import Path from typing import Any, Generator -import hydra import numpy as np import torch -from omegaconf import DictConfig def none_or_int(value): @@ -159,22 +157,6 @@ def _relative_path_between(path1: Path, path2: Path) -> Path: ) -def init_hydra_config(config_path: str, overrides: list[str] | None = None) -> DictConfig: - """Initialize a Hydra config given only the path to the relevant config file. - - For config resolution, it is assumed that the config file's parent is the Hydra config dir. - """ - # TODO(alexander-soare): Resolve configs without Hydra initialization. - hydra.core.global_hydra.GlobalHydra.instance().clear() - # Hydra needs a path relative to this file. - hydra.initialize( - str(_relative_path_between(Path(config_path).absolute().parent, Path(__file__).absolute().parent)), - version_base="1.2", - ) - cfg = hydra.compose(Path(config_path).stem, overrides) - return cfg - - def print_cuda_memory_usage(): """Use this function to locate and debug memory leak.""" import gc diff --git a/lerobot/configs/default.py b/lerobot/configs/default.py index 7f4d1ce12a..86987021d5 100644 --- a/lerobot/configs/default.py +++ b/lerobot/configs/default.py @@ -28,7 +28,7 @@ class DatasetConfig: # keys common between the datasets are kept. Each dataset gets and additional transform that inserts the # "dataset_index" into the returned item. The index mapping is made according to the order in which the # datsets are provided. - repo_id: str | list[str] + repo_id: str episodes: list[int] | None = None image_transforms: ImageTransformsConfig = field(default_factory=ImageTransformsConfig) local_files_only: bool = False diff --git a/lerobot/configs/eval.py b/lerobot/configs/eval.py index caba3f5a2b..b19033ace4 100644 --- a/lerobot/configs/eval.py +++ b/lerobot/configs/eval.py @@ -38,10 +38,11 @@ class EvalPipelineConfig: policy: PretrainedConfig | None = None output_dir: Path | None = None job_name: str | None = None - device: str = "cuda" # | cpu | mp + # TODO(rcadene, aliberts): By default, use device and use_amp values from policy checkpoint. + device: str | None = None # cuda | cpu | mps # `use_amp` determines whether to use Automatic Mixed Precision (AMP) for training and evaluation. With AMP, # automatic gradient scaling is used. 
- use_amp: bool = False + use_amp: bool | None = None seed: int | None = 1000 def __post_init__(self): @@ -59,13 +60,21 @@ def __post_init__(self): self.policy.pretrained_path = policy_path if not self.job_name: - self.job_name = f"{self.env.type}_{self.policy.type}" + if self.env is None: + self.job_name = f"{self.policy.type}" + else: + self.job_name = f"{self.env.type}_{self.policy.type}" if not self.output_dir: now = dt.datetime.now() eval_dir = f"{now:%Y-%m-%d}/{now:%H-%M-%S}_{self.job_name}" self.output_dir = Path("outputs/eval") / eval_dir + if self.device is None: + raise ValueError("Set one of the following device: cuda, cpu or mps") + elif self.device == "cuda" and self.use_amp is None: + raise ValueError("Set 'use_amp' to True or False.") + @classmethod def __get_path_fields__(cls) -> list[Field]: """This enables the parser to load config from the policy using `--policy.path=local/dir`""" diff --git a/lerobot/configs/policies.py b/lerobot/configs/policies.py index d85287cd25..ba00dcacc9 100644 --- a/lerobot/configs/policies.py +++ b/lerobot/configs/policies.py @@ -55,14 +55,6 @@ def __post_init__(self): self.type = self.get_choice_name(self.__class__) self.pretrained_path = None - # @property - # def pretrained_path(self) -> str | Path | None: - # return self._pretrained_path - - # @pretrained_path.setter - # def pretrained_path(self, path: str | Path): - # self._pretrained_path = path - @abc.abstractproperty def observation_delta_indices(self) -> list | None: raise NotImplementedError @@ -162,7 +154,8 @@ def parse_features_from_dataset(self, ds_meta: LeRobotDatasetMetadata): names = ds_meta.features[key]["names"] if len(shape) != 3: raise ValueError(f"Number of dimensions of {key} != 3 (shape={shape})") - if names[2] == "channel": # (h, w, c) -> (c, h, w) + # Backward compatibility for "channel" which is an error introduced in LeRobotDataset v2.0 for ported datasets. 
+ if names[2] in ["channel", "channels"]: # (h, w, c) -> (c, h, w) shape = (shape[2], shape[0], shape[1]) image_features.append( PolicyFeature( diff --git a/lerobot/configs/policy/act_aloha_real.yaml b/lerobot/configs/policy/act_aloha_real.yaml index 7c8094da19..99d01bb202 100644 --- a/lerobot/configs/policy/act_aloha_real.yaml +++ b/lerobot/configs/policy/act_aloha_real.yaml @@ -12,13 +12,6 @@ # policy=act_aloha_real \ # env=aloha_real # ``` -# -# Example of usage for training and inference with [Dora-rs](https://github.com/dora-rs/dora-lerobot): -# ```bash -# python lerobot/scripts/train.py \ -# policy=act_aloha_real \ -# env=dora_aloha_real -# ``` seed: 1000 dataset_repo_id: lerobot/aloha_static_vinh_cup diff --git a/lerobot/configs/training.py b/lerobot/configs/training.py index c62fa42163..76882bfbee 100644 --- a/lerobot/configs/training.py +++ b/lerobot/configs/training.py @@ -124,7 +124,10 @@ def __post_init__(self): self.policy.pretrained_path = policy_path if not self.job_name: - self.job_name = f"{self.env.type}_{self.policy.type}" + if self.env is None: + self.job_name = f"{self.policy.type}" + else: + self.job_name = f"{self.env.type}_{self.policy.type}" if not self.resume and isinstance(self.output_dir, Path) and self.output_dir.is_dir(): raise FileExistsError( @@ -150,11 +153,3 @@ def __get_path_fields__(cls) -> list[Field]: """This enables the parser to load config from the policy using `--policy.path=local/dir`""" path_fields = ["policy"] return [f for f in fields(cls) if f.name in path_fields] - - # @property - # def checkpoint_path(self) -> str | Path | None: - # return self._checkpoint_path - - # @checkpoint_path.setter - # def set_checkpoint_path(self, path: str | Path): - # self._checkpoint_path = path diff --git a/lerobot/scripts/control_robot.py b/lerobot/scripts/control_robot.py index 12eaf146f4..73eb107bbb 100644 --- a/lerobot/scripts/control_robot.py +++ b/lerobot/scripts/control_robot.py @@ -8,30 +8,41 @@ - Recalibrate your robot: ```bash -python lerobot/scripts/control_robot.py calibrate +python lerobot/scripts/control_robot.py \ + --robot.type=so100 \ + --control.type=calibrate ``` - Unlimited teleoperation at highest frequency (~200 Hz is expected), to exit with CTRL+C: ```bash -python lerobot/scripts/control_robot.py teleoperate +python lerobot/scripts/control_robot.py \ + --robot.type=so100 \ + --control.type=teleoperate # Remove the cameras from the robot definition. They are not used in 'teleoperate' anyway. 
-python lerobot/scripts/control_robot.py teleoperate --robot-overrides '~cameras'
+python lerobot/scripts/control_robot.py \
+    --robot.type=so100 \
+    --robot.cameras='{}' \
+    --control.type=teleoperate
 ```

 - Unlimited teleoperation at a limited frequency of 30 Hz, to simulate data recording frequency:
 ```bash
-python lerobot/scripts/control_robot.py teleoperate \
-    --fps 30
+python lerobot/scripts/control_robot.py \
+    --robot.type=so100 \
+    --control.type=teleoperate \
+    --control.fps=30
 ```

 - Record one episode in order to test replay:
 ```bash
-python lerobot/scripts/control_robot.py record \
-    --fps 30 \
-    --repo-id $USER/koch_test \
-    --num-episodes 1 \
-    --run-compute-stats 0
+python lerobot/scripts/control_robot.py \
+    --robot.type=so100 \
+    --control.type=record \
+    --control.fps=30 \
+    --control.repo_id=$USER/koch_test \
+    --control.num_episodes=1 \
+    --control.run_compute_stats=False
 ```

 - Visualize dataset:
@@ -44,21 +55,25 @@
 - Replay this test episode:
 ```bash
-python lerobot/scripts/control_robot.py replay \
-    --fps 30 \
-    --repo-id $USER/koch_test \
-    --episode 0
+python lerobot/scripts/control_robot.py \
+    --robot.type=so100 \
+    --control.type=replay \
+    --control.fps=30 \
+    --control.repo_id=$USER/koch_test \
+    --control.episode=0
 ```

 - Record a full dataset in order to train a policy, with 2 seconds of warmup,
 30 seconds of recording for each episode, and 10 seconds to reset the environment in between episodes:
 ```bash
-python lerobot/scripts/control_robot.py record \
-    --fps 30 \
-    --repo-id $USER/koch_pick_place_lego \
-    --num-episodes 50 \
-    --warmup-time-s 2 \
-    --episode-time-s 30 \
-    --reset-time-s 10
+python lerobot/scripts/control_robot.py \
+    --robot.type=so100 \
+    --control.type=record \
+    --control.fps=30 \
+    --control.repo_id=$USER/koch_pick_place_lego \
+    --control.num_episodes=50 \
+    --control.warmup_time_s=2 \
+    --control.episode_time_s=30 \
+    --control.reset_time_s=10
 ```

 **NOTE**: You can use your keyboard to control data recording flow.
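For orientation, the commands above are parsed by draccus into the `ControlPipelineConfig` added in this patch: a `RobotConfig` plus a `ControlConfig` variant selected by `--control.type`. The sketch below builds the rough equivalent of the test-episode `record` command programmatically. It is illustrative only and assumes these dataclasses can be instantiated outside the draccus entry point (note that `RecordControlConfig.__post_init__` also re-reads CLI arguments to pick up an optional pretrained policy path); the repo id and task string are placeholders.

```python
from lerobot.common.robot_devices.control_configs import (
    ControlPipelineConfig,
    RecordControlConfig,
)
from lerobot.common.robot_devices.robots.configs import So100RobotConfig
from lerobot.common.robot_devices.robots.utils import make_robot_from_config

# With no policy configured, the policy/device/use_amp checks in __post_init__ do not apply.
cfg = ControlPipelineConfig(
    robot=So100RobotConfig(),
    control=RecordControlConfig(
        repo_id="user/koch_test",  # placeholder for $USER/koch_test
        single_task="Pick the Lego block and drop it in the box on the right.",  # placeholder task
        fps=30,
        num_episodes=1,
        run_compute_stats=False,
    ),
)

# The robot is then built from the robot part of the pipeline config.
robot = make_robot_from_config(cfg.robot)
```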
@@ -73,6 +88,7 @@ - Train on this dataset with the ACT policy: ```bash +# TODO(rcadene): fix python lerobot/scripts/train.py \ policy=act_koch_real \ env=koch_real \ @@ -82,30 +98,38 @@ - Run the pretrained policy on the robot: ```bash -python lerobot/scripts/control_robot.py record \ - --fps 30 \ - --repo-id $USER/eval_act_koch_real \ - --num-episodes 10 \ - --warmup-time-s 2 \ - --episode-time-s 30 \ - --reset-time-s 10 - -p outputs/train/act_koch_real/checkpoints/080000/pretrained_model +python lerobot/scripts/control_robot.py \ + --robot.type=so100 \ + --control.type=record \ + --control.fps=30 \ + --control.repo_id=$USER/eval_act_koch_real \ + --control.num_episodes=10 \ + --control.warmup_time_s=2 \ + --control.episode_time_s=30 \ + --control.reset_time_s=10 + --control.pretrained_policy_path=outputs/train/act_koch_real/checkpoints/080000/pretrained_model ``` """ -import argparse import logging import time -from pathlib import Path -from typing import List + +import draccus # from safetensors.torch import load_file, save_file from lerobot.common.datasets.lerobot_dataset import LeRobotDataset +from lerobot.common.policies.factory import make_policy +from lerobot.common.robot_devices.control_configs import ( + CalibrateControlConfig, + ControlPipelineConfig, + RecordControlConfig, + ReplayControlConfig, + TeleoperateControlConfig, +) from lerobot.common.robot_devices.control_utils import ( control_loop, has_method, init_keyboard_listener, - init_policy, log_control_info, record_episode, reset_environment, @@ -114,10 +138,9 @@ stop_recording, warmup_record, ) -from lerobot.common.robot_devices.robots.factory import make_robot -from lerobot.common.robot_devices.robots.utils import Robot +from lerobot.common.robot_devices.robots.utils import Robot, make_robot_from_config from lerobot.common.robot_devices.utils import busy_wait, safe_disconnect -from lerobot.common.utils.utils import init_hydra_config, init_logging, log_say, none_or_int +from lerobot.common.utils.utils import init_logging, log_say ######################################################################################## # Control modes @@ -125,7 +148,7 @@ @safe_disconnect -def calibrate(robot: Robot, arms: list[str] | None): +def calibrate(robot: Robot, cfg: CalibrateControlConfig): # TODO(aliberts): move this code in robots' classes if robot.robot_type.startswith("stretch"): if not robot.is_connected: @@ -134,9 +157,7 @@ def calibrate(robot: Robot, arms: list[str] | None): robot.home() return - if arms is None: - arms = robot.available_arms - + arms = robot.available_arms if cfg.arms is None else cfg.arms unknown_arms = [arm_id for arm_id in arms if arm_id not in robot.available_arms] available_arms_str = " ".join(robot.available_arms) unknown_arms_str = " ".join(unknown_arms) @@ -171,91 +192,49 @@ def calibrate(robot: Robot, arms: list[str] | None): @safe_disconnect -def teleoperate( - robot: Robot, fps: int | None = None, teleop_time_s: float | None = None, display_cameras: bool = False -): +def teleoperate(robot: Robot, cfg: TeleoperateControlConfig): control_loop( robot, - control_time_s=teleop_time_s, - fps=fps, + control_time_s=cfg.teleop_time_s, + fps=cfg.fps, teleoperate=True, - display_cameras=display_cameras, + display_cameras=cfg.display_cameras, ) @safe_disconnect def record( robot: Robot, - root: Path, - repo_id: str, - single_task: str, - pretrained_policy_name_or_path: str | None = None, - policy_overrides: List[str] | None = None, - fps: int | None = None, - warmup_time_s: int | float = 2, - episode_time_s: 
int | float = 10, - reset_time_s: int | float = 5, - num_episodes: int = 50, - video: bool = True, - run_compute_stats: bool = True, - push_to_hub: bool = True, - tags: list[str] | None = None, - num_image_writer_processes: int = 0, - num_image_writer_threads_per_camera: int = 4, - display_cameras: bool = True, - play_sounds: bool = True, - resume: bool = False, - # TODO(rcadene, aliberts): remove local_files_only when refactor with dataset as argument - local_files_only: bool = False, + cfg: RecordControlConfig, ) -> LeRobotDataset: # TODO(rcadene): Add option to record logs - listener = None - events = None - policy = None - device = None - use_amp = None - - if single_task: - task = single_task - else: - raise NotImplementedError("Only single-task recording is supported for now") - - # Load pretrained policy - if pretrained_policy_name_or_path is not None: - policy, policy_fps, device, use_amp = init_policy(pretrained_policy_name_or_path, policy_overrides) - - if fps is None: - fps = policy_fps - logging.warning(f"No fps provided, so using the fps from policy config ({policy_fps}).") - elif fps != policy_fps: - logging.warning( - f"There is a mismatch between the provided fps ({fps}) and the one from policy config ({policy_fps})." - ) - - if resume: + if cfg.resume: dataset = LeRobotDataset( - repo_id, - root=root, - local_files_only=local_files_only, + cfg.repo_id, + root=cfg.root, + local_files_only=cfg.local_files_only, ) dataset.start_image_writer( - num_processes=num_image_writer_processes, - num_threads=num_image_writer_threads_per_camera * len(robot.cameras), + num_processes=cfg.num_image_writer_processes, + num_threads=cfg.num_image_writer_threads_per_camera * len(robot.cameras), ) - sanity_check_dataset_robot_compatibility(dataset, robot, fps, video) + sanity_check_dataset_robot_compatibility(dataset, robot, cfg.fps, cfg.video) else: # Create empty dataset or load existing saved episodes - sanity_check_dataset_name(repo_id, policy) + sanity_check_dataset_name(cfg.repo_id, cfg.policy) dataset = LeRobotDataset.create( - repo_id, - fps, - root=root, + cfg.repo_id, + cfg.fps, + root=cfg.root, robot=robot, - use_videos=video, - image_writer_processes=num_image_writer_processes, - image_writer_threads=num_image_writer_threads_per_camera * len(robot.cameras), + use_videos=cfg.video, + image_writer_processes=cfg.num_image_writer_processes, + image_writer_threads=cfg.num_image_writer_threads_per_camera * len(robot.cameras), ) + # Load pretrained policy + policy = None if cfg.policy is None else make_policy(cfg.policy, cfg.device, ds_meta=dataset.meta) + if not robot.is_connected: robot.connect() @@ -266,33 +245,28 @@ def record( # 2. give times to the robot devices to connect and start synchronizing, # 3. place the cameras windows on screen enable_teleoperation = policy is None - log_say("Warmup record", play_sounds) - warmup_record(robot, events, enable_teleoperation, warmup_time_s, display_cameras, fps) + log_say("Warmup record", cfg.play_sounds) + warmup_record(robot, events, enable_teleoperation, cfg.warmup_time_s, cfg.display_cameras, cfg.fps) if has_method(robot, "teleop_safety_stop"): robot.teleop_safety_stop() recorded_episodes = 0 while True: - if recorded_episodes >= num_episodes: + if recorded_episodes >= cfg.num_episodes: break - # TODO(aliberts): add task prompt for multitask here. Might need to temporarily disable event if - # input() messes with them. 
- # if multi_task: - # task = input("Enter your task description: ") - - log_say(f"Recording episode {dataset.num_episodes}", play_sounds) + log_say(f"Recording episode {dataset.num_episodes}", cfg.play_sounds) record_episode( dataset=dataset, robot=robot, events=events, - episode_time_s=episode_time_s, - display_cameras=display_cameras, + episode_time_s=cfg.episode_time_s, + display_cameras=cfg.display_cameras, policy=policy, - device=device, - use_amp=use_amp, - fps=fps, + device=cfg.device, + use_amp=cfg.use_amp, + fps=cfg.fps, ) # Execute a few seconds without recording to give time to manually reset the environment @@ -300,59 +274,56 @@ def record( # TODO(rcadene): add an option to enable teleoperation during reset # Skip reset for the last episode to be recorded if not events["stop_recording"] and ( - (dataset.num_episodes < num_episodes - 1) or events["rerecord_episode"] + (dataset.num_episodes < cfg.num_episodes - 1) or events["rerecord_episode"] ): - log_say("Reset the environment", play_sounds) - reset_environment(robot, events, reset_time_s) + log_say("Reset the environment", cfg.play_sounds) + reset_environment(robot, events, cfg.reset_time_s) if events["rerecord_episode"]: - log_say("Re-record episode", play_sounds) + log_say("Re-record episode", cfg.play_sounds) events["rerecord_episode"] = False events["exit_early"] = False dataset.clear_episode_buffer() continue - dataset.save_episode(task) + dataset.save_episode(cfg.single_task) recorded_episodes += 1 if events["stop_recording"]: break - log_say("Stop recording", play_sounds, blocking=True) - stop_recording(robot, listener, display_cameras) + log_say("Stop recording", cfg.play_sounds, blocking=True) + stop_recording(robot, listener, cfg.display_cameras) - if run_compute_stats: + if cfg.run_compute_stats: logging.info("Computing dataset statistics") - dataset.consolidate(run_compute_stats) + dataset.consolidate(cfg.run_compute_stats) - if push_to_hub: - dataset.push_to_hub(tags=tags) + if cfg.push_to_hub: + dataset.push_to_hub(tags=cfg.tags, private=cfg.private) - log_say("Exiting", play_sounds) + log_say("Exiting", cfg.play_sounds) return dataset @safe_disconnect def replay( robot: Robot, - root: Path, - repo_id: str, - episode: int, - fps: int | None = None, - play_sounds: bool = True, - local_files_only: bool = False, + cfg: ReplayControlConfig, ): # TODO(rcadene, aliberts): refactor with control_loop, once `dataset` is an instance of LeRobotDataset # TODO(rcadene): Add option to record logs - dataset = LeRobotDataset(repo_id, root=root, episodes=[episode], local_files_only=local_files_only) + dataset = LeRobotDataset( + cfg.repo_id, root=cfg.root, episodes=[cfg.episode], local_files_only=cfg.local_files_only + ) actions = dataset.hf_dataset.select_columns("action") if not robot.is_connected: robot.connect() - log_say("Replaying episode", play_sounds, blocking=True) + log_say("Replaying episode", cfg.play_sounds, blocking=True) for idx in range(dataset.num_frames): start_episode_t = time.perf_counter() @@ -360,216 +331,32 @@ def replay( robot.send_action(action) dt_s = time.perf_counter() - start_episode_t - busy_wait(1 / fps - dt_s) + busy_wait(1 / cfg.fps - dt_s) dt_s = time.perf_counter() - start_episode_t - log_control_info(robot, dt_s, fps=fps) - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - subparsers = parser.add_subparsers(dest="mode", required=True) - - # Set common options for all the subparsers - base_parser = argparse.ArgumentParser(add_help=False) - base_parser.add_argument( - 
"--robot-path", - type=str, - default="lerobot/configs/robot/koch.yaml", - help="Path to robot yaml file used to instantiate the robot using `make_robot` factory function.", - ) - base_parser.add_argument( - "--robot-overrides", - type=str, - nargs="*", - help="Any key=value arguments to override config values (use dots for.nested=overrides)", - ) - - parser_calib = subparsers.add_parser("calibrate", parents=[base_parser]) - parser_calib.add_argument( - "--arms", - type=str, - nargs="*", - help="List of arms to calibrate (e.g. `--arms left_follower right_follower left_leader`)", - ) - - parser_teleop = subparsers.add_parser("teleoperate", parents=[base_parser]) - parser_teleop.add_argument( - "--fps", type=none_or_int, default=None, help="Frames per second (set to None to disable)" - ) - parser_teleop.add_argument( - "--display-cameras", - type=int, - default=1, - help="Display all cameras on screen (set to 1 to display or 0).", - ) - - parser_record = subparsers.add_parser("record", parents=[base_parser]) - task_args = parser_record.add_mutually_exclusive_group(required=True) - parser_record.add_argument( - "--fps", type=none_or_int, default=None, help="Frames per second (set to None to disable)" - ) - task_args.add_argument( - "--single-task", - type=str, - help="A short but accurate description of the task performed during the recording.", - ) - # TODO(aliberts): add multi-task support - # task_args.add_argument( - # "--multi-task", - # type=int, - # help="You will need to enter the task performed at the start of each episode.", - # ) - parser_record.add_argument( - "--root", - type=Path, - default=None, - help="Root directory where the dataset will be stored (e.g. 'dataset/path').", - ) - parser_record.add_argument( - "--repo-id", - type=str, - default="lerobot/test", - help="Dataset identifier. By convention it should match '{hf_username}/{dataset_name}' (e.g. `lerobot/test`).", - ) - parser_record.add_argument( - "--local-files-only", - type=int, - default=0, - help="Use local files only. By default, this script will try to fetch the dataset from the hub if it exists.", - ) - parser_record.add_argument( - "--warmup-time-s", - type=int, - default=10, - help="Number of seconds before starting data collection. It allows the robot devices to warmup and synchronize.", - ) - parser_record.add_argument( - "--episode-time-s", - type=int, - default=60, - help="Number of seconds for data recording for each episode.", - ) - parser_record.add_argument( - "--reset-time-s", - type=int, - default=60, - help="Number of seconds for resetting the environment after each episode.", - ) - parser_record.add_argument("--num-episodes", type=int, default=50, help="Number of episodes to record.") - parser_record.add_argument( - "--run-compute-stats", - type=int, - default=1, - help="By default, run the computation of the data statistics at the end of data collection. Compute intensive and not required to just replay an episode.", - ) - parser_record.add_argument( - "--push-to-hub", - type=int, - default=1, - help="Upload dataset to Hugging Face hub.", - ) - parser_record.add_argument( - "--tags", - type=str, - nargs="*", - help="Add tags to your dataset on the hub.", - ) - parser_record.add_argument( - "--num-image-writer-processes", - type=int, - default=0, - help=( - "Number of subprocesses handling the saving of frames as PNGs. Set to 0 to use threads only; " - "set to ≥1 to use subprocesses, each using threads to write images. The best number of processes " - "and threads depends on your system. 
We recommend 4 threads per camera with 0 processes. " - "If fps is unstable, adjust the thread count. If still unstable, try using 1 or more subprocesses." - ), - ) - parser_record.add_argument( - "--num-image-writer-threads-per-camera", - type=int, - default=4, - help=( - "Number of threads writing the frames as png images on disk, per camera. " - "Too many threads might cause unstable teleoperation fps due to main thread being blocked. " - "Not enough threads might cause low camera fps." - ), - ) - parser_record.add_argument( - "--resume", - type=int, - default=0, - help="Resume recording on an existing dataset.", - ) - parser_record.add_argument( - "-p", - "--pretrained-policy-name-or-path", - type=str, - help=( - "Either the repo ID of a model hosted on the Hub or a path to a directory containing weights " - "saved using `Policy.save_pretrained`." - ), - ) - parser_record.add_argument( - "--policy-overrides", - type=str, - nargs="*", - help="Any key=value arguments to override config values (use dots for.nested=overrides)", - ) + log_control_info(robot, dt_s, fps=cfg.fps) - parser_replay = subparsers.add_parser("replay", parents=[base_parser]) - parser_replay.add_argument( - "--fps", type=none_or_int, default=None, help="Frames per second (set to None to disable)" - ) - parser_replay.add_argument( - "--root", - type=Path, - default=None, - help="Root directory where the dataset will be stored (e.g. 'dataset/path').", - ) - parser_replay.add_argument( - "--repo-id", - type=str, - default="lerobot/test", - help="Dataset identifier. By convention it should match '{hf_username}/{dataset_name}' (e.g. `lerobot/test`).", - ) - parser_replay.add_argument( - "--local-files-only", - type=int, - default=0, - help="Use local files only. By default, this script will try to fetch the dataset from the hub if it exists.", - ) - parser_replay.add_argument("--episode", type=int, default=0, help="Index of the episode to replay.") - - args = parser.parse_args() +@draccus.wrap() +def control_robot(cfg: ControlPipelineConfig): init_logging() - control_mode = args.mode - robot_path = args.robot_path - robot_overrides = args.robot_overrides - kwargs = vars(args) - del kwargs["mode"] - del kwargs["robot_path"] - del kwargs["robot_overrides"] - - robot_cfg = init_hydra_config(robot_path, robot_overrides) - robot = make_robot(robot_cfg) + robot = make_robot_from_config(cfg.robot) - if control_mode == "calibrate": - calibrate(robot, **kwargs) - - elif control_mode == "teleoperate": - teleoperate(robot, **kwargs) - - elif control_mode == "record": - record(robot, **kwargs) - - elif control_mode == "replay": - replay(robot, **kwargs) + if isinstance(cfg.control, CalibrateControlConfig): + calibrate(robot, cfg.control) + elif isinstance(cfg.control, TeleoperateControlConfig): + teleoperate(robot, cfg.control) + elif isinstance(cfg.control, RecordControlConfig): + record(robot, cfg.control) + elif isinstance(cfg.control, ReplayControlConfig): + replay(robot, cfg.control) if robot.is_connected: # Disconnect manually to avoid a "Core dump" during process # termination due to camera threads not properly exiting. 
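For context on the `@draccus.wrap()` entry point introduced above: the other control modes would presumably be selected the same way as the `record` example earlier in this file, with `--control.type` picking the config subclass and the remaining dotted flags mapping onto its dataclass fields. The commands below are an illustrative sketch only (flag spellings follow the pattern shown in the docstring; the arm name and boolean syntax are assumptions, not verified against draccus):

```bash
# Teleoperate at 30 fps for 60 s (fields from TeleoperateControlConfig)
python lerobot/scripts/control_robot.py \
    --robot.type=so100 \
    --control.type=teleoperate \
    --control.fps=30 \
    --control.teleop_time_s=60

# Calibrate a specific arm (field from CalibrateControlConfig; arm id is hypothetical)
python lerobot/scripts/control_robot.py \
    --robot.type=so100 \
    --control.type=calibrate \
    --control.arms='["main_follower"]'
```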
robot.disconnect() + + +if __name__ == "__main__": + control_robot() diff --git a/lerobot/scripts/control_sim_robot.py b/lerobot/scripts/control_sim_robot.py index 3a7f2288d5..9955ea52f0 100644 --- a/lerobot/scripts/control_sim_robot.py +++ b/lerobot/scripts/control_sim_robot.py @@ -90,8 +90,7 @@ sanity_check_dataset_robot_compatibility, stop_recording, ) -from lerobot.common.robot_devices.robots.factory import make_robot -from lerobot.common.robot_devices.robots.utils import Robot +from lerobot.common.robot_devices.robots.utils import Robot, make_robot from lerobot.common.robot_devices.utils import busy_wait from lerobot.common.utils.utils import init_hydra_config, init_logging, log_say @@ -227,7 +226,7 @@ def record( shape = env.observation_space[key].shape if not key.startswith("observation.image."): key = "observation.image." + key - features[key] = {"dtype": "video", "names": ["channel", "height", "width"], "shape": shape} + features[key] = {"dtype": "video", "names": ["channels", "height", "width"], "shape": shape} for key, obs_key in state_keys_dict.items(): features[key] = { @@ -515,6 +514,7 @@ def env_constructor(): if control_mode in ["teleoperate", "record"]: # make robot robot_overrides = ["~cameras", "~follower_arms"] + # TODO(rcadene): remove robot_cfg = init_hydra_config(robot_path, robot_overrides) robot = make_robot(robot_cfg) robot.connect() diff --git a/lerobot/scripts/train.py b/lerobot/scripts/train.py index cc24301b9a..4ac9b1c9ca 100644 --- a/lerobot/scripts/train.py +++ b/lerobot/scripts/train.py @@ -204,7 +204,7 @@ def train(cfg: TrainPipelineConfig): # On real-world data, no need to create an environment as evaluations are done outside train.py, # using the eval.py instead, with gym_dora environment and dora-rs. eval_env = None - if cfg.eval_freq > 0 and cfg.env.type != "real_world": + if cfg.eval_freq > 0 or cfg.env is None: logging.info("Creating env") eval_env = make_env(cfg.env, n_envs=cfg.eval.batch_size) @@ -265,10 +265,10 @@ def evaluate_and_checkpoint_if_needed(step: int, is_online: bool): # needed (choose 6 as a minimum for consistency without being overkill). logger.save_checkpoint( step, + step_identifier, policy, optimizer, lr_scheduler, - identifier=step_identifier, ) logging.info("Resume training") diff --git a/lerobot/scripts/visualize_image_transforms.py b/lerobot/scripts/visualize_image_transforms.py index f9fb5c08aa..727fe1788b 100644 --- a/lerobot/scripts/visualize_image_transforms.py +++ b/lerobot/scripts/visualize_image_transforms.py @@ -18,142 +18,102 @@ This script will generate examples of transformed images as they are output by LeRobot dataset. 
Additionally, each individual transform can be visualized separately as well as examples of combined transforms - ---- Usage Examples --- - -Increase hue jitter -``` -python lerobot/scripts/visualize_image_transforms.py \ - dataset_repo_id=lerobot/aloha_mobile_shrimp \ - training.image_transforms.hue.min_max="[-0.25,0.25]" -``` - -Increase brightness & brightness weight -``` -python lerobot/scripts/visualize_image_transforms.py \ - dataset_repo_id=lerobot/aloha_mobile_shrimp \ - training.image_transforms.brightness.weight=10.0 \ - training.image_transforms.brightness.min_max="[1.0,2.0]" -``` - -Blur images and disable saturation & hue -``` -python lerobot/scripts/visualize_image_transforms.py \ - dataset_repo_id=lerobot/aloha_mobile_shrimp \ - training.image_transforms.sharpness.weight=10.0 \ - training.image_transforms.sharpness.min_max="[0.0,1.0]" \ - training.image_transforms.saturation.weight=0.0 \ - training.image_transforms.hue.weight=0.0 -``` - -Use all transforms with random order -``` +Example: +```bash python lerobot/scripts/visualize_image_transforms.py \ - dataset_repo_id=lerobot/aloha_mobile_shrimp \ - training.image_transforms.max_num_transforms=5 \ - training.image_transforms.random_order=true + --repo_id=lerobot/pusht \ + --episodes='[0]' \ + --image_transforms.enable=True ``` - """ +import logging +from copy import deepcopy +from dataclasses import replace from pathlib import Path -import hydra +import draccus from torchvision.transforms import ToPILImage from lerobot.common.datasets.lerobot_dataset import LeRobotDataset -from lerobot.common.datasets.transforms import get_image_transforms +from lerobot.common.datasets.transforms import ( + ImageTransforms, + ImageTransformsConfig, + make_transform_from_config, +) +from lerobot.configs.default import DatasetConfig OUTPUT_DIR = Path("outputs/image_transforms") to_pil = ToPILImage() -def save_config_all_transforms(cfg, original_frame, output_dir, n_examples): - tf = get_image_transforms( - brightness_weight=cfg.brightness.weight, - brightness_min_max=cfg.brightness.min_max, - contrast_weight=cfg.contrast.weight, - contrast_min_max=cfg.contrast.min_max, - saturation_weight=cfg.saturation.weight, - saturation_min_max=cfg.saturation.min_max, - hue_weight=cfg.hue.weight, - hue_min_max=cfg.hue.min_max, - sharpness_weight=cfg.sharpness.weight, - sharpness_min_max=cfg.sharpness.min_max, - max_num_transforms=cfg.max_num_transforms, - random_order=cfg.random_order, - ) - +def save_all_transforms(cfg: ImageTransformsConfig, original_frame, output_dir, n_examples): output_dir_all = output_dir / "all" output_dir_all.mkdir(parents=True, exist_ok=True) + tfs = ImageTransforms(cfg) for i in range(1, n_examples + 1): - transformed_frame = tf(original_frame) + transformed_frame = tfs(original_frame) to_pil(transformed_frame).save(output_dir_all / f"{i}.png", quality=100) print("Combined transforms examples saved to:") print(f" {output_dir_all}") -def save_config_single_transforms(cfg, original_frame, output_dir, n_examples): - transforms = [ - "brightness", - "contrast", - "saturation", - "hue", - "sharpness", - ] +def save_each_transform(cfg: ImageTransformsConfig, original_frame, output_dir, n_examples): + if not cfg.enable: + logging.warning( + "No single transforms will be saved, because `image_transforms.enable=False`. To enable, set `enable` to True in `ImageTransformsConfig` or in the command line with `--image_transforms.enable=True`." 
+ ) + return + print("Individual transforms examples saved to:") - for transform in transforms: - # Apply one transformation with random value in min_max range - kwargs = { - f"{transform}_weight": cfg[f"{transform}"].weight, - f"{transform}_min_max": cfg[f"{transform}"].min_max, - } - tf = get_image_transforms(**kwargs) - output_dir_single = output_dir / f"{transform}" + for tf_name, tf_cfg in cfg.tfs.items(): + # Apply a few transformation with random value in min_max range + output_dir_single = output_dir / tf_name output_dir_single.mkdir(parents=True, exist_ok=True) + tf = make_transform_from_config(tf_cfg) for i in range(1, n_examples + 1): transformed_frame = tf(original_frame) to_pil(transformed_frame).save(output_dir_single / f"{i}.png", quality=100) - # Apply min transformation - min_value, max_value = cfg[f"{transform}"].min_max - kwargs = { - f"{transform}_weight": cfg[f"{transform}"].weight, - f"{transform}_min_max": (min_value, min_value), - } - tf = get_image_transforms(**kwargs) - transformed_frame = tf(original_frame) - to_pil(transformed_frame).save(output_dir_single / "min.png", quality=100) - - # Apply max transformation - kwargs = { - f"{transform}_weight": cfg[f"{transform}"].weight, - f"{transform}_min_max": (max_value, max_value), - } - tf = get_image_transforms(**kwargs) - transformed_frame = tf(original_frame) - to_pil(transformed_frame).save(output_dir_single / "max.png", quality=100) - - # Apply mean transformation - mean_value = (min_value + max_value) / 2 - kwargs = { - f"{transform}_weight": cfg[f"{transform}"].weight, - f"{transform}_min_max": (mean_value, mean_value), - } - tf = get_image_transforms(**kwargs) - transformed_frame = tf(original_frame) - to_pil(transformed_frame).save(output_dir_single / "mean.png", quality=100) + # Apply min, max, average transformations + tf_cfg_kwgs_min = deepcopy(tf_cfg.kwargs) + tf_cfg_kwgs_max = deepcopy(tf_cfg.kwargs) + tf_cfg_kwgs_avg = deepcopy(tf_cfg.kwargs) + + for key, (min_, max_) in tf_cfg.kwargs.items(): + avg = (min_ + max_) / 2 + tf_cfg_kwgs_min[key] = [min_, min_] + tf_cfg_kwgs_max[key] = [max_, max_] + tf_cfg_kwgs_avg[key] = [avg, avg] + + tf_min = make_transform_from_config(replace(tf_cfg, **{"kwargs": tf_cfg_kwgs_min})) + tf_max = make_transform_from_config(replace(tf_cfg, **{"kwargs": tf_cfg_kwgs_max})) + tf_avg = make_transform_from_config(replace(tf_cfg, **{"kwargs": tf_cfg_kwgs_avg})) + + tf_frame_min = tf_min(original_frame) + tf_frame_max = tf_max(original_frame) + tf_frame_avg = tf_avg(original_frame) + + to_pil(tf_frame_min).save(output_dir_single / "min.png", quality=100) + to_pil(tf_frame_max).save(output_dir_single / "max.png", quality=100) + to_pil(tf_frame_avg).save(output_dir_single / "mean.png", quality=100) print(f" {output_dir_single}") -def visualize_transforms(cfg, output_dir: Path, n_examples: int = 5): - dataset = LeRobotDataset(cfg.dataset_repo_id) +@draccus.wrap() +def visualize_image_transforms(cfg: DatasetConfig, output_dir: Path = OUTPUT_DIR, n_examples: int = 5): + dataset = LeRobotDataset( + repo_id=cfg.repo_id, + episodes=cfg.episodes, + local_files_only=cfg.local_files_only, + video_backend=cfg.video_backend, + ) - output_dir = output_dir / cfg.dataset_repo_id.split("/")[-1] + output_dir = output_dir / cfg.repo_id.split("/")[-1] output_dir.mkdir(parents=True, exist_ok=True) # Get 1st frame from 1st camera of 1st episode @@ -162,14 +122,9 @@ def visualize_transforms(cfg, output_dir: Path, n_examples: int = 5): print("\nOriginal frame saved to:") print(f" {output_dir / 
'original_frame.png'}.") - save_config_all_transforms(cfg.training.image_transforms, original_frame, output_dir, n_examples) - save_config_single_transforms(cfg.training.image_transforms, original_frame, output_dir, n_examples) - - -@hydra.main(version_base="1.2", config_name="default", config_path="../configs") -def visualize_transforms_cli(cfg): - visualize_transforms(cfg, output_dir=OUTPUT_DIR) + save_all_transforms(cfg.image_transforms, original_frame, output_dir, n_examples) + save_each_transform(cfg.image_transforms, original_frame, output_dir, n_examples) if __name__ == "__main__": - visualize_transforms_cli() + visualize_image_transforms() diff --git a/tests/conftest.py b/tests/conftest.py index 2075c2aa65..b49bebd926 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -20,8 +20,8 @@ from serial import SerialException from lerobot import available_cameras, available_motors, available_robots -from lerobot.common.utils.utils import init_hydra_config -from tests.utils import DEVICE, ROBOT_CONFIG_PATH_TEMPLATE, make_camera, make_motors_bus +from lerobot.common.robot_devices.robots.utils import make_robot +from tests.utils import DEVICE, make_camera, make_motors_bus # Import fixture modules as plugins pytest_plugins = [ @@ -43,11 +43,7 @@ def is_robot_available(robot_type): ) try: - from lerobot.common.robot_devices.robots.factory import make_robot - - config_path = ROBOT_CONFIG_PATH_TEMPLATE.format(robot=robot_type) - robot_cfg = init_hydra_config(config_path) - robot = make_robot(robot_cfg) + robot = make_robot(robot_type) robot.connect() del robot return True diff --git a/tests/scripts/save_dataset_to_safetensors.py b/tests/scripts/save_dataset_to_safetensors.py index b2f4969fcb..84c8f169ac 100644 --- a/tests/scripts/save_dataset_to_safetensors.py +++ b/tests/scripts/save_dataset_to_safetensors.py @@ -43,6 +43,7 @@ def save_dataset_to_safetensors(output_dir, repo_id="lerobot/pusht"): repo_dir.mkdir(parents=True, exist_ok=True) dataset = LeRobotDataset( repo_id=repo_id, + episodes=[0], ) # save 2 first frames of first episode diff --git a/tests/scripts/save_image_transforms_to_safetensors.py b/tests/scripts/save_image_transforms_to_safetensors.py index 1fa194e50a..8a011a22cc 100644 --- a/tests/scripts/save_image_transforms_to_safetensors.py +++ b/tests/scripts/save_image_transforms_to_safetensors.py @@ -19,29 +19,21 @@ from safetensors.torch import save_file from lerobot.common.datasets.lerobot_dataset import LeRobotDataset -from lerobot.common.datasets.transforms import get_image_transforms -from lerobot.common.utils.utils import init_hydra_config, seeded_context -from tests.test_image_transforms import ARTIFACT_DIR, DATASET_REPO_ID -from tests.utils import DEFAULT_CONFIG_PATH +from lerobot.common.datasets.transforms import ( + ImageTransformConfig, + ImageTransforms, + ImageTransformsConfig, + make_transform_from_config, +) +from lerobot.common.utils.utils import seeded_context + +ARTIFACT_DIR = Path("tests/data/save_image_transforms_to_safetensors") +DATASET_REPO_ID = "lerobot/aloha_mobile_shrimp" def save_default_config_transform(original_frame: torch.Tensor, output_dir: Path): - cfg = init_hydra_config(DEFAULT_CONFIG_PATH) - cfg_tf = cfg.training.image_transforms - default_tf = get_image_transforms( - brightness_weight=cfg_tf.brightness.weight, - brightness_min_max=cfg_tf.brightness.min_max, - contrast_weight=cfg_tf.contrast.weight, - contrast_min_max=cfg_tf.contrast.min_max, - saturation_weight=cfg_tf.saturation.weight, - saturation_min_max=cfg_tf.saturation.min_max, - 
hue_weight=cfg_tf.hue.weight, - hue_min_max=cfg_tf.hue.min_max, - sharpness_weight=cfg_tf.sharpness.weight, - sharpness_min_max=cfg_tf.sharpness.min_max, - max_num_transforms=cfg_tf.max_num_transforms, - random_order=cfg_tf.random_order, - ) + cfg = ImageTransformsConfig(enable=True) + default_tf = ImageTransforms(cfg) with seeded_context(1337): img_tf = default_tf(original_frame) @@ -51,29 +43,26 @@ def save_default_config_transform(original_frame: torch.Tensor, output_dir: Path def save_single_transforms(original_frame: torch.Tensor, output_dir: Path): transforms = { - "brightness": [(0.5, 0.5), (2.0, 2.0)], - "contrast": [(0.5, 0.5), (2.0, 2.0)], - "saturation": [(0.5, 0.5), (2.0, 2.0)], - "hue": [(-0.25, -0.25), (0.25, 0.25)], - "sharpness": [(0.5, 0.5), (2.0, 2.0)], + ("ColorJitter", "brightness", [(0.5, 0.5), (2.0, 2.0)]), + ("ColorJitter", "contrast", [(0.5, 0.5), (2.0, 2.0)]), + ("ColorJitter", "saturation", [(0.5, 0.5), (2.0, 2.0)]), + ("ColorJitter", "hue", [(-0.25, -0.25), (0.25, 0.25)]), + ("SharpnessJitter", "sharpness", [(0.5, 0.5), (2.0, 2.0)]), } frames = {"original_frame": original_frame} - for transform, values in transforms.items(): - for min_max in values: - kwargs = { - f"{transform}_weight": 1.0, - f"{transform}_min_max": min_max, - } - tf = get_image_transforms(**kwargs) - key = f"{transform}_{min_max[0]}_{min_max[1]}" + for tf_type, tf_name, min_max_values in transforms.items(): + for min_max in min_max_values: + tf_cfg = ImageTransformConfig(type=tf_type, kwargs={tf_name: min_max}) + tf = make_transform_from_config(tf_cfg) + key = f"{tf_name}_{min_max[0]}_{min_max[1]}" frames[key] = tf(original_frame) save_file(frames, output_dir / "single_transforms.safetensors") def main(): - dataset = LeRobotDataset(DATASET_REPO_ID, image_transforms=None) + dataset = LeRobotDataset(DATASET_REPO_ID, episodes=[0], image_transforms=None) output_dir = Path(ARTIFACT_DIR) output_dir.mkdir(parents=True, exist_ok=True) original_frame = dataset[0][dataset.meta.camera_keys[0]] diff --git a/tests/scripts/save_policy_to_safetensors.py b/tests/scripts/save_policy_to_safetensors.py index 29d0ae19de..381afebfa8 100644 --- a/tests/scripts/save_policy_to_safetensors.py +++ b/tests/scripts/save_policy_to_safetensors.py @@ -20,32 +20,34 @@ from safetensors.torch import save_file from lerobot.common.datasets.factory import make_dataset -from lerobot.common.policies.factory import make_policy -from lerobot.common.utils.utils import init_hydra_config, set_global_seed -from lerobot.scripts.train import make_optimizer_and_scheduler -from tests.utils import DEFAULT_CONFIG_PATH - - -def get_policy_stats(env_name, policy_name, extra_overrides): - cfg = init_hydra_config( - DEFAULT_CONFIG_PATH, - overrides=[ - f"env={env_name}", - f"policy={policy_name}", - "device=cpu", - ] - + extra_overrides, - ) +from lerobot.common.optim.factory import make_optimizer_and_scheduler +from lerobot.common.policies.factory import make_policy, make_policy_config +from lerobot.common.utils.utils import set_global_seed +from lerobot.configs.default import DatasetConfig +from lerobot.configs.training import TrainPipelineConfig + + +def get_policy_stats(ds_repo_id, env_name, policy_name, policy_kwargs, train_kwargs): + # TODO(rcadene, aliberts): env_name? 
set_global_seed(1337) - dataset = make_dataset(cfg) - policy = make_policy(cfg, dataset_stats=dataset.meta.stats) + + train_cfg = TrainPipelineConfig( + # TODO(rcadene, aliberts): remove dataset download + dataset=DatasetConfig(repo_id=ds_repo_id, episodes=[0]), + policy=make_policy_config(policy_name, **policy_kwargs), + device="cpu", + **train_kwargs, + ) + + dataset = make_dataset(train_cfg) + policy = make_policy(train_cfg.policy, ds_meta=dataset.meta, device=train_cfg.device) policy.train() - optimizer, _ = make_optimizer_and_scheduler(cfg, policy) + optimizer, _ = make_optimizer_and_scheduler(train_cfg, policy) dataloader = torch.utils.data.DataLoader( dataset, num_workers=0, - batch_size=cfg.training.batch_size, + batch_size=train_cfg.batch_size, shuffle=False, ) @@ -72,24 +74,28 @@ def get_policy_stats(env_name, policy_name, extra_overrides): optimizer.zero_grad() policy.reset() - # HACK: We reload a batch with no delta_timestamps as `select_action` won't expect a timestamps dimension - dataset.delta_timestamps = None + # HACK: We reload a batch with no delta_indices as `select_action` won't expect a timestamps dimension + # We simulate having an environment using a dataset by setting delta_indices to None and dropping tensors + # indicating padding (those ending with "_is_pad") + dataset.delta_indices = None batch = next(iter(dataloader)) obs = {} for k in batch: + if k.endswith("_is_pad"): + continue if k.startswith("observation"): obs[k] = batch[k] - if "n_action_steps" in cfg.policy: - actions_queue = cfg.policy.n_action_steps + if hasattr(train_cfg.policy, "n_action_steps"): + actions_queue = train_cfg.policy.n_action_steps else: - actions_queue = cfg.policy.n_action_repeats + actions_queue = train_cfg.policy.n_action_repeats actions = {str(i): policy.select_action(obs).contiguous() for i in range(actions_queue)} return output_dict, grad_stats, param_stats, actions -def save_policy_to_safetensors(output_dir, env_name, policy_name, extra_overrides, file_name_extra): +def save_policy_to_safetensors(output_dir, env_name, policy_name, policy_kwargs, file_name_extra): env_policy_dir = Path(output_dir) / f"{env_name}_{policy_name}{file_name_extra}" if env_policy_dir.exists(): @@ -99,7 +105,7 @@ def save_policy_to_safetensors(output_dir, env_name, policy_name, extra_override shutil.rmtree(env_policy_dir) env_policy_dir.mkdir(parents=True, exist_ok=True) - output_dict, grad_stats, param_stats, actions = get_policy_stats(env_name, policy_name, extra_overrides) + output_dict, grad_stats, param_stats, actions = get_policy_stats(env_name, policy_name, policy_kwargs) save_file(output_dict, env_policy_dir / "output_dict.safetensors") save_file(grad_stats, env_policy_dir / "grad_stats.safetensors") save_file(param_stats, env_policy_dir / "param_stats.safetensors") @@ -108,26 +114,31 @@ def save_policy_to_safetensors(output_dir, env_name, policy_name, extra_override if __name__ == "__main__": env_policies = [ - # ("xarm", "tdmpc", ["policy.use_mpc=false"], "use_policy"), - # ("xarm", "tdmpc", ["policy.use_mpc=true"], "use_mpc"), - # ( - # "pusht", - # "diffusion", - # [ - # "policy.n_action_steps=8", - # "policy.num_inference_steps=10", - # "policy.down_dims=[128, 256, 512]", - # ], - # "", - # ), - # ("aloha", "act", ["policy.n_action_steps=10"], ""), - # ("aloha", "act", ["policy.n_action_steps=1000", "policy.chunk_size=1000"], "_1000_steps"), - # ("dora_aloha_real", "act_real", ["policy.n_action_steps=10"], ""), - # ("dora_aloha_real", "act_real_no_state", ["policy.n_action_steps=10"], 
""), + ("lerobot/xarm_lift_medium", "xarm", "tdmpc", {"use_mpc": False}, "use_policy"), + ("lerobot/xarm_lift_medium", "xarm", "tdmpc", {"use_mpc": True}, "use_mpc"), + ( + "lerobot/pusht", + "pusht", + "diffusion", + { + "n_action_steps": 8, + "num_inference_steps": 10, + "down_dims": [128, 256, 512], + }, + "", + ), + ("lerobot/aloha_sim_insertion_human", "aloha", "act", {"n_action_steps": 10}, ""), + ( + "lerobot/aloha_sim_insertion_human", + "aloha", + "act", + {"n_action_steps": 1000, "chunk_size": 1000}, + "_1000_steps", + ), ] if len(env_policies) == 0: raise RuntimeError("No policies were provided!") - for env, policy, extra_overrides, file_name_extra in env_policies: + for ds_repo_id, env, policy, policy_kwargs, file_name_extra in env_policies: save_policy_to_safetensors( - "tests/data/save_policy_to_safetensors", env, policy, extra_overrides, file_name_extra + "tests/data/save_policy_to_safetensors", ds_repo_id, env, policy, policy_kwargs, file_name_extra ) diff --git a/tests/test_cameras.py b/tests/test_cameras.py index 67512779ad..1a1812f791 100644 --- a/tests/test_cameras.py +++ b/tests/test_cameras.py @@ -51,8 +51,10 @@ def test_camera(request, camera_type, mock): if camera_type == "opencv" and not mock: pytest.skip("TODO(rcadene): fix test for opencv physical camera") + camera_kwargs = {"camera_type": camera_type, "mock": mock} + # Test instantiating - camera = make_camera(camera_type, mock=mock) + camera = make_camera(**camera_kwargs) # Test reading, async reading, disconnecting before connecting raises an error with pytest.raises(RobotDeviceNotConnectedError): @@ -66,7 +68,7 @@ def test_camera(request, camera_type, mock): del camera # Test connecting - camera = make_camera(camera_type, mock=mock) + camera = make_camera(**camera_kwargs) camera.connect() assert camera.is_connected assert camera.fps is not None @@ -106,12 +108,12 @@ def test_camera(request, camera_type, mock): assert camera.thread is None # Test disconnecting with `__del__` - camera = make_camera(camera_type, mock=mock) + camera = make_camera(**camera_kwargs) camera.connect() del camera # Test acquiring a bgr image - camera = make_camera(camera_type, color_mode="bgr", mock=mock) + camera = make_camera(**camera_kwargs, color_mode="bgr") camera.connect() assert camera.color_mode == "bgr" bgr_color_image = camera.read() @@ -121,13 +123,13 @@ def test_camera(request, camera_type, mock): del camera # Test acquiring a rotated image - camera = make_camera(camera_type, mock=mock) + camera = make_camera(**camera_kwargs) camera.connect() ori_color_image = camera.read() del camera for rotation in [None, 90, 180, -90]: - camera = make_camera(camera_type, rotation=rotation, mock=mock) + camera = make_camera(**camera_kwargs, rotation=rotation) camera.connect() if mock: @@ -159,7 +161,7 @@ def test_camera(request, camera_type, mock): # TODO(rcadene): Add a test for a camera that supports fps=60 # Test width and height can be set - camera = make_camera(camera_type, fps=30, width=1280, height=720, mock=mock) + camera = make_camera(**camera_kwargs, fps=30, width=1280, height=720) camera.connect() assert camera.fps == 30 assert camera.width == 1280 @@ -172,7 +174,7 @@ def test_camera(request, camera_type, mock): del camera # Test not supported width and height raise an error - camera = make_camera(camera_type, fps=30, width=0, height=0, mock=mock) + camera = make_camera(**camera_kwargs, fps=30, width=0, height=0) with pytest.raises(OSError): camera.connect() del camera diff --git a/tests/test_control_robot.py 
b/tests/test_control_robot.py index 8df1089463..2d460b360a 100644 --- a/tests/test_control_robot.py +++ b/tests/test_control_robot.py @@ -30,17 +30,27 @@ import pytest from lerobot.common.logger import Logger +from lerobot.common.policies.act.configuration_act import ACTConfig from lerobot.common.policies.factory import make_policy -from lerobot.common.utils.utils import init_hydra_config +from lerobot.common.robot_devices.control_configs import ( + CalibrateControlConfig, + RecordControlConfig, + ReplayControlConfig, + TeleoperateControlConfig, +) +from lerobot.configs.default import DatasetConfig +from lerobot.configs.policies import PretrainedConfig +from lerobot.configs.training import TrainPipelineConfig from lerobot.scripts.control_robot import calibrate, record, replay, teleoperate -from lerobot.scripts.train import make_optimizer_and_scheduler from tests.test_robots import make_robot -from tests.utils import DEFAULT_CONFIG_PATH, DEVICE, TEST_ROBOT_TYPES, mock_calibration_dir, require_robot +from tests.utils import DEVICE, TEST_ROBOT_TYPES, mock_calibration_dir, require_robot @pytest.mark.parametrize("robot_type, mock", TEST_ROBOT_TYPES) @require_robot def test_teleoperate(tmpdir, request, robot_type, mock): + robot_kwargs = {"robot_type": robot_type, "mock": mock} + if mock and robot_type != "aloha": request.getfixturevalue("patch_builtins_input") @@ -49,39 +59,44 @@ def test_teleoperate(tmpdir, request, robot_type, mock): tmpdir = Path(tmpdir) calibration_dir = tmpdir / robot_type mock_calibration_dir(calibration_dir) - overrides = [f"calibration_dir={calibration_dir}"] + robot_kwargs["calibration_dir"] = calibration_dir else: # Use the default .cache/calibration folder when mock=False - overrides = None + pass - robot = make_robot(robot_type, overrides=overrides, mock=mock) - teleoperate(robot, teleop_time_s=1) - teleoperate(robot, fps=30, teleop_time_s=1) - teleoperate(robot, fps=60, teleop_time_s=1) + robot = make_robot(**robot_kwargs) + teleoperate(robot, TeleoperateControlConfig(teleop_time_s=1)) + teleoperate(robot, TeleoperateControlConfig(fps=30, teleop_time_s=1)) + teleoperate(robot, TeleoperateControlConfig(fps=60, teleop_time_s=1)) del robot @pytest.mark.parametrize("robot_type, mock", TEST_ROBOT_TYPES) @require_robot def test_calibrate(tmpdir, request, robot_type, mock): + robot_kwargs = {"robot_type": robot_type, "mock": mock} + if mock: request.getfixturevalue("patch_builtins_input") # Create an empty calibration directory to trigger manual calibration tmpdir = Path(tmpdir) calibration_dir = tmpdir / robot_type - overrides_calibration_dir = [f"calibration_dir={calibration_dir}"] + robot_kwargs["calibration_dir"] = calibration_dir - robot = make_robot(robot_type, overrides=overrides_calibration_dir, mock=mock) - calibrate(robot, arms=robot.available_arms) + robot = make_robot(**robot_kwargs) + calib_cfg = CalibrateControlConfig(arms=robot.available_arms) + calibrate(robot, calib_cfg) del robot @pytest.mark.parametrize("robot_type, mock", TEST_ROBOT_TYPES) @require_robot def test_record_without_cameras(tmpdir, request, robot_type, mock): + robot_kwargs = {"robot_type": robot_type, "mock": mock} + # Avoid using cameras - overrides = ["~cameras"] + robot_kwargs["cameras"] = {} if mock and robot_type != "aloha": request.getfixturevalue("patch_builtins_input") @@ -90,33 +105,38 @@ def test_record_without_cameras(tmpdir, request, robot_type, mock): # and avoid writing calibration files in user .cache/calibration folder calibration_dir = Path(tmpdir) / robot_type 
mock_calibration_dir(calibration_dir) - overrides.append(f"calibration_dir={calibration_dir}") + robot_kwargs["calibration_dir"] = calibration_dir + else: + # Use the default .cache/calibration folder when mock=False + pass repo_id = "lerobot/debug" root = Path(tmpdir) / "data" / repo_id single_task = "Do something." - robot = make_robot(robot_type, overrides=overrides, mock=mock) - record( - robot, - fps=30, - root=root, + robot = make_robot(**robot_kwargs) + rec_cfg = RecordControlConfig( repo_id=repo_id, single_task=single_task, - warmup_time_s=1, + root=root, + fps=30, + warmup_time_s=0.1, episode_time_s=1, + reset_time_s=0.1, num_episodes=2, run_compute_stats=False, push_to_hub=False, video=False, play_sounds=False, ) + record(robot, rec_cfg) @pytest.mark.parametrize("robot_type, mock", TEST_ROBOT_TYPES) @require_robot def test_record_and_replay_and_policy(tmpdir, request, robot_type, mock): tmpdir = Path(tmpdir) + robot_kwargs = {"robot_type": robot_type, "mock": mock} if mock and robot_type != "aloha": request.getfixturevalue("patch_builtins_input") @@ -125,28 +145,24 @@ def test_record_and_replay_and_policy(tmpdir, request, robot_type, mock): # and avoid writing calibration files in user .cache/calibration folder calibration_dir = tmpdir / robot_type mock_calibration_dir(calibration_dir) - overrides = [f"calibration_dir={calibration_dir}"] + robot_kwargs["calibration_dir"] = calibration_dir else: - # Use the default .cache/calibration folder when mock=False or for aloha - overrides = None - - env_name = "koch_real" - policy_name = "act_koch_real" + # Use the default .cache/calibration folder when mock=False + pass - repo_id = "lerobot/debug" + repo_id = "lerobot_test/debug" root = tmpdir / "data" / repo_id single_task = "Do something." - robot = make_robot(robot_type, overrides=overrides, mock=mock) - dataset = record( - robot, - root, - repo_id, - single_task, + robot = make_robot(**robot_kwargs) + rec_cfg = RecordControlConfig( + repo_id=repo_id, + single_task=single_task, + root=root, fps=1, - warmup_time_s=0.5, + warmup_time_s=0.1, episode_time_s=1, - reset_time_s=1, + reset_time_s=0.1, num_episodes=2, push_to_hub=False, # TODO(rcadene, aliberts): test video=True @@ -155,56 +171,34 @@ def test_record_and_replay_and_policy(tmpdir, request, robot_type, mock): display_cameras=False, play_sounds=False, ) + dataset = record(robot, rec_cfg) assert dataset.meta.total_episodes == 2 assert len(dataset) == 2 - replay(robot, episode=0, fps=1, root=root, repo_id=repo_id, play_sounds=False, local_files_only=True) - - # TODO(rcadene, aliberts): rethink this design - if robot_type == "aloha": - env_name = "aloha_real" - policy_name = "act_aloha_real" - elif robot_type in ["koch", "koch_bimanual"]: - env_name = "koch_real" - policy_name = "act_koch_real" - elif robot_type == "so100": - env_name = "so100_real" - policy_name = "act_so100_real" - elif robot_type == "moss": - env_name = "moss_real" - policy_name = "act_moss_real" - else: - raise NotImplementedError(robot_type) - - overrides = [ - f"env={env_name}", - f"policy={policy_name}", - f"device={DEVICE}", - ] + replay_cfg = ReplayControlConfig( + episode=0, fps=1, root=root, repo_id=repo_id, play_sounds=False, local_files_only=True + ) + replay(robot, replay_cfg) - if robot_type == "koch_bimanual": - overrides += ["env.state_dim=12", "env.action_dim=12"] + policy_cfg = ACTConfig() + policy = make_policy(policy_cfg, ds_meta=dataset.meta, device=DEVICE) - overrides += ["wandb.enable=false"] - overrides += ["env.fps=1"] + out_dir = tmpdir / 
"logger" - cfg = init_hydra_config( - DEFAULT_CONFIG_PATH, - overrides=overrides, + ds_cfg = DatasetConfig(repo_id, local_files_only=True) + train_cfg = TrainPipelineConfig( + dataset=ds_cfg, + policy=policy_cfg, + output_dir=out_dir, + device=DEVICE, ) - - policy = make_policy(hydra_cfg=cfg, dataset_stats=dataset.meta.stats) - optimizer, lr_scheduler = make_optimizer_and_scheduler(cfg, policy) - out_dir = tmpdir / "logger" - logger = Logger(cfg, out_dir, wandb_job_name="debug") + logger = Logger(train_cfg) logger.save_checkpoint( - 0, - policy, - optimizer, - lr_scheduler, + train_step=0, identifier=0, + policy=policy, ) - pretrained_policy_name_or_path = out_dir / "checkpoints/last/pretrained_model" + pretrained_policy_path = out_dir / "checkpoints/last/pretrained_model" # In `examples/9_use_aloha.md`, we advise using `num_image_writer_processes=1` # during inference, to reach constent fps, so we test this here. @@ -230,15 +224,14 @@ def test_record_and_replay_and_policy(tmpdir, request, robot_type, mock): eval_repo_id = "lerobot/eval_debug" eval_root = tmpdir / "data" / eval_repo_id - dataset = record( - robot, - eval_root, - eval_repo_id, - single_task, - pretrained_policy_name_or_path, - warmup_time_s=1, + rec_eval_cfg = RecordControlConfig( + repo_id=eval_repo_id, + root=eval_root, + single_task=single_task, + fps=1, + warmup_time_s=0.1, episode_time_s=1, - reset_time_s=1, + reset_time_s=0.1, num_episodes=2, run_compute_stats=False, push_to_hub=False, @@ -246,8 +239,14 @@ def test_record_and_replay_and_policy(tmpdir, request, robot_type, mock): display_cameras=False, play_sounds=False, num_image_writer_processes=num_image_writer_processes, + device=DEVICE, + use_amp=False, ) + rec_eval_cfg.policy = PretrainedConfig.from_pretrained(pretrained_policy_path) + rec_eval_cfg.policy.pretrained_path = pretrained_policy_path + + dataset = record(robot, rec_eval_cfg) assert dataset.num_episodes == 2 assert len(dataset) == 2 @@ -257,6 +256,8 @@ def test_record_and_replay_and_policy(tmpdir, request, robot_type, mock): @pytest.mark.parametrize("robot_type, mock", [("koch", True)]) @require_robot def test_resume_record(tmpdir, request, robot_type, mock): + robot_kwargs = {"robot_type": robot_type, "mock": mock} + if mock and robot_type != "aloha": request.getfixturevalue("patch_builtins_input") @@ -264,48 +265,50 @@ def test_resume_record(tmpdir, request, robot_type, mock): # and avoid writing calibration files in user .cache/calibration folder calibration_dir = tmpdir / robot_type mock_calibration_dir(calibration_dir) - overrides = [f"calibration_dir={calibration_dir}"] + robot_kwargs["calibration_dir"] = calibration_dir else: - # Use the default .cache/calibration folder when mock=False or for aloha - overrides = [] + # Use the default .cache/calibration folder when mock=False + pass - robot = make_robot(robot_type, overrides=overrides, mock=mock) + robot = make_robot(**robot_kwargs) repo_id = "lerobot/debug" root = Path(tmpdir) / "data" / repo_id single_task = "Do something." 
- record_kwargs = { - "robot": robot, - "root": root, - "repo_id": repo_id, - "single_task": single_task, - "fps": 1, - "warmup_time_s": 0, - "episode_time_s": 1, - "push_to_hub": False, - "video": False, - "display_cameras": False, - "play_sounds": False, - "run_compute_stats": False, - "local_files_only": True, - "num_episodes": 1, - } - - dataset = record(**record_kwargs) + rec_cfg = RecordControlConfig( + repo_id=repo_id, + root=root, + single_task=single_task, + fps=1, + warmup_time_s=0, + episode_time_s=1, + push_to_hub=False, + video=False, + display_cameras=False, + play_sounds=False, + run_compute_stats=False, + local_files_only=True, + num_episodes=1, + ) + + dataset = record(robot, rec_cfg) assert len(dataset) == 1, f"`dataset` should contain 1 frame, not {len(dataset)}" with pytest.raises(FileExistsError): # Dataset already exists, but resume=False by default - record(**record_kwargs) + record(robot, rec_cfg) - dataset = record(**record_kwargs, resume=True) + rec_cfg.resume = True + dataset = record(robot, rec_cfg) assert len(dataset) == 2, f"`dataset` should contain 2 frames, not {len(dataset)}" @pytest.mark.parametrize("robot_type, mock", [("koch", True)]) @require_robot def test_record_with_event_rerecord_episode(tmpdir, request, robot_type, mock): + robot_kwargs = {"robot_type": robot_type, "mock": mock} + if mock and robot_type != "aloha": request.getfixturevalue("patch_builtins_input") @@ -313,12 +316,13 @@ def test_record_with_event_rerecord_episode(tmpdir, request, robot_type, mock): # and avoid writing calibration files in user .cache/calibration folder calibration_dir = tmpdir / robot_type mock_calibration_dir(calibration_dir) - overrides = [f"calibration_dir={calibration_dir}"] + robot_kwargs["calibration_dir"] = calibration_dir else: - # Use the default .cache/calibration folder when mock=False or for aloha - overrides = [] + # Use the default .cache/calibration folder when mock=False + pass + + robot = make_robot(**robot_kwargs) - robot = make_robot(robot_type, overrides=overrides, mock=mock) with patch("lerobot.scripts.control_robot.init_keyboard_listener") as mock_listener: mock_events = {} mock_events["exit_early"] = True @@ -330,11 +334,10 @@ def test_record_with_event_rerecord_episode(tmpdir, request, robot_type, mock): root = Path(tmpdir) / "data" / repo_id single_task = "Do something." 
- dataset = record( - robot, - root, - repo_id, - single_task, + rec_cfg = RecordControlConfig( + repo_id=repo_id, + root=root, + single_task=single_task, fps=1, warmup_time_s=0, episode_time_s=1, @@ -345,6 +348,7 @@ def test_record_with_event_rerecord_episode(tmpdir, request, robot_type, mock): play_sounds=False, run_compute_stats=False, ) + dataset = record(robot, rec_cfg) assert not mock_events["rerecord_episode"], "`rerecord_episode` wasn't properly reset to False" assert not mock_events["exit_early"], "`exit_early` wasn't properly reset to False" @@ -354,6 +358,8 @@ def test_record_with_event_rerecord_episode(tmpdir, request, robot_type, mock): @pytest.mark.parametrize("robot_type, mock", [("koch", True)]) @require_robot def test_record_with_event_exit_early(tmpdir, request, robot_type, mock): + robot_kwargs = {"robot_type": robot_type, "mock": mock} + if mock: request.getfixturevalue("patch_builtins_input") @@ -361,12 +367,13 @@ def test_record_with_event_exit_early(tmpdir, request, robot_type, mock): # and avoid writing calibration files in user .cache/calibration folder calibration_dir = tmpdir / robot_type mock_calibration_dir(calibration_dir) - overrides = [f"calibration_dir={calibration_dir}"] + robot_kwargs["calibration_dir"] = calibration_dir else: - # Use the default .cache/calibration folder when mock=False or for aloha - overrides = [] + # Use the default .cache/calibration folder when mock=False + pass + + robot = make_robot(**robot_kwargs) - robot = make_robot(robot_type, overrides=overrides, mock=mock) with patch("lerobot.scripts.control_robot.init_keyboard_listener") as mock_listener: mock_events = {} mock_events["exit_early"] = True @@ -378,12 +385,11 @@ def test_record_with_event_exit_early(tmpdir, request, robot_type, mock): root = Path(tmpdir) / "data" / repo_id single_task = "Do something." 
- dataset = record( - robot, - fps=2, + rec_cfg = RecordControlConfig( + repo_id=repo_id, root=root, single_task=single_task, - repo_id=repo_id, + fps=2, warmup_time_s=0, episode_time_s=1, num_episodes=1, @@ -394,6 +400,8 @@ def test_record_with_event_exit_early(tmpdir, request, robot_type, mock): run_compute_stats=False, ) + dataset = record(robot, rec_cfg) + assert not mock_events["exit_early"], "`exit_early` wasn't properly reset to False" assert len(dataset) == 1, "`dataset` should contain only 1 frame" @@ -403,6 +411,8 @@ def test_record_with_event_exit_early(tmpdir, request, robot_type, mock): ) @require_robot def test_record_with_event_stop_recording(tmpdir, request, robot_type, mock, num_image_writer_processes): + robot_kwargs = {"robot_type": robot_type, "mock": mock} + if mock: request.getfixturevalue("patch_builtins_input") @@ -410,12 +420,13 @@ def test_record_with_event_stop_recording(tmpdir, request, robot_type, mock, num # and avoid writing calibration files in user .cache/calibration folder calibration_dir = tmpdir / robot_type mock_calibration_dir(calibration_dir) - overrides = [f"calibration_dir={calibration_dir}"] + robot_kwargs["calibration_dir"] = calibration_dir else: - # Use the default .cache/calibration folder when mock=False or for aloha - overrides = [] + # Use the default .cache/calibration folder when mock=False + pass + + robot = make_robot(**robot_kwargs) - robot = make_robot(robot_type, overrides=overrides, mock=mock) with patch("lerobot.scripts.control_robot.init_keyboard_listener") as mock_listener: mock_events = {} mock_events["exit_early"] = True @@ -427,14 +438,14 @@ def test_record_with_event_stop_recording(tmpdir, request, robot_type, mock, num root = Path(tmpdir) / "data" / repo_id single_task = "Do something." 
- dataset = record( - robot, - root, - repo_id, + rec_cfg = RecordControlConfig( + repo_id=repo_id, + root=root, single_task=single_task, fps=1, warmup_time_s=0, episode_time_s=1, + reset_time_s=0.1, num_episodes=2, push_to_hub=False, video=False, @@ -444,5 +455,7 @@ def test_record_with_event_stop_recording(tmpdir, request, robot_type, mock, num num_image_writer_processes=num_image_writer_processes, ) + dataset = record(robot, rec_cfg) + assert not mock_events["exit_early"], "`exit_early` wasn't properly reset to False" assert len(dataset) == 1, "`dataset` should contain only 1 frame" diff --git a/tests/test_datasets.py b/tests/test_datasets.py index 9f3615871f..56adf07475 100644 --- a/tests/test_datasets.py +++ b/tests/test_datasets.py @@ -43,9 +43,14 @@ hf_transform_to_torch, unflatten_dict, ) -from lerobot.common.utils.utils import init_hydra_config, seeded_context +from lerobot.common.envs.factory import make_env_config +from lerobot.common.policies.factory import make_policy_config +from lerobot.common.robot_devices.robots.utils import make_robot +from lerobot.common.utils.utils import seeded_context +from lerobot.configs.default import DatasetConfig +from lerobot.configs.training import TrainPipelineConfig from tests.fixtures.constants import DUMMY_REPO_ID -from tests.utils import DEFAULT_CONFIG_PATH, DEVICE, make_robot +from tests.utils import DEVICE, require_x86_64_kernel def test_same_attributes_defined(lerobot_dataset_factory, tmp_path): @@ -98,11 +103,13 @@ def test_dataset_initialization(lerobot_dataset_factory, tmp_path): # - [ ] test smaller methods -@pytest.mark.skip("TODO after v2 migration / removing hydra") @pytest.mark.parametrize( "env_name, repo_id, policy_name", - lerobot.env_dataset_policy_triplets - + [("aloha", ["lerobot/aloha_sim_insertion_human", "lerobot/aloha_sim_transfer_cube_human"], "act")], + # Single dataset + lerobot.env_dataset_policy_triplets, + # Multi-dataset + # TODO after fix multidataset + # + [("aloha", ["lerobot/aloha_sim_insertion_human", "lerobot/aloha_sim_transfer_cube_human"], "act")], ) def test_factory(env_name, repo_id, policy_name): """ @@ -110,15 +117,14 @@ def test_factory(env_name, repo_id, policy_name): - we can create a dataset with the factory. - for a commonly used set of data keys, the data dimensions are correct. """ - cfg = init_hydra_config( - DEFAULT_CONFIG_PATH, - overrides=[ - f"env={env_name}", - f"dataset_repo_id={repo_id}", - f"policy={policy_name}", - f"device={DEVICE}", - ], + cfg = TrainPipelineConfig( + # TODO(rcadene, aliberts): remove dataset download + dataset=DatasetConfig(repo_id=repo_id, episodes=[0]), + env=make_env_config(env_name), + policy=make_policy_config(policy_name), + device=DEVICE, ) + dataset = make_dataset(cfg) delta_timestamps = dataset.delta_timestamps camera_keys = dataset.meta.camera_keys @@ -171,8 +177,8 @@ def test_factory(env_name, repo_id, policy_name): # TODO(alexander-soare): If you're hunting for savings on testing time, this takes about 5 seconds. -@pytest.mark.skip("TODO after v2 migration / removing hydra") -def test_multilerobotdataset_frames(): +@pytest.mark.skip("TODO after fix multidataset") +def test_multidataset_frames(): """Check that all dataset frames are incorporated.""" # Note: use the image variants of the dataset to make the test approx 3x faster. 
# Note: We really do need three repo_ids here as at some point this caught an issue with the chaining @@ -205,14 +211,14 @@ def test_multilerobotdataset_frames(): # TODO(aliberts, rcadene): Refactor and move this to a tests/test_compute_stats.py -@pytest.mark.skip("TODO after v2 migration / removing hydra") def test_compute_stats_on_xarm(): """Check that the statistics are computed correctly according to the stats_patterns property. We compare with taking a straight min, mean, max, std of all the data in one pass (which we can do because we are working with a small dataset). """ - dataset = LeRobotDataset("lerobot/xarm_lift_medium") + # TODO(rcadene, aliberts): remove dataset download + dataset = LeRobotDataset("lerobot/xarm_lift_medium", episodes=[0]) # reduce size of dataset sample on which stats compute is tested to 10 frames dataset.hf_dataset = dataset.hf_dataset.select(range(10)) @@ -289,7 +295,6 @@ def test_flatten_unflatten_dict(): assert json.dumps(original_d, sort_keys=True) == json.dumps(d, sort_keys=True), f"{original_d} != {d}" -@pytest.mark.skip("TODO after v2 migration / removing hydra") @pytest.mark.parametrize( "repo_id", [ @@ -301,11 +306,12 @@ def test_flatten_unflatten_dict(): # "lerobot/cmu_stretch", ], ) -# TODO(rcadene, aliberts): all these tests fail locally on Mac M1, but not on Linux +@require_x86_64_kernel def test_backward_compatibility(repo_id): """The artifacts for this test have been generated by `tests/scripts/save_dataset_to_safetensors.py`.""" - dataset = LeRobotDataset(repo_id) + # TODO(rcadene, aliberts): remove dataset download + dataset = LeRobotDataset(repo_id, episodes=[0]) test_dir = Path("tests/data/save_dataset_to_safetensors") / repo_id @@ -318,6 +324,11 @@ def load_and_compare(i): new_frame.pop("language_instruction", None) old_frame.pop("language_instruction", None) + # Remove task_index to allow for backward compatibility + # TODO(rcadene): remove when new features have been generated + if "task_index" not in old_frame: + del new_frame["task_index"] + new_keys = set(new_frame.keys()) old_keys = set(old_frame.keys()) assert new_keys == old_keys, f"{new_keys=} and {old_keys=} are not the same" @@ -361,8 +372,8 @@ def load_and_compare(i): # load_and_compare(i - 1) -@pytest.mark.skip("TODO after v2 migration / removing hydra") -def test_aggregate_stats(): +@pytest.mark.skip("TODO after fix multidataset") +def test_multidataset_aggregate_stats(): """Makes 3 basic datasets and checks that aggregate stats are computed correctly.""" with seeded_context(0): data_a = torch.rand(30, dtype=torch.float32) diff --git a/tests/test_delta_timestamps.py b/tests/test_delta_timestamps.py index 3c2e307f8a..3516583daf 100644 --- a/tests/test_delta_timestamps.py +++ b/tests/test_delta_timestamps.py @@ -54,8 +54,10 @@ def _create_slightly_off_hf_dataset(fps: int = 30, tolerance_s: float = 1e-4) -> @pytest.fixture(scope="module") def valid_delta_timestamps_factory(): - def _create_valid_delta_timestamps(fps: int = 30, keys: list = DUMMY_MOTOR_FEATURES) -> dict: - delta_timestamps = {key: [i * (1 / fps) for i in range(-10, 10)] for key in keys} + def _create_valid_delta_timestamps( + fps: int = 30, keys: list = DUMMY_MOTOR_FEATURES, min_max_range: tuple[int, int] = (-10, 10) + ) -> dict: + delta_timestamps = {key: [i * (1 / fps) for i in range(*min_max_range)] for key in keys} return delta_timestamps return _create_valid_delta_timestamps @@ -91,8 +93,11 @@ def _create_slightly_off_delta_timestamps( @pytest.fixture(scope="module") -def delta_indices(keys: list = 
DUMMY_MOTOR_FEATURES) -> dict: - return {key: list(range(-10, 10)) for key in keys} +def delta_indices_factory(): + def _delta_indices(keys: list = DUMMY_MOTOR_FEATURES, min_max_range: tuple[int, int] = (-10, 10)) -> dict: + return {key: list(range(*min_max_range)) for key in keys} + + return _delta_indices def test_check_timestamps_sync_synced(synced_hf_dataset_factory): @@ -248,9 +253,10 @@ def test_check_delta_timestamps_empty(): assert result is True -def test_delta_indices(valid_delta_timestamps_factory, delta_indices): - fps = 30 - delta_timestamps = valid_delta_timestamps_factory(fps) - expected_delta_indices = delta_indices +def test_delta_indices(valid_delta_timestamps_factory, delta_indices_factory): + fps = 50 + min_max_range = (-100, 100) + delta_timestamps = valid_delta_timestamps_factory(fps, min_max_range=min_max_range) + expected_delta_indices = delta_indices_factory(min_max_range=min_max_range) actual_delta_indices = get_delta_indices(delta_timestamps, fps) assert expected_delta_indices == actual_delta_indices diff --git a/tests/test_envs.py b/tests/test_envs.py index aec9999da3..c7c384db5a 100644 --- a/tests/test_envs.py +++ b/tests/test_envs.py @@ -21,11 +21,10 @@ from gymnasium.utils.env_checker import check_env import lerobot -from lerobot.common.envs.factory import make_env +from lerobot.common.envs.factory import make_env, make_env_config from lerobot.common.envs.utils import preprocess_observation -from lerobot.common.utils.utils import init_hydra_config -from .utils import DEFAULT_CONFIG_PATH, DEVICE, require_env +from .utils import require_env OBS_TYPES = ["state", "pixels", "pixels_agent_pos"] @@ -47,11 +46,7 @@ def test_env(env_name, env_task, obs_type): @pytest.mark.parametrize("env_name", lerobot.available_envs) @require_env def test_factory(env_name): - cfg = init_hydra_config( - DEFAULT_CONFIG_PATH, - overrides=[f"env={env_name}", f"device={DEVICE}"], - ) - + cfg = make_env_config(env_name) env = make_env(cfg, n_envs=1) obs, _ = env.reset() obs = preprocess_observation(obs) diff --git a/tests/test_image_transforms.py b/tests/test_image_transforms.py index 8b1a0f4b11..72a4ee6e3e 100644 --- a/tests/test_image_transforms.py +++ b/tests/test_image_transforms.py @@ -13,7 +13,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
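As a side note on the `test_delta_indices` change above: the test asserts a straightforward seconds-to-frames round trip. A minimal sketch of that relationship, mirroring the values used in the test (the import path and the `"action"` key are assumptions for illustration):

```python
from lerobot.common.datasets.utils import get_delta_indices  # import path assumed

fps = 50
min_max_range = (-100, 100)

# delta_timestamps are per-key offsets in seconds relative to the current frame...
delta_timestamps = {"action": [i / fps for i in range(*min_max_range)]}

# ...and delta_indices are the same offsets expressed in frames at the dataset fps,
# which is what the test above expects get_delta_indices to return.
expected = {"action": list(range(*min_max_range))}
assert get_delta_indices(delta_timestamps, fps) == expected
```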
-from pathlib import Path import pytest import torch @@ -21,13 +20,21 @@ from torchvision.transforms import v2 from torchvision.transforms.v2 import functional as F # noqa: N812 -from lerobot.common.datasets.transforms import RandomSubsetApply, SharpnessJitter, get_image_transforms -from lerobot.common.utils.utils import init_hydra_config, seeded_context -from lerobot.scripts.visualize_image_transforms import visualize_transforms -from tests.utils import DEFAULT_CONFIG_PATH, require_x86_64_kernel - -ARTIFACT_DIR = Path("tests/data/save_image_transforms_to_safetensors") -DATASET_REPO_ID = "lerobot/aloha_mobile_shrimp" +from lerobot.common.datasets.transforms import ( + ImageTransformConfig, + ImageTransforms, + ImageTransformsConfig, + RandomSubsetApply, + SharpnessJitter, + make_transform_from_config, +) +from lerobot.common.utils.utils import seeded_context +from lerobot.scripts.visualize_image_transforms import ( + save_all_transforms, + save_each_transform, +) +from tests.scripts.save_image_transforms_to_safetensors import ARTIFACT_DIR +from tests.utils import require_x86_64_kernel @pytest.fixture @@ -44,21 +51,38 @@ def single_transforms(): return load_file(ARTIFACT_DIR / "single_transforms.safetensors") +@pytest.fixture +def img_tensor(single_transforms): + return single_transforms["original_frame"] + + @pytest.fixture def default_transforms(): return load_file(ARTIFACT_DIR / "default_transforms.safetensors") -def test_get_image_transforms_no_transform(img_tensor_factory): +def test_get_image_transforms_no_transform_enable_false(img_tensor_factory): img_tensor = img_tensor_factory() - tf_actual = get_image_transforms(brightness_min_max=(0.5, 0.5), max_num_transforms=0) + tf_cfg = ImageTransformsConfig() # default is enable=False + tf_actual = ImageTransforms(tf_cfg) + torch.testing.assert_close(tf_actual(img_tensor), img_tensor) + + +def test_get_image_transforms_no_transform_max_num_transforms_0(img_tensor_factory): + img_tensor = img_tensor_factory() + tf_cfg = ImageTransformsConfig(enable=True, max_num_transforms=0) + tf_actual = ImageTransforms(tf_cfg) torch.testing.assert_close(tf_actual(img_tensor), img_tensor) @pytest.mark.parametrize("min_max", [(0.5, 0.5), (2.0, 2.0)]) def test_get_image_transforms_brightness(img_tensor_factory, min_max): img_tensor = img_tensor_factory() - tf_actual = get_image_transforms(brightness_weight=1.0, brightness_min_max=min_max) + tf_cfg = ImageTransformsConfig( + enable=True, + tfs={"brightness": ImageTransformConfig(type="ColorJitter", kwargs={"brightness": min_max})}, + ) + tf_actual = ImageTransforms(tf_cfg) tf_expected = v2.ColorJitter(brightness=min_max) torch.testing.assert_close(tf_actual(img_tensor), tf_expected(img_tensor)) @@ -66,7 +90,10 @@ def test_get_image_transforms_brightness(img_tensor_factory, min_max): @pytest.mark.parametrize("min_max", [(0.5, 0.5), (2.0, 2.0)]) def test_get_image_transforms_contrast(img_tensor_factory, min_max): img_tensor = img_tensor_factory() - tf_actual = get_image_transforms(contrast_weight=1.0, contrast_min_max=min_max) + tf_cfg = ImageTransformsConfig( + enable=True, tfs={"contrast": ImageTransformConfig(type="ColorJitter", kwargs={"contrast": min_max})} + ) + tf_actual = ImageTransforms(tf_cfg) tf_expected = v2.ColorJitter(contrast=min_max) torch.testing.assert_close(tf_actual(img_tensor), tf_expected(img_tensor)) @@ -74,7 +101,11 @@ def test_get_image_transforms_contrast(img_tensor_factory, min_max): @pytest.mark.parametrize("min_max", [(0.5, 0.5), (2.0, 2.0)]) def 
test_get_image_transforms_saturation(img_tensor_factory, min_max): img_tensor = img_tensor_factory() - tf_actual = get_image_transforms(saturation_weight=1.0, saturation_min_max=min_max) + tf_cfg = ImageTransformsConfig( + enable=True, + tfs={"saturation": ImageTransformConfig(type="ColorJitter", kwargs={"saturation": min_max})}, + ) + tf_actual = ImageTransforms(tf_cfg) tf_expected = v2.ColorJitter(saturation=min_max) torch.testing.assert_close(tf_actual(img_tensor), tf_expected(img_tensor)) @@ -82,7 +113,10 @@ def test_get_image_transforms_saturation(img_tensor_factory, min_max): @pytest.mark.parametrize("min_max", [(-0.25, -0.25), (0.25, 0.25)]) def test_get_image_transforms_hue(img_tensor_factory, min_max): img_tensor = img_tensor_factory() - tf_actual = get_image_transforms(hue_weight=1.0, hue_min_max=min_max) + tf_cfg = ImageTransformsConfig( + enable=True, tfs={"hue": ImageTransformConfig(type="ColorJitter", kwargs={"hue": min_max})} + ) + tf_actual = ImageTransforms(tf_cfg) tf_expected = v2.ColorJitter(hue=min_max) torch.testing.assert_close(tf_actual(img_tensor), tf_expected(img_tensor)) @@ -90,21 +124,49 @@ def test_get_image_transforms_hue(img_tensor_factory, min_max): @pytest.mark.parametrize("min_max", [(0.5, 0.5), (2.0, 2.0)]) def test_get_image_transforms_sharpness(img_tensor_factory, min_max): img_tensor = img_tensor_factory() - tf_actual = get_image_transforms(sharpness_weight=1.0, sharpness_min_max=min_max) + tf_cfg = ImageTransformsConfig( + enable=True, + tfs={"sharpness": ImageTransformConfig(type="SharpnessJitter", kwargs={"sharpness": min_max})}, + ) + tf_actual = ImageTransforms(tf_cfg) tf_expected = SharpnessJitter(sharpness=min_max) torch.testing.assert_close(tf_actual(img_tensor), tf_expected(img_tensor)) def test_get_image_transforms_max_num_transforms(img_tensor_factory): img_tensor = img_tensor_factory() - tf_actual = get_image_transforms( - brightness_min_max=(0.5, 0.5), - contrast_min_max=(0.5, 0.5), - saturation_min_max=(0.5, 0.5), - hue_min_max=(0.5, 0.5), - sharpness_min_max=(0.5, 0.5), - random_order=False, + tf_cfg = ImageTransformsConfig( + enable=True, + max_num_transforms=5, + tfs={ + "brightness": ImageTransformConfig( + weight=1.0, + type="ColorJitter", + kwargs={"brightness": (0.5, 0.5)}, + ), + "contrast": ImageTransformConfig( + weight=1.0, + type="ColorJitter", + kwargs={"contrast": (0.5, 0.5)}, + ), + "saturation": ImageTransformConfig( + weight=1.0, + type="ColorJitter", + kwargs={"saturation": (0.5, 0.5)}, + ), + "hue": ImageTransformConfig( + weight=1.0, + type="ColorJitter", + kwargs={"hue": (0.5, 0.5)}, + ), + "sharpness": ImageTransformConfig( + weight=1.0, + type="SharpnessJitter", + kwargs={"sharpness": (0.5, 0.5)}, + ), + }, ) + tf_actual = ImageTransforms(tf_cfg) tf_expected = v2.Compose( [ v2.ColorJitter(brightness=(0.5, 0.5)), @@ -121,68 +183,79 @@ def test_get_image_transforms_max_num_transforms(img_tensor_factory): def test_get_image_transforms_random_order(img_tensor_factory): out_imgs = [] img_tensor = img_tensor_factory() - tf = get_image_transforms( - brightness_min_max=(0.5, 0.5), - contrast_min_max=(0.5, 0.5), - saturation_min_max=(0.5, 0.5), - hue_min_max=(0.5, 0.5), - sharpness_min_max=(0.5, 0.5), + tf_cfg = ImageTransformsConfig( + enable=True, random_order=True, + tfs={ + "brightness": ImageTransformConfig( + weight=1.0, + type="ColorJitter", + kwargs={"brightness": (0.5, 0.5)}, + ), + "contrast": ImageTransformConfig( + weight=1.0, + type="ColorJitter", + kwargs={"contrast": (0.5, 0.5)}, + ), + "saturation": 
ImageTransformConfig( + weight=1.0, + type="ColorJitter", + kwargs={"saturation": (0.5, 0.5)}, + ), + "hue": ImageTransformConfig( + weight=1.0, + type="ColorJitter", + kwargs={"hue": (0.5, 0.5)}, + ), + "sharpness": ImageTransformConfig( + weight=1.0, + type="SharpnessJitter", + kwargs={"sharpness": (0.5, 0.5)}, + ), + }, ) - with seeded_context(1337): + tf = ImageTransforms(tf_cfg) + + with seeded_context(1338): for _ in range(10): out_imgs.append(tf(img_tensor)) + tmp_img_tensor = img_tensor + for sub_tf in tf.tf.selected_transforms: + tmp_img_tensor = sub_tf(tmp_img_tensor) + torch.testing.assert_close(tmp_img_tensor, out_imgs[-1]) + for i in range(1, len(out_imgs)): with pytest.raises(AssertionError): torch.testing.assert_close(out_imgs[0], out_imgs[i]) -@pytest.mark.skip("TODO after v2 migration / removing hydra") @pytest.mark.parametrize( - "transform, min_max_values", + "tf_type, tf_name, min_max_values", [ - ("brightness", [(0.5, 0.5), (2.0, 2.0)]), - ("contrast", [(0.5, 0.5), (2.0, 2.0)]), - ("saturation", [(0.5, 0.5), (2.0, 2.0)]), - ("hue", [(-0.25, -0.25), (0.25, 0.25)]), - ("sharpness", [(0.5, 0.5), (2.0, 2.0)]), + ("ColorJitter", "brightness", [(0.5, 0.5), (2.0, 2.0)]), + ("ColorJitter", "contrast", [(0.5, 0.5), (2.0, 2.0)]), + ("ColorJitter", "saturation", [(0.5, 0.5), (2.0, 2.0)]), + ("ColorJitter", "hue", [(-0.25, -0.25), (0.25, 0.25)]), + ("SharpnessJitter", "sharpness", [(0.5, 0.5), (2.0, 2.0)]), ], ) -def test_backward_compatibility_torchvision(img_tensor_factory, transform, min_max_values, single_transforms): - img_tensor = img_tensor_factory() +def test_backward_compatibility_single_transforms( + img_tensor, tf_type, tf_name, min_max_values, single_transforms +): for min_max in min_max_values: - kwargs = { - f"{transform}_weight": 1.0, - f"{transform}_min_max": min_max, - } - tf = get_image_transforms(**kwargs) + tf_cfg = ImageTransformConfig(type=tf_type, kwargs={tf_name: min_max}) + tf = make_transform_from_config(tf_cfg) actual = tf(img_tensor) - key = f"{transform}_{min_max[0]}_{min_max[1]}" + key = f"{tf_name}_{min_max[0]}_{min_max[1]}" expected = single_transforms[key] torch.testing.assert_close(actual, expected) -@pytest.mark.skip("TODO after v2 migration / removing hydra") @require_x86_64_kernel -def test_backward_compatibility_default_config(img_tensor_factory, default_transforms): - img_tensor = img_tensor_factory() - cfg = init_hydra_config(DEFAULT_CONFIG_PATH) - cfg_tf = cfg.training.image_transforms - default_tf = get_image_transforms( - brightness_weight=cfg_tf.brightness.weight, - brightness_min_max=cfg_tf.brightness.min_max, - contrast_weight=cfg_tf.contrast.weight, - contrast_min_max=cfg_tf.contrast.min_max, - saturation_weight=cfg_tf.saturation.weight, - saturation_min_max=cfg_tf.saturation.min_max, - hue_weight=cfg_tf.hue.weight, - hue_min_max=cfg_tf.hue.min_max, - sharpness_weight=cfg_tf.sharpness.weight, - sharpness_min_max=cfg_tf.sharpness.min_max, - max_num_transforms=cfg_tf.max_num_transforms, - random_order=cfg_tf.random_order, - ) +def test_backward_compatibility_default_config(img_tensor, default_transforms): + cfg = ImageTransformsConfig(enable=True) + default_tf = ImageTransforms(cfg) with seeded_context(1337): actual = default_tf(img_tensor) @@ -260,26 +333,36 @@ def test_sharpness_jitter_invalid_range_max_smaller(): SharpnessJitter((2.0, 0.1)) -@pytest.mark.skip("TODO after v2 migration / removing hydra") -@pytest.mark.parametrize( - "repo_id, n_examples", - [ - ("lerobot/aloha_sim_transfer_cube_human", 3), - ], -) -def 
test_visualize_image_transforms(repo_id, n_examples): - cfg = init_hydra_config(DEFAULT_CONFIG_PATH, overrides=[f"dataset_repo_id={repo_id}"]) - output_dir = Path(__file__).parent / "outputs" / "image_transforms" - visualize_transforms(cfg, output_dir=output_dir, n_examples=n_examples) - output_dir = output_dir / repo_id.split("/")[-1] +def test_save_all_transforms(img_tensor_factory, tmp_path): + img_tensor = img_tensor_factory() + tf_cfg = ImageTransformsConfig(enable=True) + n_examples = 3 - # Check if the original frame image exists - assert (output_dir / "original_frame.png").exists(), "Original frame image was not saved." + save_all_transforms(tf_cfg, img_tensor, tmp_path, n_examples) + + # Check if the combined transforms directory exists and contains the right files + combined_transforms_dir = tmp_path / "all" + assert combined_transforms_dir.exists(), "Combined transforms directory was not created." + assert any( + combined_transforms_dir.iterdir() + ), "No transformed images found in combined transforms directory." + for i in range(1, n_examples + 1): + assert ( + combined_transforms_dir / f"{i}.png" + ).exists(), f"Combined transform image {i}.png was not found." + + +def test_save_each_transform(img_tensor_factory, tmp_path): + img_tensor = img_tensor_factory() + tf_cfg = ImageTransformsConfig(enable=True) + n_examples = 3 + + save_each_transform(tf_cfg, img_tensor, tmp_path, n_examples) # Check if the transformed images exist for each transform type transforms = ["brightness", "contrast", "saturation", "hue", "sharpness"] for transform in transforms: - transform_dir = output_dir / transform + transform_dir = tmp_path / transform assert transform_dir.exists(), f"{transform} directory was not created." assert any(transform_dir.iterdir()), f"No transformed images found in {transform} directory." @@ -289,14 +372,3 @@ def test_visualize_image_transforms(repo_id, n_examples): assert ( transform_dir / file_name ).exists(), f"{file_name} was not found in {transform} directory." - - # Check if the combined transforms directory exists and contains the right files - combined_transforms_dir = output_dir / "all" - assert combined_transforms_dir.exists(), "Combined transforms directory was not created." - assert any( - combined_transforms_dir.iterdir() - ), "No transformed images found in combined transforms directory." - for i in range(1, n_examples + 1): - assert ( - combined_transforms_dir / f"{i}.png" - ).exists(), f"Combined transform image {i}.png was not found." 
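For context, a minimal sketch of how the config-driven transforms API exercised by these tests is used (class and field names as introduced by this patch; the jitter ranges and image size below are illustrative, not values from the patch):

```python
import torch

from lerobot.common.datasets.transforms import (
    ImageTransformConfig,
    ImageTransforms,
    ImageTransformsConfig,
)

# Build a config that enables up to two randomly ordered transforms per call.
tf_cfg = ImageTransformsConfig(
    enable=True,
    max_num_transforms=2,
    random_order=True,
    tfs={
        "brightness": ImageTransformConfig(
            weight=1.0, type="ColorJitter", kwargs={"brightness": (0.8, 1.2)}
        ),
        "sharpness": ImageTransformConfig(
            weight=1.0, type="SharpnessJitter", kwargs={"sharpness": (0.5, 2.0)}
        ),
    },
)
tf = ImageTransforms(tf_cfg)

img = torch.rand(3, 96, 96)  # (c, h, w) float image tensor
augmented = tf(img)
```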
diff --git a/tests/test_policies.py b/tests/test_policies.py index ae35674339..bfb82d024d 100644 --- a/tests/test_policies.py +++ b/tests/test_policies.py @@ -26,67 +26,107 @@ from lerobot import available_policies from lerobot.common.datasets.factory import make_dataset from lerobot.common.datasets.utils import cycle -from lerobot.common.envs.factory import make_env +from lerobot.common.envs.factory import make_env, make_env_config from lerobot.common.envs.utils import preprocess_observation +from lerobot.common.optim.factory import make_optimizer_and_scheduler from lerobot.common.policies.act.modeling_act import ACTTemporalEnsembler from lerobot.common.policies.factory import ( - _policy_cfg_from_hydra_cfg, - get_policy_and_config_classes, + get_policy_class, make_policy, + make_policy_config, ) from lerobot.common.policies.normalize import Normalize, Unnormalize from lerobot.common.policies.policy_protocol import Policy -from lerobot.common.utils.utils import init_hydra_config, seeded_context -from lerobot.scripts.train import make_optimizer_and_scheduler +from lerobot.common.utils.utils import seeded_context +from lerobot.configs.default import DatasetConfig +from lerobot.configs.policies import PolicyFeature +from lerobot.configs.training import TrainPipelineConfig +from lerobot.configs.types import FeatureType, NormalizationMode from tests.scripts.save_policy_to_safetensors import get_policy_stats -from tests.utils import DEFAULT_CONFIG_PATH, DEVICE, require_cpu, require_env, require_x86_64_kernel +from tests.utils import DEVICE, require_cpu, require_env, require_x86_64_kernel + + +@pytest.fixture +def dummy_dataset_metadata(lerobot_dataset_metadata_factory, info_factory, tmp_path): + # Create only one camera input which is squared to fit all current policy constraints + # e.g. 
vqbet and tdmpc works with one camera only, and tdmpc requires it to be squared + camera_features = { + "observation.images.laptop": { + "shape": (84, 84, 3), + "names": ["height", "width", "channels"], + "info": None, + }, + } + motor_features = { + "action": { + "dtype": "float32", + "shape": (6,), + "names": ["shoulder_pan", "shoulder_lift", "elbow_flex", "wrist_flex", "wrist_roll", "gripper"], + }, + "observation.state": { + "dtype": "float32", + "shape": (6,), + "names": ["shoulder_pan", "shoulder_lift", "elbow_flex", "wrist_flex", "wrist_roll", "gripper"], + }, + } + info = info_factory( + total_episodes=1, total_frames=1, camera_features=camera_features, motor_features=motor_features + ) + ds_meta = lerobot_dataset_metadata_factory(root=tmp_path / "init", info=info) + return ds_meta @pytest.mark.parametrize("policy_name", available_policies) def test_get_policy_and_config_classes(policy_name: str): """Check that the correct policy and config classes are returned.""" - policy_cls, config_cls = get_policy_and_config_classes(policy_name) + policy_cls = get_policy_class(policy_name) + policy_cfg = make_policy_config(policy_name) assert policy_cls.name == policy_name - assert issubclass(config_cls, inspect.signature(policy_cls.__init__).parameters["config"].annotation) + assert issubclass( + policy_cfg.__class__, inspect.signature(policy_cls.__init__).parameters["config"].annotation + ) -@pytest.mark.skip("TODO after v2 migration / removing hydra") @pytest.mark.parametrize( - "env_name,policy_name,extra_overrides", + "ds_repo_id,env_name,env_kwargs,policy_name,policy_kwargs", [ - ("xarm", "tdmpc", ["policy.use_mpc=true", "dataset_repo_id=lerobot/xarm_lift_medium"]), - ("pusht", "diffusion", []), - ("pusht", "vqbet", []), - ("aloha", "act", ["env.task=AlohaInsertion-v0", "dataset_repo_id=lerobot/aloha_sim_insertion_human"]), + ("lerobot/xarm_lift_medium", "xarm", {}, "tdmpc", {"use_mpc": True}), + ("lerobot/pusht", "pusht", {}, "diffusion", {}), + ("lerobot/pusht", "pusht", {}, "vqbet", {}), + ("lerobot/pusht", "pusht", {}, "act", {}), + ("lerobot/aloha_sim_insertion_human", "aloha", {"task": "AlohaInsertion-v0"}, "act", {}), ( + "lerobot/aloha_sim_insertion_scripted", "aloha", + {"task": "AlohaInsertion-v0"}, "act", - ["env.task=AlohaInsertion-v0", "dataset_repo_id=lerobot/aloha_sim_insertion_scripted"], + {}, ), ( + "lerobot/aloha_sim_insertion_human", "aloha", - "act", - ["env.task=AlohaTransferCube-v0", "dataset_repo_id=lerobot/aloha_sim_transfer_cube_human"], + {"task": "AlohaInsertion-v0"}, + "diffusion", + {}, ), ( + "lerobot/aloha_sim_transfer_cube_human", "aloha", + {"task": "AlohaTransferCube-v0"}, "act", - ["env.task=AlohaTransferCube-v0", "dataset_repo_id=lerobot/aloha_sim_transfer_cube_scripted"], + {}, ), - # Note: these parameters also need custom logic in the test function for overriding the Hydra config. ( + "lerobot/aloha_sim_transfer_cube_scripted", "aloha", - "diffusion", - ["env.task=AlohaInsertion-v0", "dataset_repo_id=lerobot/aloha_sim_insertion_human"], + {"task": "AlohaTransferCube-v0"}, + "act", + {}, ), - # Note: these parameters also need custom logic in the test function for overriding the Hydra config. - ("pusht", "act", ["env.task=PushT-v0", "dataset_repo_id=lerobot/pusht"]), - ("dora_aloha_real", "act_real", []), - ("dora_aloha_real", "act_real_no_state", []), ], ) @require_env -def test_policy(env_name, policy_name, extra_overrides): +def test_policy(ds_repo_id, env_name, env_kwargs, policy_name, policy_kwargs): """ Tests: - Making the policy object. 
@@ -99,44 +139,18 @@ def test_policy(env_name, policy_name, extra_overrides): Note: We test various combinations of policy and dataset. The combinations are by no means exhaustive, and for now we add tests as we see fit. """ - cfg = init_hydra_config( - DEFAULT_CONFIG_PATH, - overrides=[ - f"env={env_name}", - f"policy={policy_name}", - f"device={DEVICE}", - ] - + extra_overrides, - ) - # Additional config override logic. - if env_name == "aloha" and policy_name == "diffusion": - for keys in [ - ("training", "delta_timestamps"), - ("policy", "input_shapes"), - ("policy", "input_normalization_modes"), - ]: - dct = dict(cfg[keys[0]][keys[1]]) - dct["observation.images.top"] = dct["observation.image"] - del dct["observation.image"] - cfg[keys[0]][keys[1]] = dct - cfg.override_dataset_stats = None - - # Additional config override logic. - if env_name == "pusht" and policy_name == "act": - for keys in [ - ("policy", "input_shapes"), - ("policy", "input_normalization_modes"), - ]: - dct = dict(cfg[keys[0]][keys[1]]) - dct["observation.image"] = dct["observation.images.top"] - del dct["observation.images.top"] - cfg[keys[0]][keys[1]] = dct - cfg.override_dataset_stats = None + train_cfg = TrainPipelineConfig( + # TODO(rcadene, aliberts): remove dataset download + dataset=DatasetConfig(repo_id=ds_repo_id, episodes=[0]), + policy=make_policy_config(policy_name, **policy_kwargs), + env=make_env_config(env_name, **env_kwargs), + device=DEVICE, + ) # Check that we can make the policy object. - dataset = make_dataset(cfg) - policy = make_policy(hydra_cfg=cfg, dataset_stats=dataset.meta.stats) + dataset = make_dataset(train_cfg) + policy = make_policy(train_cfg.policy, ds_meta=dataset.meta, device=DEVICE) # Check that the policy follows the required protocol. assert isinstance( policy, Policy @@ -145,7 +159,7 @@ def test_policy(env_name, policy_name, extra_overrides): assert isinstance(policy, PyTorchModelHubMixin) # Check that we run select_actions and get the appropriate output. - env = make_env(cfg, n_envs=2) + env = make_env(train_cfg.env, n_envs=2) dataloader = torch.utils.data.DataLoader( dataset, @@ -172,7 +186,7 @@ def test_policy(env_name, policy_name, extra_overrides): # reset the policy and environment policy.reset() - observation, _ = env.reset(seed=cfg.seed) + observation, _ = env.reset(seed=train_cfg.seed) # apply transform to normalize the observations observation = preprocess_observation(observation) @@ -195,65 +209,50 @@ def test_policy(env_name, policy_name, extra_overrides): env.step(action) -@pytest.mark.skip("TODO after v2 migration / removing hydra") +# TODO(rcadene, aliberts): This test is quite end-to-end. Move this test in test_optimizer? def test_act_backbone_lr(): """ Test that the ACT policy can be instantiated with a different learning rate for the backbone. 
""" - cfg = init_hydra_config( - DEFAULT_CONFIG_PATH, - overrides=[ - "env=aloha", - "policy=act", - f"device={DEVICE}", - "training.lr_backbone=0.001", - "training.lr=0.01", - ], + + cfg = TrainPipelineConfig( + # TODO(rcadene, aliberts): remove dataset download + dataset=DatasetConfig(repo_id="lerobot/aloha_sim_insertion_scripted", episodes=[0]), + policy=make_policy_config("act", optimizer_lr=0.01, optimizer_lr_backbone=0.001), + device=DEVICE, ) - assert cfg.training.lr == 0.01 - assert cfg.training.lr_backbone == 0.001 + + assert cfg.policy.optimizer_lr == 0.01 + assert cfg.policy.optimizer_lr_backbone == 0.001 dataset = make_dataset(cfg) - policy = make_policy(hydra_cfg=cfg, dataset_stats=dataset.meta.stats) + policy = make_policy(cfg.policy, device=DEVICE, ds_meta=dataset.meta) optimizer, _ = make_optimizer_and_scheduler(cfg, policy) assert len(optimizer.param_groups) == 2 - assert optimizer.param_groups[0]["lr"] == cfg.training.lr - assert optimizer.param_groups[1]["lr"] == cfg.training.lr_backbone + assert optimizer.param_groups[0]["lr"] == cfg.policy.optimizer_lr + assert optimizer.param_groups[1]["lr"] == cfg.policy.optimizer_lr_backbone assert len(optimizer.param_groups[0]["params"]) == 133 assert len(optimizer.param_groups[1]["params"]) == 20 @pytest.mark.parametrize("policy_name", available_policies) -def test_policy_defaults(policy_name: str): +def test_policy_defaults(dummy_dataset_metadata, policy_name: str): """Check that the policy can be instantiated with defaults.""" - policy_cls, _ = get_policy_and_config_classes(policy_name) - policy_cls() - - -@pytest.mark.parametrize( - "env_name,policy_name", - [ - ("xarm", "tdmpc"), - ("pusht", "diffusion"), - ("aloha", "act"), - ], -) -def test_yaml_matches_dataclass(env_name: str, policy_name: str): - """Check that dataclass configs match their respective yaml configs.""" - hydra_cfg = init_hydra_config(DEFAULT_CONFIG_PATH, overrides=[f"env={env_name}", f"policy={policy_name}"]) - _, policy_cfg_cls = get_policy_and_config_classes(policy_name) - policy_cfg_from_hydra = _policy_cfg_from_hydra_cfg(policy_cfg_cls, hydra_cfg) - policy_cfg_from_dataclass = policy_cfg_cls() - assert policy_cfg_from_hydra == policy_cfg_from_dataclass + policy_cls = get_policy_class(policy_name) + policy_cfg = make_policy_config(policy_name) + policy_cfg.parse_features_from_dataset(dummy_dataset_metadata) + policy_cls(policy_cfg) @pytest.mark.parametrize("policy_name", available_policies) -def test_save_and_load_pretrained(policy_name: str): - policy_cls, _ = get_policy_and_config_classes(policy_name) - policy: Policy = policy_cls() - save_dir = "/tmp/test_save_and_load_pretrained_{policy_cls.__name__}" +def test_save_and_load_pretrained(dummy_dataset_metadata, tmp_path, policy_name: str): + policy_cls = get_policy_class(policy_name) + policy_cfg = make_policy_config(policy_name) + policy_cfg.parse_features_from_dataset(dummy_dataset_metadata) + policy = policy_cls(policy_cfg) + save_dir = tmp_path / f"test_save_and_load_pretrained_{policy_cls.__name__}" policy.save_pretrained(save_dir) - policy_ = policy_cls.from_pretrained(save_dir) + policy_ = policy_cls.from_pretrained(save_dir, config=policy_cfg) assert all(torch.equal(p, p_) for p, p_ in zip(policy.parameters(), policy_.parameters(), strict=True)) @@ -267,21 +266,28 @@ def test_normalize(insert_temporal_dim): expected. 
""" - input_shapes = { - "observation.image": [3, 96, 96], - "observation.state": [10], - } - output_shapes = { - "action": [5], - } - - normalize_input_modes = { - "observation.image": "mean_std", - "observation.state": "min_max", - } - unnormalize_output_modes = { - "action": "min_max", - } + input_features = [ + PolicyFeature( + key="observation.image", + type=FeatureType.VISUAL, + normalization_mode=NormalizationMode.MEAN_STD, + shape=[3, 96, 96], + ), + PolicyFeature( + key="observation.state", + type=FeatureType.STATE, + normalization_mode=NormalizationMode.MIN_MAX, + shape=[10], + ), + ] + output_features = [ + PolicyFeature( + key="action", + type=FeatureType.ACTION, + normalization_mode=NormalizationMode.MIN_MAX, + shape=[5], + ), + ] dataset_stats = { "observation.image": { @@ -324,59 +330,76 @@ def test_normalize(insert_temporal_dim): output_batch[key] = torch.stack([output_batch[key]] * tdim, dim=1) # test without stats - normalize = Normalize(input_shapes, normalize_input_modes, stats=None) + normalize = Normalize(input_features, stats=None) with pytest.raises(AssertionError): normalize(input_batch) # test with stats - normalize = Normalize(input_shapes, normalize_input_modes, stats=dataset_stats) + normalize = Normalize(input_features, stats=dataset_stats) normalize(input_batch) # test loading pretrained models - new_normalize = Normalize(input_shapes, normalize_input_modes, stats=None) + new_normalize = Normalize(input_features, stats=None) new_normalize.load_state_dict(normalize.state_dict()) new_normalize(input_batch) # test without stats - unnormalize = Unnormalize(output_shapes, unnormalize_output_modes, stats=None) + unnormalize = Unnormalize(output_features, stats=None) with pytest.raises(AssertionError): unnormalize(output_batch) # test with stats - unnormalize = Unnormalize(output_shapes, unnormalize_output_modes, stats=dataset_stats) + unnormalize = Unnormalize(output_features, stats=dataset_stats) unnormalize(output_batch) # test loading pretrained models - new_unnormalize = Unnormalize(output_shapes, unnormalize_output_modes, stats=None) + new_unnormalize = Unnormalize(output_features, stats=None) new_unnormalize.load_state_dict(unnormalize.state_dict()) unnormalize(output_batch) -@pytest.mark.skip("TODO after v2 migration / removing hydra") @pytest.mark.parametrize( - "env_name, policy_name, extra_overrides, file_name_extra", + "ds_repo_id, env_name, policy_name, policy_kwargs, train_kwargs, file_name_extra", [ # TODO(alexander-soare): `policy.use_mpc=false` was previously the default in the config yaml but it # was changed to true. For some reason, tests would pass locally, but not in CI. So here we override # to test with `policy.use_mpc=false`. - ("xarm", "tdmpc", ["policy.use_mpc=false"], "use_policy"), - # ("xarm", "tdmpc", ["policy.use_mpc=true"], "use_mpc"), + ("lerobot/xarm_lift_medium", "xarm", "tdmpc", {"use_mpc": False}, {"batch_size": 25}, "use_policy"), + # ("lerobot/xarm_lift_medium", "xarm", "tdmpc", {"use_mpc": True}, {}, "use_mpc"), + # TODO(rcadene): the diffusion model was normalizing the image in mean=0.5 std=0.5 which is a hack supposed to + # to normalize the image at all. In our current codebase we dont normalize at all. But there is still a minor difference + # that fails the test. However, by testing to normalize the image with 0.5 0.5 in the current codebase, the test pass. + # Thus, we deactivate this test for now. 
+ # ( + # "lerobot/pusht", + # "pusht", + # "diffusion", + # { + # "n_action_steps": 8, + # "num_inference_steps": 10, + # "down_dims": [128, 256, 512], + # }, + # {"batch_size": 64}, + # "", + # ), + ("lerobot/aloha_sim_insertion_human", "aloha", "act", {"n_action_steps": 10}, {}, ""), ( - "pusht", - "diffusion", - ["policy.n_action_steps=8", "policy.num_inference_steps=10", "policy.down_dims=[128, 256, 512]"], - "", + "lerobot/aloha_sim_insertion_human", + "aloha", + "act", + {"n_action_steps": 1000, "chunk_size": 1000}, + {}, + "_1000_steps", ), - ("aloha", "act", ["policy.n_action_steps=10"], ""), - ("aloha", "act", ["policy.n_action_steps=1000", "policy.chunk_size=1000"], "_1000_steps"), - ("dora_aloha_real", "act_aloha_real", ["policy.n_action_steps=10"], ""), ], ) # As artifacts have been generated on an x86_64 kernel, this test won't # pass if it's run on another platform due to floating point errors @require_x86_64_kernel @require_cpu -def test_backward_compatibility(env_name, policy_name, extra_overrides, file_name_extra): +def test_backward_compatibility( + ds_repo_id, env_name, policy_name, policy_kwargs, train_kwargs, file_name_extra +): """ NOTE: If this test does not pass, and you have intentionally changed something in the policy: 1. Inspect the differences in policy outputs and make sure you can account for them. Your PR should @@ -397,16 +420,18 @@ def test_backward_compatibility(env_name, policy_name, extra_overrides, file_nam saved_param_stats = load_file(env_policy_dir / "param_stats.safetensors") saved_actions = load_file(env_policy_dir / "actions.safetensors") - output_dict, grad_stats, param_stats, actions = get_policy_stats(env_name, policy_name, extra_overrides) + output_dict, grad_stats, param_stats, actions = get_policy_stats( + ds_repo_id, env_name, policy_name, policy_kwargs, train_kwargs + ) for key in saved_output_dict: - assert torch.isclose(output_dict[key], saved_output_dict[key], rtol=0.1, atol=1e-7).all() + assert torch.allclose(output_dict[key], saved_output_dict[key], rtol=0.1, atol=1e-7) for key in saved_grad_stats: - assert torch.isclose(grad_stats[key], saved_grad_stats[key], rtol=0.1, atol=1e-7).all() + assert torch.allclose(grad_stats[key], saved_grad_stats[key], rtol=0.1, atol=1e-7) for key in saved_param_stats: - assert torch.isclose(param_stats[key], saved_param_stats[key], rtol=50, atol=1e-7).all() + assert torch.allclose(param_stats[key], saved_param_stats[key], rtol=0.1, atol=1e-7) for key in saved_actions: - assert torch.isclose(actions[key], saved_actions[key], rtol=0.1, atol=1e-7).all() + assert torch.allclose(actions[key], saved_actions[key], rtol=0.1, atol=1e-7) def test_act_temporal_ensembler(): diff --git a/tests/test_robots.py b/tests/test_robots.py index 05966ff15d..e03b5f78f1 100644 --- a/tests/test_robots.py +++ b/tests/test_robots.py @@ -28,9 +28,9 @@ import pytest import torch -from lerobot.common.robot_devices.robots.manipulator import ManipulatorRobot +from lerobot.common.robot_devices.robots.utils import make_robot from lerobot.common.robot_devices.utils import RobotDeviceAlreadyConnectedError, RobotDeviceNotConnectedError -from tests.utils import TEST_ROBOT_TYPES, make_robot, mock_calibration_dir, require_robot +from tests.utils import TEST_ROBOT_TYPES, mock_calibration_dir, require_robot @pytest.mark.parametrize("robot_type, mock", TEST_ROBOT_TYPES) @@ -39,12 +39,12 @@ def test_robot(tmpdir, request, robot_type, mock): # TODO(rcadene): measure fps in nightly? 
# TODO(rcadene): test logs # TODO(rcadene): add compatibility with other robots - robot_kwargs = {"robot_type": robot_type} + robot_kwargs = {"robot_type": robot_type, "mock": mock} if robot_type == "aloha" and mock: # To simplify unit test, we do not rerun manual calibration for Aloha mock=True. # Instead, we use the files from '.cache/calibration/aloha_default' - overrides_calibration_dir = None + pass else: if mock: request.getfixturevalue("patch_builtins_input") @@ -52,18 +52,11 @@ def test_robot(tmpdir, request, robot_type, mock): # Create an empty calibration directory to trigger manual calibration tmpdir = Path(tmpdir) calibration_dir = tmpdir / robot_type - overrides_calibration_dir = [f"calibration_dir={calibration_dir}"] mock_calibration_dir(calibration_dir) robot_kwargs["calibration_dir"] = calibration_dir - # Test connecting without devices raises an error - robot = ManipulatorRobot(**robot_kwargs) - with pytest.raises(ValueError): - robot.connect() - del robot - # Test using robot before connecting raises an error - robot = ManipulatorRobot(**robot_kwargs) + robot = make_robot(**robot_kwargs) with pytest.raises(RobotDeviceNotConnectedError): robot.teleop_step() with pytest.raises(RobotDeviceNotConnectedError): @@ -79,7 +72,7 @@ def test_robot(tmpdir, request, robot_type, mock): del robot # Test connecting (triggers manual calibration) - robot = make_robot(robot_type, overrides=overrides_calibration_dir, mock=mock) + robot = make_robot(**robot_kwargs) robot.connect() assert robot.is_connected @@ -92,9 +85,7 @@ def test_robot(tmpdir, request, robot_type, mock): robot.disconnect() # Test teleop can run - robot = make_robot(robot_type, overrides=overrides_calibration_dir, mock=mock) - if overrides_calibration_dir is not None: - robot.calibration_dir = calibration_dir + robot = make_robot(**robot_kwargs) robot.connect() robot.teleop_step() diff --git a/tests/test_utils.py b/tests/test_utils.py index 8880d28c37..071e17b8ec 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -1,6 +1,5 @@ import random from typing import Callable -from uuid import uuid4 import numpy as np import pytest @@ -13,7 +12,6 @@ ) from lerobot.common.utils.utils import ( get_global_random_state, - init_hydra_config, seeded_context, set_global_random_state, set_global_seed, @@ -70,10 +68,3 @@ def test_calculate_episode_data_index(): episode_data_index = calculate_episode_data_index(dataset) assert torch.equal(episode_data_index["from"], torch.tensor([0, 2, 3])) assert torch.equal(episode_data_index["to"], torch.tensor([2, 3, 6])) - - -def test_init_hydra_config_empty(): - test_file = f"/tmp/test_init_hydra_config_empty_{uuid4().hex}.yaml" - with open(test_file, "w") as f: - f.write("\n") - init_hydra_config(test_file) diff --git a/tests/utils.py b/tests/utils.py index f24b3551c7..c49b5b9fff 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -16,7 +16,6 @@ import json import os import platform -from copy import copy from functools import wraps from pathlib import Path @@ -25,18 +24,12 @@ from lerobot import available_cameras, available_motors, available_robots from lerobot.common.robot_devices.cameras.utils import Camera +from lerobot.common.robot_devices.cameras.utils import make_camera as make_camera_device from lerobot.common.robot_devices.motors.utils import MotorsBus -from lerobot.common.robot_devices.robots.factory import make_robot as make_robot_from_cfg -from lerobot.common.robot_devices.robots.utils import Robot +from lerobot.common.robot_devices.motors.utils import make_motors_bus as 
make_motors_bus_device from lerobot.common.utils.import_utils import is_package_available -from lerobot.common.utils.utils import init_hydra_config -DEVICE = "cuda" if torch.cuda.is_available() else "cpu" - -# Pass this as the first argument to init_hydra_config. -DEFAULT_CONFIG_PATH = "lerobot/configs/default.yaml" - -ROBOT_CONFIG_PATH_TEMPLATE = "lerobot/configs/robot/{robot}.yaml" +DEVICE = os.environ.get("LEROBOT_TEST_DEVICE", "cuda") if torch.cuda.is_available() else "cpu" TEST_ROBOT_TYPES = [] for robot_type in available_robots: @@ -52,7 +45,7 @@ # Camera indices used for connecting physical cameras OPENCV_CAMERA_INDEX = int(os.environ.get("LEROBOT_TEST_OPENCV_CAMERA_INDEX", 0)) -INTELREALSENSE_CAMERA_INDEX = int(os.environ.get("LEROBOT_TEST_INTELREALSENSE_CAMERA_INDEX", 128422271614)) +INTELREALSENSE_SERIAL_NUMBER = int(os.environ.get("LEROBOT_TEST_INTELREALSENSE_SERIAL_NUMBER", 128422271614)) DYNAMIXEL_PORT = os.environ.get("LEROBOT_TEST_DYNAMIXEL_PORT", "/dev/tty.usbmodem575E0032081") DYNAMIXEL_MOTORS = { @@ -309,79 +302,30 @@ def mock_calibration_dir(calibration_dir): json.dump(example_calib, f) -def make_robot(robot_type: str, overrides: list[str] | None = None, mock=False) -> Robot: - if mock: - overrides = [] if overrides is None else copy(overrides) - - # Explicitely add mock argument to the cameras and set it to true - # TODO(rcadene, aliberts): redesign when we drop hydra - if robot_type in ["koch", "so100", "moss"]: - overrides.append("+leader_arms.main.mock=true") - overrides.append("+follower_arms.main.mock=true") - if "~cameras" not in overrides: - overrides.append("+cameras.laptop.mock=true") - overrides.append("+cameras.phone.mock=true") - - elif robot_type == "koch_bimanual": - overrides.append("+leader_arms.left.mock=true") - overrides.append("+leader_arms.right.mock=true") - overrides.append("+follower_arms.left.mock=true") - overrides.append("+follower_arms.right.mock=true") - if "~cameras" not in overrides: - overrides.append("+cameras.laptop.mock=true") - overrides.append("+cameras.phone.mock=true") - - elif robot_type == "aloha": - overrides.append("+leader_arms.left.mock=true") - overrides.append("+leader_arms.right.mock=true") - overrides.append("+follower_arms.left.mock=true") - overrides.append("+follower_arms.right.mock=true") - if "~cameras" not in overrides: - overrides.append("+cameras.cam_high.mock=true") - overrides.append("+cameras.cam_low.mock=true") - overrides.append("+cameras.cam_left_wrist.mock=true") - overrides.append("+cameras.cam_right_wrist.mock=true") - - else: - raise NotImplementedError(robot_type) - - config_path = ROBOT_CONFIG_PATH_TEMPLATE.format(robot=robot_type) - robot_cfg = init_hydra_config(config_path, overrides) - robot = make_robot_from_cfg(robot_cfg) - return robot - - -def make_camera(camera_type, **kwargs) -> Camera: +# TODO(rcadene, aliberts): remove this dark pattern that overrides +def make_camera(camera_type: str, **kwargs) -> Camera: if camera_type == "opencv": - from lerobot.common.robot_devices.cameras.opencv import OpenCVCamera - camera_index = kwargs.pop("camera_index", OPENCV_CAMERA_INDEX) - return OpenCVCamera(camera_index, **kwargs) + return make_camera_device(camera_type, camera_index=camera_index, **kwargs) elif camera_type == "intelrealsense": - from lerobot.common.robot_devices.cameras.intelrealsense import IntelRealSenseCamera - - camera_index = kwargs.pop("camera_index", INTELREALSENSE_CAMERA_INDEX) - return IntelRealSenseCamera(camera_index, **kwargs) - + serial_number = kwargs.pop("serial_number", 
INTELREALSENSE_SERIAL_NUMBER) + return make_camera_device(camera_type, serial_number=serial_number, **kwargs) else: raise ValueError(f"The camera type '{camera_type}' is not valid.") +# TODO(rcadene, aliberts): remove this dark pattern that overrides def make_motors_bus(motor_type: str, **kwargs) -> MotorsBus: if motor_type == "dynamixel": - from lerobot.common.robot_devices.motors.dynamixel import DynamixelMotorsBus - port = kwargs.pop("port", DYNAMIXEL_PORT) motors = kwargs.pop("motors", DYNAMIXEL_MOTORS) - return DynamixelMotorsBus(port, motors, **kwargs) + return make_motors_bus_device(motor_type, port=port, motors=motors, **kwargs) elif motor_type == "feetech": - from lerobot.common.robot_devices.motors.feetech import FeetechMotorsBus - port = kwargs.pop("port", FEETECH_PORT) motors = kwargs.pop("motors", FEETECH_MOTORS) - return FeetechMotorsBus(port, motors, **kwargs) + return make_motors_bus_device(motor_type, port=port, motors=motors, **kwargs) else: raise ValueError(f"The motor type '{motor_type}' is not valid.") From 02b996a72f27e10b189d3ff426c72ab159ddc360 Mon Sep 17 00:00:00 2001 From: Simon Alibert Date: Thu, 9 Jan 2025 16:09:33 +0100 Subject: [PATCH 25/94] Fix logger --- lerobot/common/logger.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lerobot/common/logger.py b/lerobot/common/logger.py index e39cdb216b..6cd2b56d46 100644 --- a/lerobot/common/logger.py +++ b/lerobot/common/logger.py @@ -21,13 +21,13 @@ import logging import os import re +from dataclasses import asdict from glob import glob from pathlib import Path import draccus import torch from huggingface_hub.constants import SAFETENSORS_SINGLE_FILE -from omegaconf import DictConfig, OmegaConf from termcolor import colored from torch.optim import Optimizer from torch.optim.lr_scheduler import LRScheduler @@ -45,7 +45,7 @@ def log_output_dir(out_dir): logging.info(colored("Output dir:", "yellow", attrs=["bold"]) + f" {out_dir}") -def cfg_to_group(cfg: DictConfig, return_list: bool = False) -> list[str] | str: +def cfg_to_group(cfg: TrainPipelineConfig, return_list: bool = False) -> list[str] | str: """Return a group name for logging. 
Optionally returns group name as list.""" lst = [ f"policy:{cfg.policy.type}", @@ -121,7 +121,7 @@ def __init__(self, cfg: TrainPipelineConfig): notes=cfg.wandb.notes, tags=cfg_to_group(cfg, return_list=True), dir=self.log_dir, - config=OmegaConf.to_container(cfg, resolve=True), + config=asdict(self.cfg), # TODO(rcadene): try set to True save_code=False, # TODO(rcadene): split train and eval, and run async eval with job_type="eval" From a69b42507b0872fadb60783f899d12487721b3d8 Mon Sep 17 00:00:00 2001 From: Simon Alibert Date: Thu, 9 Jan 2025 16:16:50 +0100 Subject: [PATCH 26/94] Remove hydra-core --- poetry.lock | 22 +++------------------- pyproject.toml | 1 - 2 files changed, 3 insertions(+), 20 deletions(-) diff --git a/poetry.lock b/poetry.lock index 33c4d7f151..0b71c083f1 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1297,8 +1297,8 @@ dev = ["black", "mypy", "pre-commit", "pytest", "ruff"] [package.source] type = "git" url = "https://github.com/dlwh/draccus.git" -reference = "55e456a" -resolved_reference = "55e456a3047a97135e47bbeefe06873d2727dd41" +reference = "HEAD" +resolved_reference = "9b690730ca108930519f48cc5dead72a72fd27cb" [[package]] name = "drawnow" @@ -2265,22 +2265,6 @@ testing = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "fastapi", "gr torch = ["safetensors[torch]", "torch"] typing = ["types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)"] -[[package]] -name = "hydra-core" -version = "1.3.2" -description = "A framework for elegantly configuring complex applications" -optional = false -python-versions = "*" -files = [ - {file = "hydra-core-1.3.2.tar.gz", hash = "sha256:8a878ed67216997c3e9d88a8e72e7b4767e81af37afb4ea3334b269a4390a824"}, - {file = "hydra_core-1.3.2-py3-none-any.whl", hash = "sha256:fa0238a9e31df3373b35b0bfb672c34cc92718d21f81311d8996a16de1141d8b"}, -] - -[package.dependencies] -antlr4-python3-runtime = "==4.9.*" -omegaconf = ">=2.2,<2.4" -packaging = "*" - [[package]] name = "identify" version = "2.6.5" @@ -7689,4 +7673,4 @@ xarm = ["gym-xarm"] [metadata] lock-version = "2.0" python-versions = ">=3.10,<3.13" -content-hash = "a67ba1530855ce3ad03d8ea9629f2fae439d8c5b83702c64f73092ea4f9fa43b" +content-hash = "df9042654c525f96e88e868d95f8746d69f3c30b00cdf064625df8aa540ea488" diff --git a/pyproject.toml b/pyproject.toml index 30bc7eb96b..c1b791d731 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -33,7 +33,6 @@ omegaconf = ">=2.3.0" wandb = ">=0.16.3" imageio = {extras = ["ffmpeg"], version = ">=2.34.0"} gdown = ">=5.1.0" -hydra-core = ">=1.3.2" einops = ">=0.8.0" pymunk = ">=6.6.0" zarr = ">=2.17.0" From 5871fe8e8b0c1f56cf5138ed4ae58ae1a6eb7c3f Mon Sep 17 00:00:00 2001 From: Simon Alibert Date: Thu, 9 Jan 2025 18:10:32 +0100 Subject: [PATCH 27/94] Remove NoneSchedulerConfig --- lerobot/common/optim/factory.py | 2 +- lerobot/common/optim/schedulers.py | 9 --------- lerobot/common/policies/act/configuration_act.py | 5 ++--- lerobot/common/policies/tdmpc/configuration_tdmpc.py | 5 ++--- lerobot/configs/policies.py | 11 ++++++----- 5 files changed, 11 insertions(+), 21 deletions(-) diff --git a/lerobot/common/optim/factory.py b/lerobot/common/optim/factory.py index cc27313965..c81579cd4f 100644 --- a/lerobot/common/optim/factory.py +++ b/lerobot/common/optim/factory.py @@ -31,7 +31,7 @@ def make_optimizer_and_scheduler( ) -> tuple[Optimizer, LRScheduler | None]: params = policy.get_optim_params() if cfg.use_policy_training_preset else policy.parameters() optimizer = 
cfg.optimizer.build(params) - lr_scheduler = cfg.scheduler.build(optimizer, cfg.offline.steps) + lr_scheduler = cfg.scheduler.build(optimizer, cfg.offline.steps) if cfg.scheduler is not None else None return optimizer, lr_scheduler diff --git a/lerobot/common/optim/schedulers.py b/lerobot/common/optim/schedulers.py index 36e2112ec1..752ddeff2e 100644 --- a/lerobot/common/optim/schedulers.py +++ b/lerobot/common/optim/schedulers.py @@ -20,15 +20,6 @@ def build(self, optimizer: Optimizer, num_training_steps: int) -> LRScheduler | raise NotImplementedError -@LRSchedulerConfig.register_subclass("none") -@dataclass -class NoneSchedulerConfig(LRSchedulerConfig): - num_warmup_steps: int = 0 - - def build(self, optimizer: Optimizer, num_training_steps: int) -> None: - return None - - @LRSchedulerConfig.register_subclass("diffuser") @dataclass class DiffuserSchedulerConfig(LRSchedulerConfig): diff --git a/lerobot/common/policies/act/configuration_act.py b/lerobot/common/policies/act/configuration_act.py index e26efa8a96..858cd70d36 100644 --- a/lerobot/common/policies/act/configuration_act.py +++ b/lerobot/common/policies/act/configuration_act.py @@ -16,7 +16,6 @@ from dataclasses import dataclass, field from lerobot.common.optim.optimizers import AdamWConfig -from lerobot.common.optim.schedulers import NoneSchedulerConfig from lerobot.configs.policies import PretrainedConfig from lerobot.configs.types import NormalizationMode @@ -167,8 +166,8 @@ def get_optimizer_preset(self) -> AdamWConfig: weight_decay=self.optimizer_weight_decay, ) - def get_scheduler_preset(self) -> NoneSchedulerConfig: - return NoneSchedulerConfig() + def get_scheduler_preset(self) -> None: + return None def validate_features(self) -> None: if not self.image_features and not self.env_state_feature: diff --git a/lerobot/common/policies/tdmpc/configuration_tdmpc.py b/lerobot/common/policies/tdmpc/configuration_tdmpc.py index a5d305019a..e0757e13d7 100644 --- a/lerobot/common/policies/tdmpc/configuration_tdmpc.py +++ b/lerobot/common/policies/tdmpc/configuration_tdmpc.py @@ -17,7 +17,6 @@ from dataclasses import dataclass, field from lerobot.common.optim.optimizers import AdamConfig -from lerobot.common.optim.schedulers import NoneSchedulerConfig from lerobot.configs.policies import PretrainedConfig from lerobot.configs.types import NormalizationMode @@ -191,8 +190,8 @@ def __post_init__(self): def get_optimizer_preset(self) -> AdamConfig: return AdamConfig(lr=self.optimizer_lr) - def get_scheduler_preset(self) -> NoneSchedulerConfig: - return NoneSchedulerConfig() + def get_scheduler_preset(self) -> None: + return None def validate_features(self) -> None: # There should only be one image key. 
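With NoneSchedulerConfig gone, a policy config that needs no scheduler simply returns None from get_scheduler_preset() and the factory above skips building one. New schedulers can still be plugged in through the same registry used by DiffuserSchedulerConfig; here is a hypothetical sketch of that pattern (the "cosine_restart" name, its fields, and the schedule itself are invented for illustration and are not part of this patch):

```python
from dataclasses import dataclass

from torch.optim import Optimizer
from torch.optim.lr_scheduler import CosineAnnealingWarmRestarts, LRScheduler

from lerobot.common.optim.schedulers import LRSchedulerConfig


@LRSchedulerConfig.register_subclass("cosine_restart")
@dataclass
class CosineRestartSchedulerConfig(LRSchedulerConfig):
    # Mirrors the num_warmup_steps field declared by the other scheduler configs.
    num_warmup_steps: int = 0
    t_0: int = 1_000

    def build(self, optimizer: Optimizer, num_training_steps: int) -> LRScheduler:
        # num_training_steps comes from cfg.offline.steps in the factory above;
        # this particular schedule does not use it.
        return CosineAnnealingWarmRestarts(optimizer, T_0=self.t_0)
```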
diff --git a/lerobot/configs/policies.py b/lerobot/configs/policies.py index ba00dcacc9..90bfa00875 100644 --- a/lerobot/configs/policies.py +++ b/lerobot/configs/policies.py @@ -9,6 +9,7 @@ import draccus import gymnasium as gym from huggingface_hub import ModelHubMixin, hf_hub_download +from huggingface_hub.constants import CONFIG_NAME from huggingface_hub.errors import HfHubHTTPError from lerobot.common.datasets.lerobot_dataset import LeRobotDatasetMetadata @@ -72,7 +73,7 @@ def get_optimizer_preset(self) -> OptimizerConfig: raise NotImplementedError @abc.abstractmethod - def get_scheduler_preset(self) -> LRSchedulerConfig: + def get_scheduler_preset(self) -> LRSchedulerConfig | None: raise NotImplementedError @abc.abstractmethod @@ -95,7 +96,7 @@ def output_features(self) -> list[PolicyFeature]: def _save_pretrained(self, save_directory: Path) -> None: to_save = copy(self) del to_save.path - with open(save_directory / "config.json", "w") as f, draccus.config_type("json"): + with open(save_directory / CONFIG_NAME, "w") as f, draccus.config_type("json"): draccus.dump(self, f, indent=4) @classmethod @@ -115,15 +116,15 @@ def from_pretrained( model_id = str(pretrained_model_name_or_path) config_file: str | None = None if Path(model_id).is_dir(): - if "config.json" in os.listdir(model_id): - config_file = os.path.join(model_id, "config.json") + if CONFIG_NAME in os.listdir(model_id): + config_file = os.path.join(model_id, CONFIG_NAME) else: print(f"config.json not found in {Path(model_id).resolve()}") else: try: config_file = hf_hub_download( repo_id=model_id, - filename="config.json", + filename=CONFIG_NAME, revision=revision, cache_dir=cache_dir, force_download=force_download, From 3c5e8a5ef35f0e6a809ec4366be42268d1b9a95a Mon Sep 17 00:00:00 2001 From: Simon Alibert Date: Thu, 9 Jan 2025 19:01:46 +0100 Subject: [PATCH 28/94] Add push_pretrained --- lerobot/scripts/push_pretrained.py | 60 ++++++++++++++++++++++++++++++ 1 file changed, 60 insertions(+) create mode 100644 lerobot/scripts/push_pretrained.py diff --git a/lerobot/scripts/push_pretrained.py b/lerobot/scripts/push_pretrained.py new file mode 100644 index 0000000000..68360339b7 --- /dev/null +++ b/lerobot/scripts/push_pretrained.py @@ -0,0 +1,60 @@ +#!/usr/bin/env python + +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Once you have trained a policy with our training script (lerobot/scripts/train.py), use this script to push it +to the hub. 
+ +Example: + +```bash +python lerobot/scripts/push_pretrained.py \ + --pretrained_path=outputs/train/act_aloha_sim_transfer_cube_human/checkpoints/last/pretrained_model \ + --repo_id=lerobot/act_aloha_sim_transfer_cube_human +``` +""" + +from dataclasses import dataclass +from pathlib import Path + +import draccus +from huggingface_hub import create_repo, upload_folder + + +@dataclass +class PushPretrainedConfig: + pretrained_path: Path + repo_id: str + private: bool = False + exist_ok: bool = False + + +@draccus.wrap() +def main(cfg: PushPretrainedConfig): + create_repo( + repo_id=cfg.repo_id, + private=cfg.private, + repo_type="model", + exist_ok=cfg.exist_ok, + ) + upload_folder( + repo_id=cfg.repo_id, + folder_path=cfg.pretrained_path, + repo_type="model", + ) + + +if __name__ == "__main__": + main() From 1eb8527d36698002b191993739da3788946cb099 Mon Sep 17 00:00:00 2001 From: Simon Alibert Date: Thu, 9 Jan 2025 20:55:30 +0100 Subject: [PATCH 29/94] Remove eval.episode_length --- Makefile | 4 ---- lerobot/configs/eval.py | 1 - 2 files changed, 5 deletions(-) diff --git a/Makefile b/Makefile index 39881874f8..073b4183ac 100644 --- a/Makefile +++ b/Makefile @@ -66,7 +66,6 @@ test-act-ete-eval: --env.episode_length=5 \ --eval.n_episodes=1 \ --eval.batch_size=1 \ - --eval.episode_length=8 \ --device=$(DEVICE) test-diffusion-ete-train: @@ -99,7 +98,6 @@ test-diffusion-ete-eval: --env.episode_length=5 \ --eval.n_episodes=1 \ --eval.batch_size=1 \ - --eval.episode_length=8 \ --device=$(DEVICE) test-tdmpc-ete-train: @@ -129,10 +127,8 @@ test-tdmpc-ete-eval: --env.type=xarm \ --env.episode_length=5 \ --env.task=XarmLift-v0 \ - --env.episode_length=2 \ --eval.n_episodes=1 \ --eval.batch_size=1 \ - --eval.episode_length=8 \ --device=$(DEVICE) # FIXME: currently broken diff --git a/lerobot/configs/eval.py b/lerobot/configs/eval.py index b19033ace4..58e3c12a0e 100644 --- a/lerobot/configs/eval.py +++ b/lerobot/configs/eval.py @@ -10,7 +10,6 @@ @dataclass class EvalConfig: n_episodes: int = 50 - episode_length: int | None = None # `batch_size` specifies the number of environments to use in a gym.vector.VectorEnv. batch_size: int = 50 # `use_async_envs` specifies whether to use asynchronous environments (multiprocessing). 
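With eval.episode_length removed, rollout length is configured once on the environment config, while EvalConfig only controls how many episodes to run and how they are batched. A small sketch of the resulting split, assuming the field names shown in this patch (values are illustrative):

```python
from lerobot.common.envs.configs import PushtEnv
from lerobot.configs.eval import EvalConfig

env_cfg = PushtEnv(episode_length=300)  # episode length now lives on the env config
eval_cfg = EvalConfig(n_episodes=10, batch_size=5)
```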
From abaf654e5f6208394175faa606e865174ee318ce Mon Sep 17 00:00:00 2001 From: Simon Alibert Date: Thu, 9 Jan 2025 20:56:42 +0100 Subject: [PATCH 30/94] Fix wandb_video --- lerobot/common/logger.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lerobot/common/logger.py b/lerobot/common/logger.py index 6cd2b56d46..357d6fd83c 100644 --- a/lerobot/common/logger.py +++ b/lerobot/common/logger.py @@ -228,7 +228,7 @@ def log_dict(self, d: dict, step: int, mode: str = "train"): def log_video(self, video_path: str, step: int, mode: str = "train"): assert mode in {"train", "eval"} assert self._wandb is not None - wandb_video = self._wandb.Video(video_path, fps=self._cfg.fps, format="mp4") + wandb_video = self._wandb.Video(video_path, fps=self._cfg.env.fps, format="mp4") self._wandb.log({f"{mode}/video": wandb_video}, step=step) From 6bd9e12e439f19ae2774cc87f81b7551c3a6e04f Mon Sep 17 00:00:00 2001 From: Simon Alibert Date: Fri, 10 Jan 2025 15:47:48 +0100 Subject: [PATCH 31/94] Fix typo --- lerobot/common/datasets/lerobot_dataset.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lerobot/common/datasets/lerobot_dataset.py b/lerobot/common/datasets/lerobot_dataset.py index b32cf7095c..6db1612821 100644 --- a/lerobot/common/datasets/lerobot_dataset.py +++ b/lerobot/common/datasets/lerobot_dataset.py @@ -833,7 +833,7 @@ def start_image_writer(self, num_processes: int = 0, num_threads: int = 4) -> No def stop_image_writer(self) -> None: """ Whenever wrapping this dataset inside a parallelized DataLoader, this needs to be called first to - remove the image_write in order for the LeRobotDataset object to be pickleable and parallelized. + remove the image_writer in order for the LeRobotDataset object to be pickleable and parallelized. 
""" if self.image_writer is not None: self.image_writer.stop() From aa228c0c2a3a93291943fd530c4b3efa2dd2e53e Mon Sep 17 00:00:00 2001 From: Simon Alibert <75076266+aliberts@users.noreply.github.com> Date: Thu, 16 Jan 2025 17:44:17 +0100 Subject: [PATCH 32/94] Add features back into policy configs (#643) --- lerobot/common/constants.py | 6 + lerobot/common/datasets/utils.py | 33 +++- lerobot/common/envs/configs.py | 88 ++++++--- lerobot/common/envs/utils.py | 25 +++ lerobot/common/policies/act/modeling_act.py | 16 +- .../diffusion/configuration_diffusion.py | 11 +- .../policies/diffusion/modeling_diffusion.py | 28 +-- lerobot/common/policies/factory.py | 16 +- lerobot/common/policies/normalize.py | 85 +++++---- .../policies/tdmpc/configuration_tdmpc.py | 2 +- .../common/policies/tdmpc/modeling_tdmpc.py | 32 +++- .../policies/vqbet/configuration_vqbet.py | 11 +- .../common/policies/vqbet/modeling_vqbet.py | 25 ++- lerobot/common/utils/utils.py | 11 ++ lerobot/configs/policies.py | 180 +++--------------- lerobot/configs/types.py | 7 + tests/test_policies.py | 65 ++++--- 17 files changed, 328 insertions(+), 313 deletions(-) create mode 100644 lerobot/common/constants.py diff --git a/lerobot/common/constants.py b/lerobot/common/constants.py new file mode 100644 index 0000000000..7388959486 --- /dev/null +++ b/lerobot/common/constants.py @@ -0,0 +1,6 @@ +# keys +OBS_ENV = "observation.environment_state" +OBS_ROBOT = "observation.state" +OBS_IMAGE = "observation.image" +OBS_IMAGES = "observation.images" +ACTION = "action" diff --git a/lerobot/common/datasets/utils.py b/lerobot/common/datasets/utils.py index 1850c8aa46..612bac39ab 100644 --- a/lerobot/common/datasets/utils.py +++ b/lerobot/common/datasets/utils.py @@ -35,7 +35,7 @@ from torchvision import transforms from lerobot.common.robot_devices.robots.utils import Robot -from lerobot.configs.types import DictLike +from lerobot.configs.types import DictLike, FeatureType, PolicyFeature DEFAULT_CHUNK_SIZE = 1000 # Max number of episodes per chunk @@ -302,6 +302,37 @@ def get_features_from_robot(robot: Robot, use_videos: bool = True) -> dict: return {**robot.motor_features, **camera_ft, **DEFAULT_FEATURES} +def dataset_to_policy_features(features: dict[str, dict]) -> dict[str, PolicyFeature]: + # TODO(aliberts): Implement "type" in dataset features and simplify this + policy_features = {} + for key, ft in features.items(): + shape = ft["shape"] + if ft["dtype"] in ["image", "video"]: + type = FeatureType.VISUAL + if len(shape) != 3: + raise ValueError(f"Number of dimensions of {key} != 3 (shape={shape})") + + names = ft["names"] + # Backward compatibility for "channel" which is an error introduced in LeRobotDataset v2.0 for ported datasets. 
+ if names[2] in ["channel", "channels"]: # (h, w, c) -> (c, h, w) + shape = (shape[2], shape[0], shape[1]) + elif key == "observation.environment_state": + type = FeatureType.ENV + elif key.startswith("observation"): + type = FeatureType.STATE + elif key == "action": + type = FeatureType.ACTION + else: + continue + + policy_features[key] = PolicyFeature( + type=type, + shape=shape, + ) + + return policy_features + + def create_empty_dataset_info( codebase_version: str, fps: int, diff --git a/lerobot/common/envs/configs.py b/lerobot/common/envs/configs.py index 340682a692..d0c53d22d6 100644 --- a/lerobot/common/envs/configs.py +++ b/lerobot/common/envs/configs.py @@ -3,7 +3,8 @@ import draccus -from lerobot.configs.types import FeatureType +from lerobot.common.constants import ACTION, OBS_ENV, OBS_IMAGE, OBS_IMAGES, OBS_ROBOT +from lerobot.configs.types import FeatureType, PolicyFeature @dataclass @@ -11,7 +12,8 @@ class EnvConfig(draccus.ChoiceRegistry, abc.ABC): n_envs: int | None = None task: str | None = None fps: int = 30 - feature_types: dict = field(default_factory=dict) + features: dict[str, PolicyFeature] = field(default_factory=dict) + features_map: dict[str, str] = field(default_factory=dict) @property def type(self) -> str: @@ -28,17 +30,28 @@ class AlohaEnv(EnvConfig): task: str = "AlohaInsertion-v0" fps: int = 50 episode_length: int = 400 - feature_types: dict = field( + obs_type: str = "pixels_agent_pos" + render_mode: str = "rgb_array" + features: dict[str, PolicyFeature] = field( default_factory=lambda: { - "agent_pos": FeatureType.STATE, - "pixels": { - "top": FeatureType.VISUAL, - }, - "action": FeatureType.ACTION, + "action": PolicyFeature(type=FeatureType.ACTION, shape=(14,)), + } + ) + features_map: dict[str, str] = field( + default_factory=lambda: { + "action": ACTION, + "agent_pos": OBS_ROBOT, + "top": f"{OBS_IMAGE}.top", + "pixels/top": f"{OBS_IMAGES}.top", } ) - obs_type: str = "pixels_agent_pos" - render_mode: str = "rgb_array" + + def __post_init__(self): + if self.obs_type == "pixels": + self.features["top"] = PolicyFeature(type=FeatureType.VISUAL, shape=(480, 640, 3)) + elif self.obs_type == "pixels_agent_pos": + self.features["agent_pos"] = PolicyFeature(type=FeatureType.STATE, shape=(14,)) + self.features["pixels/top"] = PolicyFeature(type=FeatureType.VISUAL, shape=(480, 640, 3)) @property def gym_kwargs(self) -> dict: @@ -55,25 +68,30 @@ class PushtEnv(EnvConfig): task: str = "PushT-v0" fps: int = 10 episode_length: int = 300 - feature_types: dict = field( - default_factory=lambda: { - "agent_pos": FeatureType.STATE, - "pixels": FeatureType.VISUAL, - "action": FeatureType.ACTION, - } - ) obs_type: str = "pixels_agent_pos" render_mode: str = "rgb_array" visualization_width: int = 384 visualization_height: int = 384 + features: dict[str, PolicyFeature] = field( + default_factory=lambda: { + "action": PolicyFeature(type=FeatureType.ACTION, shape=(2,)), + "agent_pos": PolicyFeature(type=FeatureType.STATE, shape=(2,)), + } + ) + features_map: dict[str, str] = field( + default_factory=lambda: { + "action": ACTION, + "agent_pos": OBS_ROBOT, + "environment_state": OBS_ENV, + "pixels": OBS_IMAGE, + } + ) def __post_init__(self): - if self.obs_type == "environment_state_agent_pos": - self.feature_types = { - "agent_pos": FeatureType.STATE, - "environment_state": FeatureType.ENV, - "action": FeatureType.ACTION, - } + if self.obs_type == "pixels_agent_pos": + self.features["pixels"] = PolicyFeature(type=FeatureType.VISUAL, shape=(384, 384, 3)) + elif self.obs_type == 
"environment_state_agent_pos": + self.features["environment_state"] = PolicyFeature(type=FeatureType.ENV, shape=(16,)) @property def gym_kwargs(self) -> dict: @@ -91,17 +109,27 @@ class XarmEnv(EnvConfig): task: str = "XarmLift-v0" fps: int = 15 episode_length: int = 200 - feature_types: dict = field( - default_factory=lambda: { - "agent_pos": FeatureType.STATE, - "pixels": FeatureType.VISUAL, - "action": FeatureType.ACTION, - } - ) obs_type: str = "pixels_agent_pos" render_mode: str = "rgb_array" visualization_width: int = 384 visualization_height: int = 384 + features: dict[str, PolicyFeature] = field( + default_factory=lambda: { + "action": PolicyFeature(type=FeatureType.ACTION, shape=(4,)), + "pixels": PolicyFeature(type=FeatureType.VISUAL, shape=(84, 84, 3)), + } + ) + features_map: dict[str, str] = field( + default_factory=lambda: { + "action": ACTION, + "agent_pos": OBS_ROBOT, + "pixels": OBS_IMAGE, + } + ) + + def __post_init__(self): + if self.obs_type == "pixels_agent_pos": + self.features["agent_pos"] = PolicyFeature(type=FeatureType.STATE, shape=(4,)) @property def gym_kwargs(self) -> dict: diff --git a/lerobot/common/envs/utils.py b/lerobot/common/envs/utils.py index 9f4f7e17e9..30bbaf3968 100644 --- a/lerobot/common/envs/utils.py +++ b/lerobot/common/envs/utils.py @@ -18,6 +18,10 @@ import torch from torch import Tensor +from lerobot.common.envs.configs import EnvConfig +from lerobot.common.utils.utils import get_channel_first_image_shape +from lerobot.configs.types import FeatureType, PolicyFeature + def preprocess_observation(observations: dict[str, np.ndarray]) -> dict[str, Tensor]: # TODO(aliberts, rcadene): refactor this to use features from the environment (no hardcoding) @@ -36,6 +40,7 @@ def preprocess_observation(observations: dict[str, np.ndarray]) -> dict[str, Ten imgs = {"observation.image": observations["pixels"]} for imgkey, img in imgs.items(): + # TODO(aliberts, rcadene): use transforms.ToTensor()? 
img = torch.from_numpy(img) # sanity check that images are channel last @@ -61,3 +66,23 @@ def preprocess_observation(observations: dict[str, np.ndarray]) -> dict[str, Ten # requirement for "agent_pos" return_observations["observation.state"] = torch.from_numpy(observations["agent_pos"]).float() return return_observations + + +def env_to_policy_features(env_cfg: EnvConfig) -> dict[str, PolicyFeature]: + # TODO(aliberts, rcadene): remove this hardcoding of keys and just use the nested keys as is + # (need to also refactor preprocess_observation and externalize normalization from policies) + policy_features = {} + for key, ft in env_cfg.features.items(): + if ft.type is FeatureType.VISUAL: + if len(ft.shape) != 3: + raise ValueError(f"Number of dimensions of {key} != 3 (shape={ft.shape})") + + shape = get_channel_first_image_shape(ft.shape) + feature = PolicyFeature(type=ft.type, shape=shape) + else: + feature = ft + + policy_key = env_cfg.features_map[key] + policy_features[policy_key] = feature + + return policy_features diff --git a/lerobot/common/policies/act/modeling_act.py b/lerobot/common/policies/act/modeling_act.py index da66b5aaa1..2e7b1f9ce7 100644 --- a/lerobot/common/policies/act/modeling_act.py +++ b/lerobot/common/policies/act/modeling_act.py @@ -68,9 +68,13 @@ def __init__( config.validate_features() self.config = config - self.normalize_inputs = Normalize(config.input_features, dataset_stats) - self.normalize_targets = Normalize(config.output_features, dataset_stats) - self.unnormalize_outputs = Unnormalize(config.output_features, dataset_stats) + self.normalize_inputs = Normalize(config.input_features, config.normalization_mapping, dataset_stats) + self.normalize_targets = Normalize( + config.output_features, config.normalization_mapping, dataset_stats + ) + self.unnormalize_outputs = Unnormalize( + config.output_features, config.normalization_mapping, dataset_stats + ) self.model = ACT(config) @@ -121,7 +125,7 @@ def select_action(self, batch: dict[str, Tensor]) -> Tensor: if self.config.image_features: batch = dict(batch) # shallow copy so that adding a key doesn't modify the original batch["observation.images"] = torch.stack( - [batch[ft.key] for ft in self.config.image_features], dim=-4 + [batch[key] for key in self.config.image_features], dim=-4 ) # If we are doing temporal ensembling, do online updates where we keep track of the number of actions @@ -151,7 +155,7 @@ def forward(self, batch: dict[str, Tensor]) -> dict[str, Tensor]: if self.config.image_features: batch = dict(batch) # shallow copy so that adding a key doesn't modify the original batch["observation.images"] = torch.stack( - [batch[ft.key] for ft in self.config.image_features], dim=-4 + [batch[key] for key in self.config.image_features], dim=-4 ) batch = self.normalize_targets(batch) actions_hat, (mu_hat, log_sigma_x2_hat) = self.model(batch) @@ -411,7 +415,7 @@ def forward(self, batch: dict[str, Tensor]) -> tuple[Tensor, tuple[Tensor, Tenso """ if self.config.use_vae and self.training: assert ( - self.config.action_feature.key in batch + "action" in batch ), "actions must be provided when using the variational objective in training mode." 
batch_size = ( diff --git a/lerobot/common/policies/diffusion/configuration_diffusion.py b/lerobot/common/policies/diffusion/configuration_diffusion.py index fb58b3efdb..b92c9974c9 100644 --- a/lerobot/common/policies/diffusion/configuration_diffusion.py +++ b/lerobot/common/policies/diffusion/configuration_diffusion.py @@ -208,21 +208,20 @@ def validate_features(self) -> None: raise ValueError("You must provide at least one image or the environment state among the inputs.") if self.crop_shape is not None: - for image_ft in self.image_features: + for key, image_ft in self.image_features.items(): if self.crop_shape[0] > image_ft.shape[1] or self.crop_shape[1] > image_ft.shape[2]: raise ValueError( f"`crop_shape` should fit within the images shapes. Got {self.crop_shape} " f"for `crop_shape` and {image_ft.shape} for " - f"`{image_ft.key}`." + f"`{key}`." ) # Check that all input images have the same shape. - first_image_ft = next(iter(self.image_features)) - for image_ft in self.image_features: + first_image_key, first_image_ft = next(iter(self.image_features.items())) + for key, image_ft in self.image_features.items(): if image_ft.shape != first_image_ft.shape: raise ValueError( - f"`{image_ft.key}` does not match `{first_image_ft.key}`, but we " - "expect all image shapes to match." + f"`{key}` does not match `{first_image_key}`, but we " "expect all image shapes to match." ) @property diff --git a/lerobot/common/policies/diffusion/modeling_diffusion.py b/lerobot/common/policies/diffusion/modeling_diffusion.py index ec58c49ce9..174bb24ede 100644 --- a/lerobot/common/policies/diffusion/modeling_diffusion.py +++ b/lerobot/common/policies/diffusion/modeling_diffusion.py @@ -34,6 +34,7 @@ from huggingface_hub import PyTorchModelHubMixin from torch import Tensor, nn +from lerobot.common.constants import OBS_ENV, OBS_ROBOT from lerobot.common.policies.diffusion.configuration_diffusion import DiffusionConfig from lerobot.common.policies.normalize import Normalize, Unnormalize from lerobot.common.policies.utils import ( @@ -74,9 +75,13 @@ def __init__( config.validate_features() self.config = config - self.normalize_inputs = Normalize(config.input_features, dataset_stats) - self.normalize_targets = Normalize(config.output_features, dataset_stats) - self.unnormalize_outputs = Unnormalize(config.output_features, dataset_stats) + self.normalize_inputs = Normalize(config.input_features, config.normalization_mapping, dataset_stats) + self.normalize_targets = Normalize( + config.output_features, config.normalization_mapping, dataset_stats + ) + self.unnormalize_outputs = Unnormalize( + config.output_features, config.normalization_mapping, dataset_stats + ) # queues are populated during rollout of the policy, they contain the n latest observations and actions self._queues = None @@ -125,7 +130,7 @@ def select_action(self, batch: dict[str, Tensor]) -> Tensor: if self.config.image_features: batch = dict(batch) # shallow copy so that adding a key doesn't modify the original batch["observation.images"] = torch.stack( - [batch[ft.key] for ft in self.config.image_features], dim=-4 + [batch[key] for key in self.config.image_features], dim=-4 ) # Note: It's important that this happens after stacking the images into a single key. 
self._queues = populate_queues(self._queues, batch) @@ -149,7 +154,7 @@ def forward(self, batch: dict[str, Tensor]) -> dict[str, Tensor]: if self.config.image_features: batch = dict(batch) # shallow copy so that adding a key doesn't modify the original batch["observation.images"] = torch.stack( - [batch[ft.key] for ft in self.config.image_features], dim=-4 + [batch[key] for key in self.config.image_features], dim=-4 ) batch = self.normalize_targets(batch) loss = self.diffusion.compute_loss(batch) @@ -237,8 +242,8 @@ def conditional_sample( def _prepare_global_conditioning(self, batch: dict[str, Tensor]) -> Tensor: """Encode image features and concatenate them all together along with the state vector.""" - batch_size, n_obs_steps = batch[self.config.robot_state_feature.key].shape[:2] - global_cond_feats = [batch[self.config.robot_state_feature.key]] + batch_size, n_obs_steps = batch[OBS_ROBOT].shape[:2] + global_cond_feats = [batch[OBS_ROBOT]] # Extract image features. if self.config.image_features: if self.config.use_separate_rgb_encoder_per_camera: @@ -268,7 +273,7 @@ def _prepare_global_conditioning(self, batch: dict[str, Tensor]) -> Tensor: global_cond_feats.append(img_features) if self.config.env_state_feature: - global_cond_feats.append(batch[self.config.env_state_feature.key]) + global_cond_feats.append(batch[OBS_ENV]) # Concatenate features then flatten to (B, global_cond_dim). return torch.cat(global_cond_feats, dim=-1).flatten(start_dim=1) @@ -482,10 +487,9 @@ def __init__(self, config: DiffusionConfig): # height and width from `config.image_features`. # Note: we have a check in the config class to make sure all images have the same shape. - dummy_shape_h_w = ( - config.crop_shape if config.crop_shape is not None else config.image_features[0].shape[1:] - ) - dummy_shape = (1, config.image_features[0].shape[0], *dummy_shape_h_w) + images_shape = next(iter(config.image_features.values())).shape + dummy_shape_h_w = config.crop_shape if config.crop_shape is not None else images_shape[1:] + dummy_shape = (1, images_shape[0], *dummy_shape_h_w) feature_map_shape = get_output_shape(self.backbone, dummy_shape)[1:] self.pool = SpatialSoftmax(feature_map_shape, num_kp=config.spatial_softmax_num_keypoints) diff --git a/lerobot/common/policies/factory.py b/lerobot/common/policies/factory.py index 6bc445685b..e89c9a7266 100644 --- a/lerobot/common/policies/factory.py +++ b/lerobot/common/policies/factory.py @@ -18,13 +18,16 @@ from torch import nn from lerobot.common.datasets.lerobot_dataset import LeRobotDatasetMetadata +from lerobot.common.datasets.utils import dataset_to_policy_features from lerobot.common.envs.configs import EnvConfig +from lerobot.common.envs.utils import env_to_policy_features from lerobot.common.policies.act.configuration_act import ACTConfig from lerobot.common.policies.diffusion.configuration_diffusion import DiffusionConfig from lerobot.common.policies.policy_protocol import Policy from lerobot.common.policies.tdmpc.configuration_tdmpc import TDMPCConfig from lerobot.common.policies.vqbet.configuration_vqbet import VQBeTConfig from lerobot.configs.policies import PretrainedConfig +from lerobot.configs.types import FeatureType def get_policy_class(name: str) -> Policy: @@ -100,11 +103,18 @@ def make_policy( kwargs = {} if ds_meta is not None: - cfg.parse_features_from_dataset(ds_meta) + features = dataset_to_policy_features(ds_meta.features) kwargs["dataset_stats"] = ds_meta.stats else: - cfg.parse_features_from_env(env, env_cfg) - + if not cfg.pretrained_path or not 
cfg.output_features or not cfg.input_features: + raise NotImplementedError( + "The policy must have already existing features in its config when initializing it " + "with an environment." + ) + features = env_to_policy_features(env_cfg, cfg) + + cfg.output_features = {key: ft for key, ft in features.items() if ft.type is FeatureType.ACTION} + cfg.input_features = {key: ft for key, ft in features.items() if key not in cfg.output_features} kwargs["config"] = cfg if cfg.pretrained_path: diff --git a/lerobot/common/policies/normalize.py b/lerobot/common/policies/normalize.py index d9bba38401..d8f021d938 100644 --- a/lerobot/common/policies/normalize.py +++ b/lerobot/common/policies/normalize.py @@ -16,12 +16,12 @@ import torch from torch import Tensor, nn -from lerobot.configs.policies import PolicyFeature -from lerobot.configs.types import FeatureType, NormalizationMode +from lerobot.configs.types import FeatureType, NormalizationMode, PolicyFeature def create_stats_buffers( - features: list[PolicyFeature], + features: dict[str, PolicyFeature], + norm_map: dict[str, NormalizationMode], stats: dict[str, dict[str, Tensor]] | None = None, ) -> dict[str, dict[str, nn.ParameterDict]]: """ @@ -36,19 +36,20 @@ def create_stats_buffers( """ stats_buffers = {} - for ft in features: - if ft.normalization_mode is None: + for key, ft in features.items(): + norm_mode = norm_map.get(ft.type, None) + if norm_mode is None: continue - assert isinstance(ft.normalization_mode, NormalizationMode) + assert isinstance(norm_mode, NormalizationMode) shape = tuple(ft.shape) if ft.type is FeatureType.VISUAL: # sanity checks - assert len(shape) == 3, f"number of dimensions of {ft.key} != 3 ({shape=}" + assert len(shape) == 3, f"number of dimensions of {key} != 3 ({shape=}" c, h, w = shape - assert c < h and c < w, f"{ft.key} is not channel first ({shape=})" + assert c < h and c < w, f"{key} is not channel first ({shape=})" # override image shape to be invariant to height and width shape = (c, 1, 1) @@ -57,7 +58,7 @@ def create_stats_buffers( # we assert they are not infinity anymore. buffer = {} - if ft.normalization_mode is NormalizationMode.MEAN_STD: + if norm_mode is NormalizationMode.MEAN_STD: mean = torch.ones(shape, dtype=torch.float32) * torch.inf std = torch.ones(shape, dtype=torch.float32) * torch.inf buffer = nn.ParameterDict( @@ -66,7 +67,7 @@ def create_stats_buffers( "std": nn.Parameter(std, requires_grad=False), } ) - elif ft.normalization_mode is NormalizationMode.MIN_MAX: + elif norm_mode is NormalizationMode.MIN_MAX: min = torch.ones(shape, dtype=torch.float32) * torch.inf max = torch.ones(shape, dtype=torch.float32) * torch.inf buffer = nn.ParameterDict( @@ -81,14 +82,14 @@ def create_stats_buffers( # tensors anywhere (for example, when we use the same stats for normalization and # unnormalization). See the logic here # https://github.com/huggingface/safetensors/blob/079781fd0dc455ba0fe851e2b4507c33d0c0d407/bindings/python/py_src/safetensors/torch.py#L97. 
- if ft.normalization_mode is NormalizationMode.MEAN_STD: - buffer["mean"].data = stats[ft.key]["mean"].clone() - buffer["std"].data = stats[ft.key]["std"].clone() - elif ft.normalization_mode is NormalizationMode.MIN_MAX: - buffer["min"].data = stats[ft.key]["min"].clone() - buffer["max"].data = stats[ft.key]["max"].clone() - - stats_buffers[ft.key] = buffer + if norm_mode is NormalizationMode.MEAN_STD: + buffer["mean"].data = stats[key]["mean"].clone() + buffer["std"].data = stats[key]["std"].clone() + elif norm_mode is NormalizationMode.MIN_MAX: + buffer["min"].data = stats[key]["min"].clone() + buffer["max"].data = stats[key]["max"].clone() + + stats_buffers[key] = buffer return stats_buffers @@ -104,7 +105,8 @@ class Normalize(nn.Module): def __init__( self, - features: list[PolicyFeature], + features: dict[str, PolicyFeature], + norm_map: dict[str, NormalizationMode], stats: dict[str, dict[str, Tensor]] | None = None, ): """ @@ -127,8 +129,9 @@ def __init__( """ super().__init__() self.features = features + self.norm_map = norm_map self.stats = stats - stats_buffers = create_stats_buffers(features, stats) + stats_buffers = create_stats_buffers(features, norm_map, stats) for key, buffer in stats_buffers.items(): setattr(self, "buffer_" + key.replace(".", "_"), buffer) @@ -136,29 +139,30 @@ def __init__( @torch.no_grad def forward(self, batch: dict[str, Tensor]) -> dict[str, Tensor]: batch = dict(batch) # shallow copy avoids mutating the input batch - for ft in self.features: - if ft.normalization_mode is None: + for key, ft in self.features.items(): + norm_mode = self.norm_map.get(ft.type, None) + if norm_mode is None: continue - buffer = getattr(self, "buffer_" + ft.key.replace(".", "_")) + buffer = getattr(self, "buffer_" + key.replace(".", "_")) - if ft.normalization_mode is NormalizationMode.MEAN_STD: + if norm_mode is NormalizationMode.MEAN_STD: mean = buffer["mean"] std = buffer["std"] assert not torch.isinf(mean).any(), _no_stats_error_str("mean") assert not torch.isinf(std).any(), _no_stats_error_str("std") - batch[ft.key] = (batch[ft.key] - mean) / (std + 1e-8) - elif ft.normalization_mode is NormalizationMode.MIN_MAX: + batch[key] = (batch[key] - mean) / (std + 1e-8) + elif norm_mode is NormalizationMode.MIN_MAX: min = buffer["min"] max = buffer["max"] assert not torch.isinf(min).any(), _no_stats_error_str("min") assert not torch.isinf(max).any(), _no_stats_error_str("max") # normalize to [0,1] - batch[ft.key] = (batch[ft.key] - min) / (max - min + 1e-8) + batch[key] = (batch[key] - min) / (max - min + 1e-8) # normalize to [-1, 1] - batch[ft.key] = batch[ft.key] * 2 - 1 + batch[key] = batch[key] * 2 - 1 else: - raise ValueError(ft.normalization_mode) + raise ValueError(norm_mode) return batch @@ -170,7 +174,8 @@ class Unnormalize(nn.Module): def __init__( self, - features: list[PolicyFeature], + features: dict[str, PolicyFeature], + norm_map: dict[str, NormalizationMode], stats: dict[str, dict[str, Tensor]] | None = None, ): """ @@ -193,9 +198,10 @@ def __init__( """ super().__init__() self.features = features + self.norm_map = norm_map self.stats = stats # `self.buffer_observation_state["mean"]` contains `torch.tensor(state_dim)` - stats_buffers = create_stats_buffers(features, stats) + stats_buffers = create_stats_buffers(features, norm_map, stats) for key, buffer in stats_buffers.items(): setattr(self, "buffer_" + key.replace(".", "_"), buffer) @@ -203,22 +209,23 @@ def __init__( @torch.no_grad def forward(self, batch: dict[str, Tensor]) -> dict[str, Tensor]: batch = 
dict(batch) # shallow copy avoids mutating the input batch - for ft in self.features: - buffer = getattr(self, "buffer_" + ft.key.replace(".", "_")) + for key, ft in self.features.items(): + norm_mode = self.norm_map.get(ft.type, None) + buffer = getattr(self, "buffer_" + key.replace(".", "_")) - if ft.normalization_mode is NormalizationMode.MEAN_STD: + if norm_mode is NormalizationMode.MEAN_STD: mean = buffer["mean"] std = buffer["std"] assert not torch.isinf(mean).any(), _no_stats_error_str("mean") assert not torch.isinf(std).any(), _no_stats_error_str("std") - batch[ft.key] = batch[ft.key] * std + mean - elif ft.normalization_mode is NormalizationMode.MIN_MAX: + batch[key] = batch[key] * std + mean + elif norm_mode is NormalizationMode.MIN_MAX: min = buffer["min"] max = buffer["max"] assert not torch.isinf(min).any(), _no_stats_error_str("min") assert not torch.isinf(max).any(), _no_stats_error_str("max") - batch[ft.key] = (batch[ft.key] + 1) / 2 - batch[ft.key] = batch[ft.key] * (max - min) + min + batch[key] = (batch[key] + 1) / 2 + batch[key] = batch[key] * (max - min) + min else: - raise ValueError(ft.normalization_mode) + raise ValueError(norm_mode) return batch diff --git a/lerobot/common/policies/tdmpc/configuration_tdmpc.py b/lerobot/common/policies/tdmpc/configuration_tdmpc.py index e0757e13d7..0d2b046ff4 100644 --- a/lerobot/common/policies/tdmpc/configuration_tdmpc.py +++ b/lerobot/common/policies/tdmpc/configuration_tdmpc.py @@ -201,7 +201,7 @@ def validate_features(self) -> None: ) if len(self.image_features) > 0: - image_ft = next(iter(self.image_features)) + image_ft = next(iter(self.image_features.values())) if image_ft.shape[-2] != image_ft.shape[-1]: # TODO(alexander-soare): This limitation is solely because of code in the random shift # augmentation. It should be able to be removed. 
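For reference, a minimal sketch of the feature plumbing this commit moves to: features are plain dicts keyed by feature name (the new `PolicyFeature` carries only a type and shape, no key or normalization mode), and normalization modes are looked up per feature type. The shapes and the stats handling below are illustrative, not taken from the patch:

    from lerobot.common.policies.normalize import Normalize
    from lerobot.configs.types import FeatureType, NormalizationMode, PolicyFeature

    input_features = {
        "observation.state": PolicyFeature(type=FeatureType.STATE, shape=(14,)),
        "observation.images.top": PolicyFeature(type=FeatureType.VISUAL, shape=(3, 480, 640)),
    }
    norm_map = {
        FeatureType.STATE: NormalizationMode.MEAN_STD,
        FeatureType.VISUAL: NormalizationMode.MEAN_STD,
    }
    # Construction is allowed without stats; forward() asserts until real stats
    # (e.g. ds_meta.stats) are loaded into the buffers.
    normalize_inputs = Normalize(input_features, norm_map, stats=None)

    # Call sites now iterate the dict instead of reading ft.key, e.g.:
    image_keys = [key for key, ft in input_features.items() if ft.type is FeatureType.VISUAL]

This is the same pattern applied in the per-policy diffs that follow: `next(iter(config.image_features.values()))` replaces `config.image_features[0]`, and explicit keys such as OBS_ROBOT replace `feature.key` lookups.
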
diff --git a/lerobot/common/policies/tdmpc/modeling_tdmpc.py b/lerobot/common/policies/tdmpc/modeling_tdmpc.py index f7f4c7b96f..5106b46673 100644 --- a/lerobot/common/policies/tdmpc/modeling_tdmpc.py +++ b/lerobot/common/policies/tdmpc/modeling_tdmpc.py @@ -36,6 +36,7 @@ from huggingface_hub import PyTorchModelHubMixin from torch import Tensor +from lerobot.common.constants import OBS_ENV, OBS_ROBOT from lerobot.common.policies.normalize import Normalize, Unnormalize from lerobot.common.policies.tdmpc.configuration_tdmpc import TDMPCConfig from lerobot.common.policies.utils import get_device_from_parameters, get_output_shape, populate_queues @@ -79,9 +80,13 @@ def __init__(self, config: TDMPCConfig, dataset_stats: dict[str, dict[str, Tenso config.validate_features() self.config = config - self.normalize_inputs = Normalize(config.input_features, dataset_stats) - self.normalize_targets = Normalize(config.output_features, dataset_stats) - self.unnormalize_outputs = Unnormalize(config.output_features, dataset_stats) + self.normalize_inputs = Normalize(config.input_features, config.normalization_mapping, dataset_stats) + self.normalize_targets = Normalize( + config.output_features, config.normalization_mapping, dataset_stats + ) + self.unnormalize_outputs = Unnormalize( + config.output_features, config.normalization_mapping, dataset_stats + ) self.model = TDMPCTOLD(config) self.model_target = deepcopy(self.model) @@ -116,7 +121,7 @@ def select_action(self, batch: dict[str, Tensor]) -> Tensor: batch = self.normalize_inputs(batch) if self.config.image_features: batch = dict(batch) # shallow copy so that adding a key doesn't modify the original - batch["observation.image"] = batch[self.config.image_features[0].key] + batch["observation.image"] = batch[next(iter(self.config.image_features))] self._queues = populate_queues(self._queues, batch) @@ -312,7 +317,7 @@ def forward(self, batch: dict[str, Tensor]) -> dict[str, Tensor | float]: batch = self.normalize_inputs(batch) if self.config.image_features: batch = dict(batch) # shallow copy so that adding a key doesn't modify the original - batch["observation.image"] = batch[self.config.image_features[0].key] + batch["observation.image"] = batch[next(iter(self.config.image_features))] batch = self.normalize_targets(batch) info = {} @@ -696,7 +701,12 @@ def __init__(self, config: TDMPCConfig): if config.image_features: self.image_enc_layers = nn.Sequential( - nn.Conv2d(config.image_features[0].shape[0], config.image_encoder_hidden_dim, 7, stride=2), + nn.Conv2d( + next(iter(config.image_features.values())).shape[0], + config.image_encoder_hidden_dim, + 7, + stride=2, + ), nn.ReLU(), nn.Conv2d(config.image_encoder_hidden_dim, config.image_encoder_hidden_dim, 5, stride=2), nn.ReLU(), @@ -705,7 +715,7 @@ def __init__(self, config: TDMPCConfig): nn.Conv2d(config.image_encoder_hidden_dim, config.image_encoder_hidden_dim, 3, stride=2), nn.ReLU(), ) - dummy_shape = (1, *config.image_features[0].shape) + dummy_shape = (1, *next(iter(config.image_features.values())).shape) out_shape = get_output_shape(self.image_enc_layers, dummy_shape)[1:] self.image_enc_layers.extend( nn.Sequential( @@ -744,12 +754,14 @@ def forward(self, obs_dict: dict[str, Tensor]) -> Tensor: # NOTE: Order of observations matters here. 
if self.config.image_features: feat.append( - flatten_forward_unflatten(self.image_enc_layers, obs_dict[self.config.image_features[0].key]) + flatten_forward_unflatten( + self.image_enc_layers, obs_dict[next(iter(self.config.image_features))] + ) ) if self.config.env_state_feature: - feat.append(self.env_state_enc_layers(obs_dict[self.config.env_state_feature.key])) + feat.append(self.env_state_enc_layers(obs_dict[OBS_ENV])) if self.config.robot_state_feature: - feat.append(self.state_enc_layers(obs_dict[self.config.robot_state_feature.key])) + feat.append(self.state_enc_layers(obs_dict[OBS_ROBOT])) return torch.stack(feat, dim=0).mean(0) diff --git a/lerobot/common/policies/vqbet/configuration_vqbet.py b/lerobot/common/policies/vqbet/configuration_vqbet.py index 3f87572977..c2a3ca6960 100644 --- a/lerobot/common/policies/vqbet/configuration_vqbet.py +++ b/lerobot/common/policies/vqbet/configuration_vqbet.py @@ -171,21 +171,20 @@ def validate_features(self) -> None: raise ValueError("You must provide only one image among the inputs.") if self.crop_shape is not None: - for image_ft in self.image_features: + for key, image_ft in self.image_features.items(): if self.crop_shape[0] > image_ft.shape[1] or self.crop_shape[1] > image_ft.shape[2]: raise ValueError( f"`crop_shape` should fit within the images shapes. Got {self.crop_shape} " f"for `crop_shape` and {image_ft.shape} for " - f"`{image_ft.key}`." + f"`{key}`." ) # Check that all input images have the same shape. - first_image_ft = next(iter(self.image_features)) - for image_ft in self.image_features: + first_image_key, first_image_ft = next(iter(self.image_features.items())) + for key, image_ft in self.image_features.items(): if image_ft.shape != first_image_ft.shape: raise ValueError( - f"`{image_ft.key}` does not match `{first_image_ft.key}`, but we " - "expect all image shapes to match." + f"`{key}` does not match `{first_image_key}`, but we " "expect all image shapes to match." ) @property diff --git a/lerobot/common/policies/vqbet/modeling_vqbet.py b/lerobot/common/policies/vqbet/modeling_vqbet.py index aadbd29603..7f19b19e08 100644 --- a/lerobot/common/policies/vqbet/modeling_vqbet.py +++ b/lerobot/common/policies/vqbet/modeling_vqbet.py @@ -65,9 +65,13 @@ def __init__( config.validate_features() self.config = config - self.normalize_inputs = Normalize(config.input_features, dataset_stats) - self.normalize_targets = Normalize(config.output_features, dataset_stats) - self.unnormalize_outputs = Unnormalize(config.output_features, dataset_stats) + self.normalize_inputs = Normalize(config.input_features, config.normalization_mapping, dataset_stats) + self.normalize_targets = Normalize( + config.output_features, config.normalization_mapping, dataset_stats + ) + self.unnormalize_outputs = Unnormalize( + config.output_features, config.normalization_mapping, dataset_stats + ) self.vqbet = VQBeTModel(config) @@ -135,9 +139,7 @@ def select_action(self, batch: dict[str, Tensor]) -> Tensor: batch = self.normalize_inputs(batch) batch = dict(batch) # shallow copy so that adding a key doesn't modify the original - batch["observation.images"] = torch.stack( - [batch[ft.key] for ft in self.config.image_features], dim=-4 - ) + batch["observation.images"] = torch.stack([batch[key] for key in self.config.image_features], dim=-4) # Note: It's important that this happens after stacking the images into a single key. 
self._queues = populate_queues(self._queues, batch) @@ -163,9 +165,7 @@ def forward(self, batch: dict[str, Tensor]) -> dict[str, Tensor]: """Run the batch through the model and compute the loss for training or validation.""" batch = self.normalize_inputs(batch) batch = dict(batch) # shallow copy so that adding a key doesn't modify the original - batch["observation.images"] = torch.stack( - [batch[ft.key] for ft in self.config.image_features], dim=-4 - ) + batch["observation.images"] = torch.stack([batch[key] for key in self.config.image_features], dim=-4) batch = self.normalize_targets(batch) # VQ-BeT discretizes action using VQ-VAE before training BeT (please refer to section 3.2 in the VQ-BeT paper https://arxiv.org/pdf/2403.03181) if not self.vqbet.action_head.vqvae_model.discretized.item(): @@ -703,10 +703,9 @@ def __init__(self, config: VQBeTConfig): # use the height and width from `config.crop_shape` if it is provided, otherwise it should use the # height and width from `config.image_features`. - dummy_shape_h_w = ( - config.crop_shape if config.crop_shape is not None else config.image_features[0].shape[1:] - ) - dummy_shape = (1, config.image_features[0].shape[0], *dummy_shape_h_w) + images_shape = next(iter(config.image_features.values())).shape + dummy_shape_h_w = config.crop_shape if config.crop_shape is not None else images_shape[1:] + dummy_shape = (1, images_shape[0], *dummy_shape_h_w) feature_map_shape = get_output_shape(self.backbone, dummy_shape)[1:] self.pool = SpatialSoftmax(feature_map_shape, num_kp=config.spatial_softmax_num_keypoints) diff --git a/lerobot/common/utils/utils.py b/lerobot/common/utils/utils.py index 45af5907f6..cbd5e8b391 100644 --- a/lerobot/common/utils/utils.py +++ b/lerobot/common/utils/utils.py @@ -19,6 +19,7 @@ import platform import random from contextlib import contextmanager +from copy import copy from datetime import datetime, timezone from pathlib import Path from typing import Any, Generator @@ -199,3 +200,13 @@ def log_say(text, play_sounds, blocking=False): if play_sounds: say(text, blocking) + + +def get_channel_first_image_shape(image_shape: tuple) -> tuple: + shape = copy(image_shape) + if shape[2] < shape[0] and shape[2] < shape[1]: # (h, w, c) -> (c, h, w) + shape = (shape[2], shape[0], shape[1]) + elif not (shape[0] < shape[1] and shape[0] < shape[2]): + raise ValueError(image_shape) + + return shape diff --git a/lerobot/configs/policies.py b/lerobot/configs/policies.py index 90bfa00875..abdcaec48b 100644 --- a/lerobot/configs/policies.py +++ b/lerobot/configs/policies.py @@ -3,34 +3,21 @@ from copy import copy from dataclasses import dataclass, field from pathlib import Path -from pprint import pformat from typing import Type, TypeVar import draccus -import gymnasium as gym from huggingface_hub import ModelHubMixin, hf_hub_download from huggingface_hub.constants import CONFIG_NAME from huggingface_hub.errors import HfHubHTTPError -from lerobot.common.datasets.lerobot_dataset import LeRobotDatasetMetadata -from lerobot.common.datasets.utils import flatten_dict, get_nested_item -from lerobot.common.envs.configs import EnvConfig from lerobot.common.optim.optimizers import OptimizerConfig from lerobot.common.optim.schedulers import LRSchedulerConfig -from lerobot.configs.types import FeatureType, NormalizationMode +from lerobot.configs.types import FeatureType, NormalizationMode, PolicyFeature -# Generic variable that is either ModelHubMixin or a subclass thereof +# Generic variable that is either PretrainedConfig or a subclass thereof T 
= TypeVar("T", bound="PretrainedConfig") -@dataclass -class PolicyFeature: - key: str - type: FeatureType - shape: list | tuple - normalization_mode: NormalizationMode - - @dataclass class PretrainedConfig(draccus.ChoiceRegistry, ModelHubMixin, abc.ABC): """ @@ -52,6 +39,9 @@ class PretrainedConfig(draccus.ChoiceRegistry, ModelHubMixin, abc.ABC): n_obs_steps: int = 1 normalization_mapping: dict[str, NormalizationMode] = field(default_factory=dict) + input_features: dict[str, PolicyFeature] = field(default_factory=dict) + output_features: dict[str, PolicyFeature] = field(default_factory=dict) + def __post_init__(self): self.type = self.get_choice_name(self.__class__) self.pretrained_path = None @@ -81,17 +71,29 @@ def validate_features(self) -> None: raise NotImplementedError @property - def input_features(self) -> list[PolicyFeature]: - input_features = [] - for ft in [self.robot_state_feature, self.env_state_feature, *self.image_features]: - if ft is not None: - input_features.append(ft) + def robot_state_feature(self) -> PolicyFeature | None: + for _, ft in self.input_features.items(): + if ft.type is FeatureType.STATE: + return ft + return None - return input_features + @property + def env_state_feature(self) -> PolicyFeature | None: + for _, ft in self.input_features.items(): + if ft.type is FeatureType.ENV: + return ft + return None + + @property + def image_features(self) -> dict[str, PolicyFeature]: + return {key: ft for key, ft in self.input_features.items() if ft.type is FeatureType.VISUAL} @property - def output_features(self) -> list[PolicyFeature]: - return [self.action_feature] + def action_feature(self) -> PolicyFeature | None: + for _, ft in self.output_features.items(): + if ft.type is FeatureType.ACTION: + return ft + return None def _save_pretrained(self, save_directory: Path) -> None: to_save = copy(self) @@ -141,135 +143,3 @@ def from_pretrained( cli_overrides = model_kwargs.pop("cli_overrides", []) instance = draccus.parse(cls, config_file, args=[]) return draccus.parse(instance.__class__, config_file, args=cli_overrides) - - def parse_features_from_dataset(self, ds_meta: LeRobotDatasetMetadata): - # TODO(aliberts): Implement PolicyFeature in LeRobotDataset and remove the need for this - robot_state_features = [] - env_state_features = [] - action_features = [] - image_features = [] - - for key in ds_meta.features: - if key in ds_meta.camera_keys: - shape = ds_meta.features[key]["shape"] - names = ds_meta.features[key]["names"] - if len(shape) != 3: - raise ValueError(f"Number of dimensions of {key} != 3 (shape={shape})") - # Backward compatibility for "channel" which is an error introduced in LeRobotDataset v2.0 for ported datasets. 
- if names[2] in ["channel", "channels"]: # (h, w, c) -> (c, h, w) - shape = (shape[2], shape[0], shape[1]) - image_features.append( - PolicyFeature( - key=key, - type=FeatureType.VISUAL, - shape=shape, - normalization_mode=self.normalization_mapping[FeatureType.VISUAL], - ) - ) - elif key == "observation.environment_state": - env_state_features.append( - PolicyFeature( - key=key, - type=FeatureType.ENV, - shape=ds_meta.features[key]["shape"], - normalization_mode=self.normalization_mapping[FeatureType.ENV], - ) - ) - elif key.startswith("observation"): - robot_state_features.append( - PolicyFeature( - key=key, - type=FeatureType.STATE, - shape=ds_meta.features[key]["shape"], - normalization_mode=self.normalization_mapping[FeatureType.STATE], - ) - ) - elif key == "action": - action_features.append( - PolicyFeature( - key=key, - type=FeatureType.ACTION, - shape=ds_meta.features[key]["shape"], - normalization_mode=self.normalization_mapping[FeatureType.ACTION], - ) - ) - - if len(robot_state_features) > 1: - raise ValueError( - "Found multiple features for the robot's state. Please select only one or concatenate them." - f"Robot state features found:\n{pformat(robot_state_features)}" - ) - - if len(env_state_features) > 1: - raise ValueError( - "Found multiple features for the env's state. Please select only one or concatenate them." - f"Env state features found:\n{pformat(env_state_features)}" - ) - - if len(action_features) > 1: - raise ValueError( - "Found multiple features for the action. Please select only one or concatenate them." - f"Action features found:\n{pformat(action_features)}" - ) - - self.robot_state_feature = robot_state_features[0] if len(robot_state_features) == 1 else None - self.env_state_feature = env_state_features[0] if len(env_state_features) == 1 else None - self.action_feature = action_features[0] if len(action_features) == 1 else None - self.image_features = image_features - - def parse_features_from_env(self, env: gym.Env, env_cfg: EnvConfig): - robot_state_features = [] - env_state_features = [] - action_features = [] - image_features = [] - - flat_dict = flatten_dict(env_cfg.feature_types) - - for key, _type in flat_dict.items(): - env_ft = ( - env.action_space - if _type is FeatureType.ACTION - else get_nested_item(env.observation_space, key) - ) - shape = env_ft.shape[1:] - if _type is FeatureType.VISUAL: - h, w, c = shape - if not c < h and c < w: - raise ValueError( - f"Expect channel last images for visual feature {key} of {env_cfg.type} env, but instead got {shape=}" - ) - shape = (c, h, w) - - feature = PolicyFeature( - key=key, - type=_type, - shape=shape, - normalization_mode=self.normalization_mapping[_type], - ) - if _type is FeatureType.VISUAL: - image_features.append(feature) - elif _type is FeatureType.STATE: - robot_state_features.append(feature) - elif _type is FeatureType.ENV: - env_state_features.append(feature) - elif _type is FeatureType.ACTION: - action_features.append(feature) - - # TODO(aliberts, rcadene): remove this hardcoding of keys and just use the nested keys as is - # (need to also refactor preprocess_observation and externalize normalization from policies) - for ft in image_features: - if len(ft.key.split("/")) > 1: - ft.key = f"observation.images.{ft.key.split('/')[-1]}" - elif len(ft.key.split("/")) == 1: - image_features[0].key = "observation.image" - - if len(robot_state_features) == 1: - robot_state_features[0].key = "observation.state" - - if len(env_state_features) == 1: - env_state_features[0].key = 
"observation.environment_state" - - self.robot_state_feature = robot_state_features[0] if len(robot_state_features) == 1 else None - self.env_state_feature = env_state_features[0] if len(env_state_features) == 1 else None - self.action_feature = action_features[0] if len(action_features) == 1 else None - self.image_features = image_features diff --git a/lerobot/configs/types.py b/lerobot/configs/types.py index a5f6ac4fba..f31f437b1e 100644 --- a/lerobot/configs/types.py +++ b/lerobot/configs/types.py @@ -1,5 +1,6 @@ # Note: We subclass str so that serialization is straightforward # https://stackoverflow.com/questions/24481852/serialising-an-enum-member-to-json +from dataclasses import dataclass from enum import Enum from typing import Any, Protocol @@ -18,3 +19,9 @@ class NormalizationMode(str, Enum): class DictLike(Protocol): def __getitem__(self, key: Any) -> Any: ... + + +@dataclass +class PolicyFeature: + type: FeatureType + shape: tuple diff --git a/tests/test_policies.py b/tests/test_policies.py index bfb82d024d..4d05af93c3 100644 --- a/tests/test_policies.py +++ b/tests/test_policies.py @@ -25,7 +25,7 @@ from lerobot import available_policies from lerobot.common.datasets.factory import make_dataset -from lerobot.common.datasets.utils import cycle +from lerobot.common.datasets.utils import cycle, dataset_to_policy_features from lerobot.common.envs.factory import make_env, make_env_config from lerobot.common.envs.utils import preprocess_observation from lerobot.common.optim.factory import make_optimizer_and_scheduler @@ -39,9 +39,8 @@ from lerobot.common.policies.policy_protocol import Policy from lerobot.common.utils.utils import seeded_context from lerobot.configs.default import DatasetConfig -from lerobot.configs.policies import PolicyFeature from lerobot.configs.training import TrainPipelineConfig -from lerobot.configs.types import FeatureType, NormalizationMode +from lerobot.configs.types import FeatureType, NormalizationMode, PolicyFeature from tests.scripts.save_policy_to_safetensors import get_policy_stats from tests.utils import DEVICE, require_cpu, require_env, require_x86_64_kernel @@ -240,7 +239,11 @@ def test_policy_defaults(dummy_dataset_metadata, policy_name: str): """Check that the policy can be instantiated with defaults.""" policy_cls = get_policy_class(policy_name) policy_cfg = make_policy_config(policy_name) - policy_cfg.parse_features_from_dataset(dummy_dataset_metadata) + features = dataset_to_policy_features(dummy_dataset_metadata.features) + policy_cfg.output_features = {key: ft for key, ft in features.items() if ft.type is FeatureType.ACTION} + policy_cfg.input_features = { + key: ft for key, ft in features.items() if key not in policy_cfg.output_features + } policy_cls(policy_cfg) @@ -248,7 +251,11 @@ def test_policy_defaults(dummy_dataset_metadata, policy_name: str): def test_save_and_load_pretrained(dummy_dataset_metadata, tmp_path, policy_name: str): policy_cls = get_policy_class(policy_name) policy_cfg = make_policy_config(policy_name) - policy_cfg.parse_features_from_dataset(dummy_dataset_metadata) + features = dataset_to_policy_features(dummy_dataset_metadata.features) + policy_cfg.output_features = {key: ft for key, ft in features.items() if ft.type is FeatureType.ACTION} + policy_cfg.input_features = { + key: ft for key, ft in features.items() if key not in policy_cfg.output_features + } policy = policy_cls(policy_cfg) save_dir = tmp_path / f"test_save_and_load_pretrained_{policy_cls.__name__}" policy.save_pretrained(save_dir) @@ -266,28 +273,28 @@ 
def test_normalize(insert_temporal_dim): expected. """ - input_features = [ - PolicyFeature( - key="observation.image", + input_features = { + "observation.image": PolicyFeature( type=FeatureType.VISUAL, - normalization_mode=NormalizationMode.MEAN_STD, - shape=[3, 96, 96], + shape=(3, 96, 96), ), - PolicyFeature( - key="observation.state", + "observation.state": PolicyFeature( type=FeatureType.STATE, - normalization_mode=NormalizationMode.MIN_MAX, - shape=[10], + shape=(10,), ), - ] - output_features = [ - PolicyFeature( - key="action", + } + output_features = { + "action": PolicyFeature( type=FeatureType.ACTION, - normalization_mode=NormalizationMode.MIN_MAX, - shape=[5], + shape=(5,), ), - ] + } + + norm_map = { + "VISUAL": NormalizationMode.MEAN_STD, + "STATE": NormalizationMode.MIN_MAX, + "ACTION": NormalizationMode.MIN_MAX, + } dataset_stats = { "observation.image": { @@ -330,30 +337,30 @@ def test_normalize(insert_temporal_dim): output_batch[key] = torch.stack([output_batch[key]] * tdim, dim=1) # test without stats - normalize = Normalize(input_features, stats=None) + normalize = Normalize(input_features, norm_map, stats=None) with pytest.raises(AssertionError): normalize(input_batch) # test with stats - normalize = Normalize(input_features, stats=dataset_stats) + normalize = Normalize(input_features, norm_map, stats=dataset_stats) normalize(input_batch) # test loading pretrained models - new_normalize = Normalize(input_features, stats=None) + new_normalize = Normalize(input_features, norm_map, stats=None) new_normalize.load_state_dict(normalize.state_dict()) new_normalize(input_batch) # test without stats - unnormalize = Unnormalize(output_features, stats=None) + unnormalize = Unnormalize(output_features, norm_map, stats=None) with pytest.raises(AssertionError): unnormalize(output_batch) # test with stats - unnormalize = Unnormalize(output_features, stats=dataset_stats) + unnormalize = Unnormalize(output_features, norm_map, stats=dataset_stats) unnormalize(output_batch) # test loading pretrained models - new_unnormalize = Unnormalize(output_features, stats=None) + new_unnormalize = Unnormalize(output_features, norm_map, stats=None) new_unnormalize.load_state_dict(unnormalize.state_dict()) unnormalize(output_batch) @@ -487,7 +494,3 @@ def test_act_temporal_ensembler(): assert torch.all(offline_avg <= einops.reduce(seq_slice, "b s 1 -> b 1", "max")) # Selected atol=1e-4 keeping in mind actions in [-1, 1] and excepting 0.01% error. assert torch.allclose(online_avg, offline_avg, atol=1e-4) - - -if __name__ == "__main__": - test_act_temporal_ensembler() From 3623feb992be994a212161568329c86d380d65ac Mon Sep 17 00:00:00 2001 From: Simon Alibert Date: Thu, 16 Jan 2025 17:57:38 +0100 Subject: [PATCH 33/94] Fix env_to_policy_features call --- lerobot/common/policies/factory.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lerobot/common/policies/factory.py b/lerobot/common/policies/factory.py index e89c9a7266..0508d25dde 100644 --- a/lerobot/common/policies/factory.py +++ b/lerobot/common/policies/factory.py @@ -111,7 +111,7 @@ def make_policy( "The policy must have already existing features in its config when initializing it " "with an environment." 
) - features = env_to_policy_features(env_cfg, cfg) + features = env_to_policy_features(env_cfg) cfg.output_features = {key: ft for key, ft in features.items() if ft.type is FeatureType.ACTION} cfg.input_features = {key: ft for key, ft in features.items() if key not in cfg.output_features} From e11e7623f2e6d5cf9926bd8c114236f741681694 Mon Sep 17 00:00:00 2001 From: Simon Alibert Date: Fri, 17 Jan 2025 11:07:52 +0100 Subject: [PATCH 34/94] Fix wandb init --- lerobot/common/logger.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lerobot/common/logger.py b/lerobot/common/logger.py index 357d6fd83c..b589e94b1e 100644 --- a/lerobot/common/logger.py +++ b/lerobot/common/logger.py @@ -121,7 +121,7 @@ def __init__(self, cfg: TrainPipelineConfig): notes=cfg.wandb.notes, tags=cfg_to_group(cfg, return_list=True), dir=self.log_dir, - config=asdict(self.cfg), + config=asdict(self._cfg), # TODO(rcadene): try set to True save_code=False, # TODO(rcadene): split train and eval, and run async eval with job_type="eval" From ae44cd05a44f37f46d335a0b41a0871e6eb3600f Mon Sep 17 00:00:00 2001 From: Simon Alibert Date: Fri, 17 Jan 2025 11:23:30 +0100 Subject: [PATCH 35/94] remove omegaconf --- poetry.lock | 27 +-------------------------- pyproject.toml | 1 - 2 files changed, 1 insertion(+), 27 deletions(-) diff --git a/poetry.lock b/poetry.lock index 0b71c083f1..56fcc51c10 100644 --- a/poetry.lock +++ b/poetry.lock @@ -158,16 +158,6 @@ files = [ {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"}, ] -[[package]] -name = "antlr4-python3-runtime" -version = "4.9.3" -description = "ANTLR 4.9.3 runtime for Python 3.7" -optional = false -python-versions = "*" -files = [ - {file = "antlr4-python3-runtime-4.9.3.tar.gz", hash = "sha256:f224469b4168294902bb1efa80a8bf7855f24c99aef99cbefc1bcd3cce77881b"}, -] - [[package]] name = "anyio" version = "4.8.0" @@ -4175,21 +4165,6 @@ files = [ {file = "nvidia_nvtx_cu12-12.1.105-py3-none-win_amd64.whl", hash = "sha256:65f4d98982b31b60026e0e6de73fbdfc09d08a96f4656dd3665ca616a11e1e82"}, ] -[[package]] -name = "omegaconf" -version = "2.3.0" -description = "A flexible configuration library" -optional = false -python-versions = ">=3.6" -files = [ - {file = "omegaconf-2.3.0-py3-none-any.whl", hash = "sha256:7b4df175cdb08ba400f45cae3bdcae7ba8365db4d165fc65fd04b050ab63b46b"}, - {file = "omegaconf-2.3.0.tar.gz", hash = "sha256:d5d4b6d29955cc50ad50c46dc269bcd92c6e00f5f90d23ab5fee7bfca4ba4cc7"}, -] - -[package.dependencies] -antlr4-python3-runtime = "==4.9.*" -PyYAML = ">=5.1.0" - [[package]] name = "open3d" version = "0.18.0" @@ -7673,4 +7648,4 @@ xarm = ["gym-xarm"] [metadata] lock-version = "2.0" python-versions = ">=3.10,<3.13" -content-hash = "df9042654c525f96e88e868d95f8746d69f3c30b00cdf064625df8aa540ea488" +content-hash = "3d77328d3ec38a7ff7fd6414094cb3cd4bfca9bd266fffc21627505cbe56870d" diff --git a/pyproject.toml b/pyproject.toml index c1b791d731..b130c44587 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -29,7 +29,6 @@ packages = [{include = "lerobot"}] [tool.poetry.dependencies] python = ">=3.10,<3.13" termcolor = ">=2.4.0" -omegaconf = ">=2.3.0" wandb = ">=0.16.3" imageio = {extras = ["ffmpeg"], version = ">=2.34.0"} gdown = ">=5.1.0" From 340ed02d22fbebdd2e0543cf45e29339dea34f11 Mon Sep 17 00:00:00 2001 From: Simon Alibert Date: Mon, 20 Jan 2025 09:01:37 +0100 Subject: [PATCH 36/94] Add branch arg --- lerobot/scripts/push_pretrained.py | 2 ++ 1 file changed, 2 insertions(+) diff --git 
a/lerobot/scripts/push_pretrained.py b/lerobot/scripts/push_pretrained.py index 68360339b7..8472bf98a1 100644 --- a/lerobot/scripts/push_pretrained.py +++ b/lerobot/scripts/push_pretrained.py @@ -37,6 +37,7 @@ class PushPretrainedConfig: pretrained_path: Path repo_id: str + branch: str | None = None private: bool = False exist_ok: bool = False @@ -53,6 +54,7 @@ def main(cfg: PushPretrainedConfig): repo_id=cfg.repo_id, folder_path=cfg.pretrained_path, repo_type="model", + revision=cfg.branch, ) From 14008abd37acd4e5c37ec99cc52ee8aff9aedfd3 Mon Sep 17 00:00:00 2001 From: Simon Alibert Date: Tue, 21 Jan 2025 09:29:07 +0100 Subject: [PATCH 37/94] Move deprecated --- lerobot/configs/{ => _deprecated}/default.yaml | 0 .../configs/{ => _deprecated}/policy/act.yaml | 0 .../policy/act_aloha_real.yaml | 0 .../{ => _deprecated}/policy/act_koch_real.yaml | 0 .../{ => _deprecated}/policy/act_moss_real.yaml | 0 .../policy/act_so100_real.yaml | 0 .../{ => _deprecated}/policy/diffusion.yaml | 0 .../policy/diffusion_pusht_keypoints.yaml | 0 .../configs/{ => _deprecated}/policy/tdmpc.yaml | 0 .../policy/tdmpc_pusht_keypoints.yaml | 0 .../configs/{ => _deprecated}/policy/vqbet.yaml | 0 .../configs/{ => _deprecated}/robot/aloha.yaml | 0 .../configs/{ => _deprecated}/robot/koch.yaml | 0 .../{ => _deprecated}/robot/koch_bimanual.yaml | 0 .../configs/{ => _deprecated}/robot/moss.yaml | 0 .../configs/{ => _deprecated}/robot/so100.yaml | 0 .../{ => _deprecated}/robot/stretch.yaml | 0 lerobot/configs/env/aloha.yaml | 14 -------------- lerobot/configs/env/aloha_real.yaml | 10 ---------- lerobot/configs/env/dora_aloha_real.yaml | 13 ------------- lerobot/configs/env/koch_real.yaml | 10 ---------- lerobot/configs/env/moss_real.yaml | 10 ---------- lerobot/configs/env/pusht.yaml | 17 ----------------- lerobot/configs/env/so100_real.yaml | 10 ---------- lerobot/configs/env/xarm.yaml | 17 ----------------- 25 files changed, 101 deletions(-) rename lerobot/configs/{ => _deprecated}/default.yaml (100%) rename lerobot/configs/{ => _deprecated}/policy/act.yaml (100%) rename lerobot/configs/{ => _deprecated}/policy/act_aloha_real.yaml (100%) rename lerobot/configs/{ => _deprecated}/policy/act_koch_real.yaml (100%) rename lerobot/configs/{ => _deprecated}/policy/act_moss_real.yaml (100%) rename lerobot/configs/{ => _deprecated}/policy/act_so100_real.yaml (100%) rename lerobot/configs/{ => _deprecated}/policy/diffusion.yaml (100%) rename lerobot/configs/{ => _deprecated}/policy/diffusion_pusht_keypoints.yaml (100%) rename lerobot/configs/{ => _deprecated}/policy/tdmpc.yaml (100%) rename lerobot/configs/{ => _deprecated}/policy/tdmpc_pusht_keypoints.yaml (100%) rename lerobot/configs/{ => _deprecated}/policy/vqbet.yaml (100%) rename lerobot/configs/{ => _deprecated}/robot/aloha.yaml (100%) rename lerobot/configs/{ => _deprecated}/robot/koch.yaml (100%) rename lerobot/configs/{ => _deprecated}/robot/koch_bimanual.yaml (100%) rename lerobot/configs/{ => _deprecated}/robot/moss.yaml (100%) rename lerobot/configs/{ => _deprecated}/robot/so100.yaml (100%) rename lerobot/configs/{ => _deprecated}/robot/stretch.yaml (100%) delete mode 100644 lerobot/configs/env/aloha.yaml delete mode 100644 lerobot/configs/env/aloha_real.yaml delete mode 100644 lerobot/configs/env/dora_aloha_real.yaml delete mode 100644 lerobot/configs/env/koch_real.yaml delete mode 100644 lerobot/configs/env/moss_real.yaml delete mode 100644 lerobot/configs/env/pusht.yaml delete mode 100644 lerobot/configs/env/so100_real.yaml delete mode 100644 
lerobot/configs/env/xarm.yaml diff --git a/lerobot/configs/default.yaml b/lerobot/configs/_deprecated/default.yaml similarity index 100% rename from lerobot/configs/default.yaml rename to lerobot/configs/_deprecated/default.yaml diff --git a/lerobot/configs/policy/act.yaml b/lerobot/configs/_deprecated/policy/act.yaml similarity index 100% rename from lerobot/configs/policy/act.yaml rename to lerobot/configs/_deprecated/policy/act.yaml diff --git a/lerobot/configs/policy/act_aloha_real.yaml b/lerobot/configs/_deprecated/policy/act_aloha_real.yaml similarity index 100% rename from lerobot/configs/policy/act_aloha_real.yaml rename to lerobot/configs/_deprecated/policy/act_aloha_real.yaml diff --git a/lerobot/configs/policy/act_koch_real.yaml b/lerobot/configs/_deprecated/policy/act_koch_real.yaml similarity index 100% rename from lerobot/configs/policy/act_koch_real.yaml rename to lerobot/configs/_deprecated/policy/act_koch_real.yaml diff --git a/lerobot/configs/policy/act_moss_real.yaml b/lerobot/configs/_deprecated/policy/act_moss_real.yaml similarity index 100% rename from lerobot/configs/policy/act_moss_real.yaml rename to lerobot/configs/_deprecated/policy/act_moss_real.yaml diff --git a/lerobot/configs/policy/act_so100_real.yaml b/lerobot/configs/_deprecated/policy/act_so100_real.yaml similarity index 100% rename from lerobot/configs/policy/act_so100_real.yaml rename to lerobot/configs/_deprecated/policy/act_so100_real.yaml diff --git a/lerobot/configs/policy/diffusion.yaml b/lerobot/configs/_deprecated/policy/diffusion.yaml similarity index 100% rename from lerobot/configs/policy/diffusion.yaml rename to lerobot/configs/_deprecated/policy/diffusion.yaml diff --git a/lerobot/configs/policy/diffusion_pusht_keypoints.yaml b/lerobot/configs/_deprecated/policy/diffusion_pusht_keypoints.yaml similarity index 100% rename from lerobot/configs/policy/diffusion_pusht_keypoints.yaml rename to lerobot/configs/_deprecated/policy/diffusion_pusht_keypoints.yaml diff --git a/lerobot/configs/policy/tdmpc.yaml b/lerobot/configs/_deprecated/policy/tdmpc.yaml similarity index 100% rename from lerobot/configs/policy/tdmpc.yaml rename to lerobot/configs/_deprecated/policy/tdmpc.yaml diff --git a/lerobot/configs/policy/tdmpc_pusht_keypoints.yaml b/lerobot/configs/_deprecated/policy/tdmpc_pusht_keypoints.yaml similarity index 100% rename from lerobot/configs/policy/tdmpc_pusht_keypoints.yaml rename to lerobot/configs/_deprecated/policy/tdmpc_pusht_keypoints.yaml diff --git a/lerobot/configs/policy/vqbet.yaml b/lerobot/configs/_deprecated/policy/vqbet.yaml similarity index 100% rename from lerobot/configs/policy/vqbet.yaml rename to lerobot/configs/_deprecated/policy/vqbet.yaml diff --git a/lerobot/configs/robot/aloha.yaml b/lerobot/configs/_deprecated/robot/aloha.yaml similarity index 100% rename from lerobot/configs/robot/aloha.yaml rename to lerobot/configs/_deprecated/robot/aloha.yaml diff --git a/lerobot/configs/robot/koch.yaml b/lerobot/configs/_deprecated/robot/koch.yaml similarity index 100% rename from lerobot/configs/robot/koch.yaml rename to lerobot/configs/_deprecated/robot/koch.yaml diff --git a/lerobot/configs/robot/koch_bimanual.yaml b/lerobot/configs/_deprecated/robot/koch_bimanual.yaml similarity index 100% rename from lerobot/configs/robot/koch_bimanual.yaml rename to lerobot/configs/_deprecated/robot/koch_bimanual.yaml diff --git a/lerobot/configs/robot/moss.yaml b/lerobot/configs/_deprecated/robot/moss.yaml similarity index 100% rename from lerobot/configs/robot/moss.yaml rename to 
lerobot/configs/_deprecated/robot/moss.yaml diff --git a/lerobot/configs/robot/so100.yaml b/lerobot/configs/_deprecated/robot/so100.yaml similarity index 100% rename from lerobot/configs/robot/so100.yaml rename to lerobot/configs/_deprecated/robot/so100.yaml diff --git a/lerobot/configs/robot/stretch.yaml b/lerobot/configs/_deprecated/robot/stretch.yaml similarity index 100% rename from lerobot/configs/robot/stretch.yaml rename to lerobot/configs/_deprecated/robot/stretch.yaml diff --git a/lerobot/configs/env/aloha.yaml b/lerobot/configs/env/aloha.yaml deleted file mode 100644 index 296a4481c4..0000000000 --- a/lerobot/configs/env/aloha.yaml +++ /dev/null @@ -1,14 +0,0 @@ -# @package _global_ - -fps: 50 - -env: - name: aloha - task: AlohaInsertion-v0 - state_dim: 14 - action_dim: 14 - fps: ${fps} - episode_length: 400 - gym: - obs_type: pixels_agent_pos - render_mode: rgb_array diff --git a/lerobot/configs/env/aloha_real.yaml b/lerobot/configs/env/aloha_real.yaml deleted file mode 100644 index 57af4be20b..0000000000 --- a/lerobot/configs/env/aloha_real.yaml +++ /dev/null @@ -1,10 +0,0 @@ -# @package _global_ - -fps: 30 - -env: - name: real_world - task: null - state_dim: 18 - action_dim: 18 - fps: ${fps} diff --git a/lerobot/configs/env/dora_aloha_real.yaml b/lerobot/configs/env/dora_aloha_real.yaml deleted file mode 100644 index 088781d4ed..0000000000 --- a/lerobot/configs/env/dora_aloha_real.yaml +++ /dev/null @@ -1,13 +0,0 @@ -# @package _global_ - -fps: 30 - -env: - name: dora - task: DoraAloha-v0 - state_dim: 14 - action_dim: 14 - fps: ${fps} - episode_length: 400 - gym: - fps: ${fps} diff --git a/lerobot/configs/env/koch_real.yaml b/lerobot/configs/env/koch_real.yaml deleted file mode 100644 index 8e65d72f4e..0000000000 --- a/lerobot/configs/env/koch_real.yaml +++ /dev/null @@ -1,10 +0,0 @@ -# @package _global_ - -fps: 30 - -env: - name: real_world - task: null - state_dim: 6 - action_dim: 6 - fps: ${fps} diff --git a/lerobot/configs/env/moss_real.yaml b/lerobot/configs/env/moss_real.yaml deleted file mode 100644 index 8e65d72f4e..0000000000 --- a/lerobot/configs/env/moss_real.yaml +++ /dev/null @@ -1,10 +0,0 @@ -# @package _global_ - -fps: 30 - -env: - name: real_world - task: null - state_dim: 6 - action_dim: 6 - fps: ${fps} diff --git a/lerobot/configs/env/pusht.yaml b/lerobot/configs/env/pusht.yaml deleted file mode 100644 index 771fbbf4d4..0000000000 --- a/lerobot/configs/env/pusht.yaml +++ /dev/null @@ -1,17 +0,0 @@ -# @package _global_ - -fps: 10 - -env: - name: pusht - task: PushT-v0 - image_size: 96 - state_dim: 2 - action_dim: 2 - fps: ${fps} - episode_length: 300 - gym: - obs_type: pixels_agent_pos - render_mode: rgb_array - visualization_width: 384 - visualization_height: 384 diff --git a/lerobot/configs/env/so100_real.yaml b/lerobot/configs/env/so100_real.yaml deleted file mode 100644 index 8e65d72f4e..0000000000 --- a/lerobot/configs/env/so100_real.yaml +++ /dev/null @@ -1,10 +0,0 @@ -# @package _global_ - -fps: 30 - -env: - name: real_world - task: null - state_dim: 6 - action_dim: 6 - fps: ${fps} diff --git a/lerobot/configs/env/xarm.yaml b/lerobot/configs/env/xarm.yaml deleted file mode 100644 index 4320379aee..0000000000 --- a/lerobot/configs/env/xarm.yaml +++ /dev/null @@ -1,17 +0,0 @@ -# @package _global_ - -fps: 15 - -env: - name: xarm - task: XarmLift-v0 - image_size: 84 - state_dim: 4 - action_dim: 4 - fps: ${fps} - episode_length: 200 - gym: - obs_type: pixels_agent_pos - render_mode: rgb_array - visualization_width: 384 - visualization_height: 384 From 
95a167035388b8c0031030a16b0f78ed2de9954d Mon Sep 17 00:00:00 2001 From: Simon Alibert Date: Tue, 21 Jan 2025 09:33:09 +0100 Subject: [PATCH 38/94] Move training config --- lerobot/common/datasets/factory.py | 15 +++++---------- lerobot/common/logger.py | 2 +- lerobot/common/optim/factory.py | 2 +- lerobot/configs/{training.py => train.py} | 0 lerobot/scripts/train.py | 2 +- tests/scripts/save_policy_to_safetensors.py | 2 +- tests/test_control_robot.py | 2 +- tests/test_datasets.py | 2 +- tests/test_policies.py | 2 +- 9 files changed, 12 insertions(+), 17 deletions(-) rename lerobot/configs/{training.py => train.py} (100%) diff --git a/lerobot/common/datasets/factory.py b/lerobot/common/datasets/factory.py index e8f047908c..a507c7d479 100644 --- a/lerobot/common/datasets/factory.py +++ b/lerobot/common/datasets/factory.py @@ -25,7 +25,7 @@ ) from lerobot.common.datasets.transforms import ImageTransforms from lerobot.configs.policies import PretrainedConfig -from lerobot.configs.training import TrainPipelineConfig +from lerobot.configs.train import TrainPipelineConfig IMAGENET_STATS = { "mean": [[[0.485]], [[0.456]], [[0.406]]], # (c,1,1) @@ -59,16 +59,10 @@ def resolve_delta_timestamps( def make_dataset(cfg: TrainPipelineConfig) -> LeRobotDataset | MultiLeRobotDataset: """ Args: - cfg: A Hydra config as per the LeRobot config scheme. - split: Select the data subset used to create an instance of LeRobotDataset. - All datasets hosted on [lerobot](https://huggingface.co/lerobot) contain only one subset: "train". - Thus, by default, `split="train"` selects all the available data. `split` aims to work like the - slicer in the hugging face datasets: - https://huggingface.co/docs/datasets/v2.19.0/loading#slice-splits - As of now, it only supports `split="train[:n]"` to load the first n frames of the dataset or - `split="train[n:]"` to load the last n frames. For instance `split="train[:1000]"`. + cfg: A TrainPipelineConfig config which contains a DatasetConfig and a PretrainedConfig. + Returns: - The LeRobotDataset. + A LeRobotDataset. 
""" image_transforms = ( ImageTransforms(cfg.dataset.image_transforms) if cfg.dataset.image_transforms.enable else None @@ -86,6 +80,7 @@ def make_dataset(cfg: TrainPipelineConfig) -> LeRobotDataset | MultiLeRobotDatas local_files_only=cfg.dataset.local_files_only, ) else: + raise NotImplementedError("The MultiLeRobotDataset isn't supported for now.") dataset = MultiLeRobotDataset( cfg.dataset.repo_id, # TODO(aliberts): add proper support for multi dataset diff --git a/lerobot/common/logger.py b/lerobot/common/logger.py index b589e94b1e..2127342d1f 100644 --- a/lerobot/common/logger.py +++ b/lerobot/common/logger.py @@ -34,7 +34,7 @@ from lerobot.common.policies.policy_protocol import Policy from lerobot.common.utils.utils import get_global_random_state -from lerobot.configs.training import TRAIN_CONFIG_FILE, TrainPipelineConfig +from lerobot.configs.train import TRAIN_CONFIG_FILE, TrainPipelineConfig from lerobot.configs.types import FeatureType, NormalizationMode PRETRAINED_MODEL = "pretrained_model" diff --git a/lerobot/common/optim/factory.py b/lerobot/common/optim/factory.py index c81579cd4f..5d8fdb57af 100644 --- a/lerobot/common/optim/factory.py +++ b/lerobot/common/optim/factory.py @@ -23,7 +23,7 @@ from lerobot.common.logger import TRAINING_STATE from lerobot.common.policies import Policy from lerobot.common.utils.utils import get_global_random_state, set_global_random_state -from lerobot.configs.training import TrainPipelineConfig +from lerobot.configs.train import TrainPipelineConfig def make_optimizer_and_scheduler( diff --git a/lerobot/configs/training.py b/lerobot/configs/train.py similarity index 100% rename from lerobot/configs/training.py rename to lerobot/configs/train.py diff --git a/lerobot/scripts/train.py b/lerobot/scripts/train.py index 4ac9b1c9ca..0641483386 100644 --- a/lerobot/scripts/train.py +++ b/lerobot/scripts/train.py @@ -44,7 +44,7 @@ set_global_seed, ) from lerobot.configs import parser -from lerobot.configs.training import TrainPipelineConfig +from lerobot.configs.train import TrainPipelineConfig from lerobot.scripts.eval import eval_policy diff --git a/tests/scripts/save_policy_to_safetensors.py b/tests/scripts/save_policy_to_safetensors.py index 381afebfa8..b921b4738a 100644 --- a/tests/scripts/save_policy_to_safetensors.py +++ b/tests/scripts/save_policy_to_safetensors.py @@ -24,7 +24,7 @@ from lerobot.common.policies.factory import make_policy, make_policy_config from lerobot.common.utils.utils import set_global_seed from lerobot.configs.default import DatasetConfig -from lerobot.configs.training import TrainPipelineConfig +from lerobot.configs.train import TrainPipelineConfig def get_policy_stats(ds_repo_id, env_name, policy_name, policy_kwargs, train_kwargs): diff --git a/tests/test_control_robot.py b/tests/test_control_robot.py index 2d460b360a..be170071fe 100644 --- a/tests/test_control_robot.py +++ b/tests/test_control_robot.py @@ -40,7 +40,7 @@ ) from lerobot.configs.default import DatasetConfig from lerobot.configs.policies import PretrainedConfig -from lerobot.configs.training import TrainPipelineConfig +from lerobot.configs.train import TrainPipelineConfig from lerobot.scripts.control_robot import calibrate, record, replay, teleoperate from tests.test_robots import make_robot from tests.utils import DEVICE, TEST_ROBOT_TYPES, mock_calibration_dir, require_robot diff --git a/tests/test_datasets.py b/tests/test_datasets.py index 56adf07475..d0ca158d94 100644 --- a/tests/test_datasets.py +++ b/tests/test_datasets.py @@ -48,7 +48,7 @@ from 
lerobot.common.robot_devices.robots.utils import make_robot from lerobot.common.utils.utils import seeded_context from lerobot.configs.default import DatasetConfig -from lerobot.configs.training import TrainPipelineConfig +from lerobot.configs.train import TrainPipelineConfig from tests.fixtures.constants import DUMMY_REPO_ID from tests.utils import DEVICE, require_x86_64_kernel diff --git a/tests/test_policies.py b/tests/test_policies.py index 4d05af93c3..6366f399a9 100644 --- a/tests/test_policies.py +++ b/tests/test_policies.py @@ -39,7 +39,7 @@ from lerobot.common.policies.policy_protocol import Policy from lerobot.common.utils.utils import seeded_context from lerobot.configs.default import DatasetConfig -from lerobot.configs.training import TrainPipelineConfig +from lerobot.configs.train import TrainPipelineConfig from lerobot.configs.types import FeatureType, NormalizationMode, PolicyFeature from tests.scripts.save_policy_to_safetensors import get_policy_stats from tests.utils import DEVICE, require_cpu, require_env, require_x86_64_kernel From b953595aaac53e816b013ae9d0592d22dd67b7ed Mon Sep 17 00:00:00 2001 From: Simon Alibert Date: Tue, 21 Jan 2025 09:33:50 +0100 Subject: [PATCH 39/94] Remove pathable_args --- lerobot/configs/parser.py | 2 +- lerobot/scripts/eval.py | 2 +- lerobot/scripts/train.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/lerobot/configs/parser.py b/lerobot/configs/parser.py index c2f6c1a0ea..9087957493 100644 --- a/lerobot/configs/parser.py +++ b/lerobot/configs/parser.py @@ -69,7 +69,7 @@ def preprocess_path_args(fields: Field | list[Field], args: Sequence[str] | None return filtered_args -def wrap(config_path: Path | None = None, pathable_args: str | list[str] | None = None): +def wrap(config_path: Path | None = None): """ HACK: Similar to draccus.wrap but adds a preprocessing of CLI args in order to overload specific parts of the config from a config file at that particular nesting level. 
diff --git a/lerobot/scripts/eval.py b/lerobot/scripts/eval.py index ad5db2478b..4054dcd825 100644 --- a/lerobot/scripts/eval.py +++ b/lerobot/scripts/eval.py @@ -439,7 +439,7 @@ def _compile_episode_data( return data_dict -@parser.wrap(pathable_args=["policy"]) +@parser.wrap() def eval(cfg: EvalPipelineConfig): init_logging() logging.info(pformat(asdict(cfg))) diff --git a/lerobot/scripts/train.py b/lerobot/scripts/train.py index 0641483386..9933666b52 100644 --- a/lerobot/scripts/train.py +++ b/lerobot/scripts/train.py @@ -180,7 +180,7 @@ def log_eval_info(logger, info, step, cfg, dataset, is_online): logger.log_dict(info, step, mode="eval") -@parser.wrap(pathable_args=["policy"]) +@parser.wrap() def train(cfg: TrainPipelineConfig): init_logging() logging.info(pformat(asdict(cfg))) From 3bfdc5e980b975f8f5bd5ad817bcff759188d5b0 Mon Sep 17 00:00:00 2001 From: Simon Alibert Date: Thu, 23 Jan 2025 17:32:05 +0100 Subject: [PATCH 40/94] Implement custom HubMixin --- lerobot/common/utils/hub.py | 188 ++++++++++++++++++++++++++++++++++++ lerobot/configs/policies.py | 22 ++--- 2 files changed, 198 insertions(+), 12 deletions(-) create mode 100644 lerobot/common/utils/hub.py diff --git a/lerobot/common/utils/hub.py b/lerobot/common/utils/hub.py new file mode 100644 index 0000000000..63fcf918d2 --- /dev/null +++ b/lerobot/common/utils/hub.py @@ -0,0 +1,188 @@ +from pathlib import Path +from tempfile import TemporaryDirectory +from typing import Any, Type, TypeVar + +from huggingface_hub import HfApi +from huggingface_hub.utils import validate_hf_hub_args + +T = TypeVar("T", bound="HubMixin") + + +class HubMixin: + """ + A Mixin containing the functionality to push an object to the hub. + + This is similar to huggingface_hub.ModelHubMixin but is lighter and makes less assumptions about its + subclasses (in particular, the fact that it's not necessarily a model). + + The inheriting classes must implement '_save_pretrained' and 'from_pretrained'. + """ + + def save_pretrained( + self, + save_directory: str | Path, + *, + repo_id: str | None = None, + push_to_hub: bool = False, + card_kwargs: dict[str, Any] | None = None, + **push_to_hub_kwargs, + ) -> str | None: + """ + Save object in local directory. + + Args: + save_directory (`str` or `Path`): + Path to directory in which the object will be saved. + push_to_hub (`bool`, *optional*, defaults to `False`): + Whether or not to push your object to the Huggingface Hub after saving it. + repo_id (`str`, *optional*): + ID of your repository on the Hub. Used only if `push_to_hub=True`. Will default to the folder name if + not provided. + card_kwargs (`Dict[str, Any]`, *optional*): + Additional arguments passed to the card template to customize the card. + push_to_hub_kwargs: + Additional key word arguments passed along to the [`~HubMixin.push_to_hub`] method. + Returns: + `str` or `None`: url of the commit on the Hub if `push_to_hub=True`, `None` otherwise. + """ + save_directory = Path(save_directory) + save_directory.mkdir(parents=True, exist_ok=True) + + # save object (weights, files, etc.) + self._save_pretrained(save_directory) + + # push to the Hub if required + if push_to_hub: + if repo_id is None: + repo_id = save_directory.name # Defaults to `save_directory` name + return self.push_to_hub(repo_id=repo_id, card_kwargs=card_kwargs, **push_to_hub_kwargs) + return None + + def _save_pretrained(self, save_directory: Path) -> None: + """ + Overwrite this method in subclass to define how to save your object. 
+ + Args: + save_directory (`str` or `Path`): + Path to directory in which the object files will be saved. + """ + raise NotImplementedError + + @classmethod + @validate_hf_hub_args + def from_pretrained( + cls: Type[T], + pretrained_name_or_path: str | Path, + *, + force_download: bool = False, + resume_download: bool | None = None, + proxies: dict | None = None, + token: str | bool | None = None, + cache_dir: str | Path | None = None, + local_files_only: bool = False, + revision: str | None = None, + **kwargs, + ) -> T: + """ + Download the object from the Huggingface Hub and instantiate it. + + Args: + pretrained_name_or_path (`str`, `Path`): + - Either the `repo_id` (string) of the object hosted on the Hub, e.g. `lerobot/diffusion_pusht`. + - Or a path to a `directory` containing the object files saved using `.save_pretrained`, + e.g., `../path/to/my_model_directory/`. + revision (`str`, *optional*): + Revision on the Hub. Can be a branch name, a git tag or any commit id. + Defaults to the latest commit on `main` branch. + force_download (`bool`, *optional*, defaults to `False`): + Whether to force (re-)downloading the files from the Hub, overriding the existing cache. + proxies (`Dict[str, str]`, *optional*): + A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', + 'http://hostname': 'foo.bar:4012'}`. The proxies are used on every request. + token (`str` or `bool`, *optional*): + The token to use as HTTP bearer authorization for remote files. By default, it will use the token + cached when running `huggingface-cli login`. + cache_dir (`str`, `Path`, *optional*): + Path to the folder where cached files are stored. + local_files_only (`bool`, *optional*, defaults to `False`): + If `True`, avoid downloading the file and return the path to the local cached file if it exists. + kwargs (`Dict`, *optional*): + Additional kwargs to pass to the object during initialization. + """ + raise NotImplementedError + + @validate_hf_hub_args + def push_to_hub( + self, + repo_id: str, + *, + commit_message: str | None = None, + private: bool | None = None, + token: str | None = None, + branch: str | None = None, + create_pr: bool | None = None, + allow_patterns: list[str] | str | None = None, + ignore_patterns: list[str] | str | None = None, + delete_patterns: list[str] | str | None = None, + card_kwargs: dict[str, Any] | None = None, + ) -> str: + """ + Upload model checkpoint to the Hub. + + Use `allow_patterns` and `ignore_patterns` to precisely filter which files should be pushed to the hub. Use + `delete_patterns` to delete existing remote files in the same commit. See [`upload_folder`] reference for more + details. + + Args: + repo_id (`str`): + ID of the repository to push to (example: `"username/my-model"`). + commit_message (`str`, *optional*): + Message to commit while pushing. + private (`bool`, *optional*): + Whether the repository created should be private. + If `None` (default), the repo will be public unless the organization's default is private. + token (`str`, *optional*): + The token to use as HTTP bearer authorization for remote files. By default, it will use the token + cached when running `huggingface-cli login`. + branch (`str`, *optional*): + The git branch on which to push the model. This defaults to `"main"`. + create_pr (`boolean`, *optional*): + Whether or not to create a Pull Request from `branch` with that commit. Defaults to `False`. 
+ allow_patterns (`List[str]` or `str`, *optional*): + If provided, only files matching at least one pattern are pushed. + ignore_patterns (`List[str]` or `str`, *optional*): + If provided, files matching any of the patterns are not pushed. + delete_patterns (`List[str]` or `str`, *optional*): + If provided, remote files matching any of the patterns will be deleted from the repo. + card_kwargs (`Dict[str, Any]`, *optional*): + Additional arguments passed to the card template to customize the card. + + Returns: + The url of the commit of your object in the given repository. + """ + api = HfApi(token=token) + repo_id = api.create_repo(repo_id=repo_id, private=private, exist_ok=True).repo_id + + if commit_message is None: + if "Policy" in self.__class__.__name__: + commit_message = "Upload policy" + elif "Config" in self.__class__.__name__: + commit_message = "Upload config" + else: + commit_message = f"Upload {self.__class__.__name__}" + + # Push the files to the repo in a single commit + with TemporaryDirectory(ignore_cleanup_errors=True) as tmp: + saved_path = Path(tmp) / repo_id + self.save_pretrained(saved_path, card_kwargs=card_kwargs) + return api.upload_folder( + repo_id=repo_id, + repo_type="model", + folder_path=saved_path, + commit_message=commit_message, + revision=branch, + create_pr=create_pr, + allow_patterns=allow_patterns, + ignore_patterns=ignore_patterns, + delete_patterns=delete_patterns, + ) diff --git a/lerobot/configs/policies.py b/lerobot/configs/policies.py index abdcaec48b..73dab55ece 100644 --- a/lerobot/configs/policies.py +++ b/lerobot/configs/policies.py @@ -1,17 +1,17 @@ import abc import os -from copy import copy from dataclasses import dataclass, field from pathlib import Path from typing import Type, TypeVar import draccus -from huggingface_hub import ModelHubMixin, hf_hub_download +from huggingface_hub import hf_hub_download from huggingface_hub.constants import CONFIG_NAME from huggingface_hub.errors import HfHubHTTPError from lerobot.common.optim.optimizers import OptimizerConfig from lerobot.common.optim.schedulers import LRSchedulerConfig +from lerobot.common.utils.hub import HubMixin from lerobot.configs.types import FeatureType, NormalizationMode, PolicyFeature # Generic variable that is either PretrainedConfig or a subclass thereof @@ -19,7 +19,7 @@ @dataclass -class PretrainedConfig(draccus.ChoiceRegistry, ModelHubMixin, abc.ABC): +class PretrainedConfig(draccus.ChoiceRegistry, HubMixin, abc.ABC): """ Base configuration class for policy models. @@ -34,8 +34,6 @@ class PretrainedConfig(draccus.ChoiceRegistry, ModelHubMixin, abc.ABC): the original scale. 
""" - type: str | None = "" - n_obs_steps: int = 1 normalization_mapping: dict[str, NormalizationMode] = field(default_factory=dict) @@ -43,9 +41,12 @@ class PretrainedConfig(draccus.ChoiceRegistry, ModelHubMixin, abc.ABC): output_features: dict[str, PolicyFeature] = field(default_factory=dict) def __post_init__(self): - self.type = self.get_choice_name(self.__class__) self.pretrained_path = None + @property + def type(self) -> str: + return self.get_choice_name(self.__class__) + @abc.abstractproperty def observation_delta_indices(self) -> list | None: raise NotImplementedError @@ -96,15 +97,13 @@ def action_feature(self) -> PolicyFeature | None: return None def _save_pretrained(self, save_directory: Path) -> None: - to_save = copy(self) - del to_save.path with open(save_directory / CONFIG_NAME, "w") as f, draccus.config_type("json"): draccus.dump(self, f, indent=4) @classmethod def from_pretrained( cls: Type[T], - pretrained_model_name_or_path: str | Path, + pretrained_name_or_path: str | Path, *, force_download: bool = False, resume_download: bool = None, @@ -115,7 +114,7 @@ def from_pretrained( revision: str | None = None, **model_kwargs, ) -> T: - model_id = str(pretrained_model_name_or_path) + model_id = str(pretrained_name_or_path) config_file: str | None = None if Path(model_id).is_dir(): if CONFIG_NAME in os.listdir(model_id): @@ -141,5 +140,4 @@ def from_pretrained( # HACK: this is very ugly, ideally we'd like to be able to do that natively with draccus # something like --policy.path (in addition to --policy.type) cli_overrides = model_kwargs.pop("cli_overrides", []) - instance = draccus.parse(cls, config_file, args=[]) - return draccus.parse(instance.__class__, config_file, args=cli_overrides) + return draccus.parse(cls, config_file, args=cli_overrides) From 5c8d5bd8d6a1e7eda61ee88960308eb92fdc92e9 Mon Sep 17 00:00:00 2001 From: Simon Alibert Date: Thu, 23 Jan 2025 17:32:53 +0100 Subject: [PATCH 41/94] Fixes --- lerobot/common/envs/configs.py | 1 - lerobot/configs/eval.py | 4 ++-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/lerobot/common/envs/configs.py b/lerobot/common/envs/configs.py index d0c53d22d6..ac96f5c59b 100644 --- a/lerobot/common/envs/configs.py +++ b/lerobot/common/envs/configs.py @@ -9,7 +9,6 @@ @dataclass class EnvConfig(draccus.ChoiceRegistry, abc.ABC): - n_envs: int | None = None task: str | None = None fps: int = 30 features: dict[str, PolicyFeature] = field(default_factory=dict) diff --git a/lerobot/configs/eval.py b/lerobot/configs/eval.py index 58e3c12a0e..f14a8bbae6 100644 --- a/lerobot/configs/eval.py +++ b/lerobot/configs/eval.py @@ -1,5 +1,5 @@ import datetime as dt -from dataclasses import Field, dataclass, fields +from dataclasses import Field, dataclass, field, fields from pathlib import Path from lerobot.common import envs, policies # noqa: F401 @@ -32,8 +32,8 @@ class EvalPipelineConfig: # Either the repo ID of a model hosted on the Hub or a path to a directory containing weights # saved using `Policy.save_pretrained`. If not provided, the policy is initialized from scratch # (useful for debugging). This argument is mutually exclusive with `--config`. 
- eval: EvalConfig env: envs.EnvConfig + eval: EvalConfig = field(default_factory=EvalConfig) policy: PretrainedConfig | None = None output_dir: Path | None = None job_name: str | None = None From af36a88fad852c8b7590905853eb49d2025284df Mon Sep 17 00:00:00 2001 From: Simon Alibert Date: Thu, 23 Jan 2025 17:33:41 +0100 Subject: [PATCH 42/94] Implement PreTrainedPolicy base class --- lerobot/common/policies/act/modeling_act.py | 13 +- .../policies/diffusion/modeling_diffusion.py | 13 +- lerobot/common/policies/pretrained.py | 176 ++++++++++++++++++ .../common/policies/tdmpc/modeling_tdmpc.py | 13 +- .../common/policies/vqbet/modeling_vqbet.py | 13 +- 5 files changed, 192 insertions(+), 36 deletions(-) create mode 100644 lerobot/common/policies/pretrained.py diff --git a/lerobot/common/policies/act/modeling_act.py b/lerobot/common/policies/act/modeling_act.py index 2e7b1f9ce7..773a9e3882 100644 --- a/lerobot/common/policies/act/modeling_act.py +++ b/lerobot/common/policies/act/modeling_act.py @@ -29,27 +29,22 @@ import torch import torch.nn.functional as F # noqa: N812 import torchvision -from huggingface_hub import PyTorchModelHubMixin from torch import Tensor, nn from torchvision.models._utils import IntermediateLayerGetter from torchvision.ops.misc import FrozenBatchNorm2d from lerobot.common.policies.act.configuration_act import ACTConfig from lerobot.common.policies.normalize import Normalize, Unnormalize +from lerobot.common.policies.pretrained import PreTrainedPolicy -class ACTPolicy( - nn.Module, - PyTorchModelHubMixin, - library_name="lerobot", - repo_url="https://github.com/huggingface/lerobot", - tags=["robotics", "act"], -): +class ACTPolicy(PreTrainedPolicy): """ Action Chunking Transformer Policy as per Learning Fine-Grained Bimanual Manipulation with Low-Cost Hardware (paper: https://arxiv.org/abs/2304.13705, code: https://github.com/tonyzhaozh/act) """ + config_class: ACTConfig name = "act" def __init__( @@ -64,7 +59,7 @@ def __init__( dataset_stats: Dataset statistics to be used for normalization. If not passed here, it is expected that they will be passed with a call to `load_state_dict` before the policy is used. 
""" - super().__init__() + super().__init__(config) config.validate_features() self.config = config diff --git a/lerobot/common/policies/diffusion/modeling_diffusion.py b/lerobot/common/policies/diffusion/modeling_diffusion.py index 174bb24ede..7147f550a4 100644 --- a/lerobot/common/policies/diffusion/modeling_diffusion.py +++ b/lerobot/common/policies/diffusion/modeling_diffusion.py @@ -31,12 +31,12 @@ import torchvision from diffusers.schedulers.scheduling_ddim import DDIMScheduler from diffusers.schedulers.scheduling_ddpm import DDPMScheduler -from huggingface_hub import PyTorchModelHubMixin from torch import Tensor, nn from lerobot.common.constants import OBS_ENV, OBS_ROBOT from lerobot.common.policies.diffusion.configuration_diffusion import DiffusionConfig from lerobot.common.policies.normalize import Normalize, Unnormalize +from lerobot.common.policies.pretrained import PreTrainedPolicy from lerobot.common.policies.utils import ( get_device_from_parameters, get_dtype_from_parameters, @@ -45,18 +45,13 @@ ) -class DiffusionPolicy( - nn.Module, - PyTorchModelHubMixin, - library_name="lerobot", - repo_url="https://github.com/huggingface/lerobot", - tags=["robotics", "diffusion-policy"], -): +class DiffusionPolicy(PreTrainedPolicy): """ Diffusion Policy as per "Diffusion Policy: Visuomotor Policy Learning via Action Diffusion" (paper: https://arxiv.org/abs/2303.04137, code: https://github.com/real-stanford/diffusion_policy). """ + config_class = DiffusionConfig name = "diffusion" def __init__( @@ -71,7 +66,7 @@ def __init__( dataset_stats: Dataset statistics to be used for normalization. If not passed here, it is expected that they will be passed with a call to `load_state_dict` before the policy is used. """ - super().__init__() + super().__init__(config) config.validate_features() self.config = config diff --git a/lerobot/common/policies/pretrained.py b/lerobot/common/policies/pretrained.py new file mode 100644 index 0000000000..ff3b3854a0 --- /dev/null +++ b/lerobot/common/policies/pretrained.py @@ -0,0 +1,176 @@ +import abc +import logging +import os +from pathlib import Path +from typing import Type, TypeVar + +import packaging +import safetensors +from huggingface_hub import hf_hub_download +from huggingface_hub.constants import SAFETENSORS_SINGLE_FILE +from safetensors.torch import load_model as load_model_as_safetensor +from safetensors.torch import save_model as save_model_as_safetensor +from torch import Tensor, nn + +from lerobot.common.utils.hub import HubMixin +from lerobot.configs.policies import PretrainedConfig + +T = TypeVar("T", bound="PreTrainedPolicy") + +DEFAULT_POLICY_CARD = """ +--- +# For reference on model card metadata, see the spec: https://github.com/huggingface/hub-docs/blob/main/modelcard.md?plain=1 +# Doc / guide: https://huggingface.co/docs/hub/model-cards +{{ card_data }} +--- + +This policy has been pushed to the Hub using [LeRobot](https://github.com/huggingface/lerobot): +- Docs: {{ docs_url | default("[More Information Needed]", true) }} +""" + + +class PreTrainedPolicy(nn.Module, HubMixin, abc.ABC): + """ + Base class for policy models. + """ + + config_class: None + name: None + + def __init__(self, config: PretrainedConfig, *inputs, **kwargs): + super().__init__() + if not isinstance(config, PretrainedConfig): + raise ValueError( + f"Parameter config in `{self.__class__.__name__}(config)` should be an instance of class " + "`PretrainedConfig`. 
To create a model from a pretrained model use " + f"`model = {self.__class__.__name__}.from_pretrained(PRETRAINED_MODEL_NAME)`" + ) + self.config = config + + def __init_subclass__(cls, **kwargs): + super().__init_subclass__(**kwargs) + if not getattr(cls, "config_class", None): + raise TypeError(f"Class {cls.__name__} must define 'config_class'") + if not getattr(cls, "name", None): + raise TypeError(f"Class {cls.__name__} must define 'name'") + + def _save_pretrained(self, save_directory: Path) -> None: + self.config._save_pretrained(save_directory) + model_to_save = self.module if hasattr(self, "module") else self + save_model_as_safetensor(model_to_save, str(save_directory / SAFETENSORS_SINGLE_FILE)) + + @classmethod + def from_pretrained( + cls: Type[T], + pretrained_name_or_path: str | Path, + *, + config: PretrainedConfig | None = None, + force_download: bool = False, + resume_download: bool | None = None, + proxies: dict | None = None, + token: str | bool | None = None, + cache_dir: str | Path | None = None, + local_files_only: bool = False, + revision: str | None = None, + map_location: str = "cpu", + strict: bool = False, + **kwargs, + ) -> T: + """ + The policy is set in evaluation mode by default using `policy.eval()` (dropout modules are + deactivated). To train it, you should first set it back in training mode with `policy.train()`. + """ + if config is None: + config = PretrainedConfig.from_pretrained( + pretrained_name_or_path=pretrained_name_or_path, + force_download=force_download, + resume_download=resume_download, + proxies=proxies, + token=token, + cache_dir=cache_dir, + local_files_only=local_files_only, + revision=revision, + **kwargs, + ) + model_id = str(pretrained_name_or_path) + instance = cls(config, **kwargs) + if os.path.isdir(model_id): + print("Loading weights from local directory") + model_file = os.path.join(model_id, SAFETENSORS_SINGLE_FILE) + policy = cls._load_as_safetensor(instance, model_file, map_location, strict) + else: + model_file = hf_hub_download( + repo_id=model_id, + filename=SAFETENSORS_SINGLE_FILE, + revision=revision, + cache_dir=cache_dir, + force_download=force_download, + proxies=proxies, + resume_download=resume_download, + token=token, + local_files_only=local_files_only, + ) + policy = cls._load_as_safetensor(instance, model_file, map_location, strict) + + policy.to(map_location) + policy.eval() + return policy + + @classmethod + def _load_as_safetensor(cls, model: T, model_file: str, map_location: str, strict: bool) -> T: + if packaging.version.parse(safetensors.__version__) < packaging.version.parse("0.4.3"): + load_model_as_safetensor(model, model_file, strict=strict) + if map_location != "cpu": + logging.warning( + "Loading model weights on other devices than 'cpu' is not supported natively in your version of safetensors." + " This means that the model is loaded on 'cpu' first and then copied to the device." + " This leads to a slower loading time." + " Please update safetensors to version 0.4.3 or above for improved performance." 
+ ) + model.to(map_location) + else: + safetensors.torch.load_model(model, model_file, strict=strict, device=map_location) + return model + + # def generate_model_card(self, *args, **kwargs) -> ModelCard: + # card = ModelCard.from_template( + # card_data=self._hub_mixin_info.model_card_data, + # template_str=self._hub_mixin_info.model_card_template, + # repo_url=self._hub_mixin_info.repo_url, + # docs_url=self._hub_mixin_info.docs_url, + # **kwargs, + # ) + # return card + + @abc.abstractmethod + def get_optim_params(self) -> dict: + """ + Returns the policy-specific parameters dict to be passed on to the optimizer. + """ + raise NotImplementedError + + @abc.abstractmethod + def reset(self): + """To be called whenever the environment is reset. + + Does things like clearing caches. + """ + raise NotImplementedError + + @abc.abstractmethod + def forward(self, batch: dict[str, Tensor]) -> dict: + """Run the batch through the model and compute the loss for training or validation. + + Returns a dictionary with "loss" and potentially other information. Apart from "loss" which is a Tensor, all + other items should be logging-friendly, native Python types. + """ + raise NotImplementedError + + @abc.abstractmethod + def select_action(self, batch: dict[str, Tensor]) -> Tensor: + """Return one action to run in the environment (potentially in batch mode). + + When the model uses a history of observations, or outputs a sequence of actions, this method deals + with caching. + """ + raise NotImplementedError diff --git a/lerobot/common/policies/tdmpc/modeling_tdmpc.py b/lerobot/common/policies/tdmpc/modeling_tdmpc.py index 5106b46673..aa6b30b0ab 100644 --- a/lerobot/common/policies/tdmpc/modeling_tdmpc.py +++ b/lerobot/common/policies/tdmpc/modeling_tdmpc.py @@ -33,22 +33,16 @@ import torch import torch.nn as nn import torch.nn.functional as F # noqa: N812 -from huggingface_hub import PyTorchModelHubMixin from torch import Tensor from lerobot.common.constants import OBS_ENV, OBS_ROBOT from lerobot.common.policies.normalize import Normalize, Unnormalize +from lerobot.common.policies.pretrained import PreTrainedPolicy from lerobot.common.policies.tdmpc.configuration_tdmpc import TDMPCConfig from lerobot.common.policies.utils import get_device_from_parameters, get_output_shape, populate_queues -class TDMPCPolicy( - nn.Module, - PyTorchModelHubMixin, - library_name="lerobot", - repo_url="https://github.com/huggingface/lerobot", - tags=["robotics", "tdmpc"], -): +class TDMPCPolicy(PreTrainedPolicy): """Implementation of TD-MPC learning + inference. Please note several warnings for this policy. @@ -66,6 +60,7 @@ class TDMPCPolicy( match our xarm environment. """ + config_class: TDMPCConfig name = "tdmpc" def __init__(self, config: TDMPCConfig, dataset_stats: dict[str, dict[str, Tensor]] | None = None): @@ -76,7 +71,7 @@ def __init__(self, config: TDMPCConfig, dataset_stats: dict[str, dict[str, Tenso dataset_stats: Dataset statistics to be used for normalization. If not passed here, it is expected that they will be passed with a call to `load_state_dict` before the policy is used. 
""" - super().__init__() + super().__init__(config) config.validate_features() self.config = config diff --git a/lerobot/common/policies/vqbet/modeling_vqbet.py b/lerobot/common/policies/vqbet/modeling_vqbet.py index 7f19b19e08..d250e54ace 100644 --- a/lerobot/common/policies/vqbet/modeling_vqbet.py +++ b/lerobot/common/policies/vqbet/modeling_vqbet.py @@ -25,10 +25,10 @@ import torch import torch.nn.functional as F # noqa: N812 import torchvision -from huggingface_hub import PyTorchModelHubMixin from torch import Tensor, nn from lerobot.common.policies.normalize import Normalize, Unnormalize +from lerobot.common.policies.pretrained import PreTrainedPolicy from lerobot.common.policies.utils import get_device_from_parameters, get_output_shape, populate_queues from lerobot.common.policies.vqbet.configuration_vqbet import VQBeTConfig from lerobot.common.policies.vqbet.vqbet_utils import GPT, ResidualVQ @@ -36,17 +36,12 @@ # ruff: noqa: N806 -class VQBeTPolicy( - nn.Module, - PyTorchModelHubMixin, - library_name="lerobot", - repo_url="https://github.com/huggingface/lerobot", - tags=["robotics", "vqbet"], -): +class VQBeTPolicy(PreTrainedPolicy): """ VQ-BeT Policy as per "Behavior Generation with Latent Actions" """ + config_class: VQBeTConfig name = "vqbet" def __init__( @@ -61,7 +56,7 @@ def __init__( dataset_stats: Dataset statistics to be used for normalization. If not passed here, it is expected that they will be passed with a call to `load_state_dict` before the policy is used. """ - super().__init__() + super().__init__(config) config.validate_features() self.config = config From a12f474ce492a9c8e105aae7fb60c9babf3cf9e0 Mon Sep 17 00:00:00 2001 From: Simon Alibert Date: Thu, 23 Jan 2025 18:16:04 +0100 Subject: [PATCH 43/94] Add HubMixin to TrainPipelineConfig --- lerobot/common/logger.py | 4 +-- lerobot/configs/policies.py | 6 ++--- lerobot/configs/train.py | 54 +++++++++++++++++++++++++++++++++++-- 3 files changed, 57 insertions(+), 7 deletions(-) diff --git a/lerobot/common/logger.py b/lerobot/common/logger.py index 2127342d1f..229bcd3801 100644 --- a/lerobot/common/logger.py +++ b/lerobot/common/logger.py @@ -34,7 +34,7 @@ from lerobot.common.policies.policy_protocol import Policy from lerobot.common.utils.utils import get_global_random_state -from lerobot.configs.train import TRAIN_CONFIG_FILE, TrainPipelineConfig +from lerobot.configs.train import TRAIN_CONFIG_NAME, TrainPipelineConfig from lerobot.configs.types import FeatureType, NormalizationMode PRETRAINED_MODEL = "pretrained_model" @@ -162,7 +162,7 @@ def save_model(self, save_dir: Path, policy: Policy, wandb_artifact_name: str | register_features_types() policy.save_pretrained(save_dir) # Also save the full config for the env configuration. 
- with open(save_dir / TRAIN_CONFIG_FILE, "w") as f: + with open(save_dir / TRAIN_CONFIG_NAME, "w") as f: draccus.dump(self._cfg, f, indent=4) if self._wandb and not self._cfg.wandb.disable_artifact: # note wandb artifact does not accept ":" or "/" in its name diff --git a/lerobot/configs/policies.py b/lerobot/configs/policies.py index 73dab55ece..de1a977be7 100644 --- a/lerobot/configs/policies.py +++ b/lerobot/configs/policies.py @@ -112,7 +112,7 @@ def from_pretrained( cache_dir: str | Path | None = None, local_files_only: bool = False, revision: str | None = None, - **model_kwargs, + **policy_kwargs, ) -> T: model_id = str(pretrained_name_or_path) config_file: str | None = None @@ -120,7 +120,7 @@ def from_pretrained( if CONFIG_NAME in os.listdir(model_id): config_file = os.path.join(model_id, CONFIG_NAME) else: - print(f"config.json not found in {Path(model_id).resolve()}") + print(f"{CONFIG_NAME} not found in {Path(model_id).resolve()}") else: try: config_file = hf_hub_download( @@ -139,5 +139,5 @@ def from_pretrained( # HACK: this is very ugly, ideally we'd like to be able to do that natively with draccus # something like --policy.path (in addition to --policy.type) - cli_overrides = model_kwargs.pop("cli_overrides", []) + cli_overrides = policy_kwargs.pop("cli_overrides", []) return draccus.parse(cls, config_file, args=cli_overrides) diff --git a/lerobot/configs/train.py b/lerobot/configs/train.py index 76882bfbee..911c16a626 100644 --- a/lerobot/configs/train.py +++ b/lerobot/configs/train.py @@ -1,16 +1,23 @@ import datetime as dt +import os from dataclasses import Field, dataclass, field, fields from pathlib import Path +from typing import Type + +import draccus +from huggingface_hub import hf_hub_download +from huggingface_hub.errors import HfHubHTTPError from lerobot.common import envs from lerobot.common.optim import OptimizerConfig from lerobot.common.optim.schedulers import LRSchedulerConfig +from lerobot.common.utils.hub import HubMixin from lerobot.configs import parser from lerobot.configs.default import DatasetConfig, WandBConfig from lerobot.configs.eval import EvalConfig from lerobot.configs.policies import PretrainedConfig -TRAIN_CONFIG_FILE = "train_config.json" +TRAIN_CONFIG_NAME = "train_config.json" @dataclass @@ -63,7 +70,7 @@ class OnlineConfig: @dataclass -class TrainPipelineConfig: +class TrainPipelineConfig(HubMixin): dataset: DatasetConfig env: envs.EnvConfig | None = None policy: PretrainedConfig | None = None @@ -153,3 +160,46 @@ def __get_path_fields__(cls) -> list[Field]: """This enables the parser to load config from the policy using `--policy.path=local/dir`""" path_fields = ["policy"] return [f for f in fields(cls) if f.name in path_fields] + + def _save_pretrained(self, save_directory: Path) -> None: + with open(save_directory / TRAIN_CONFIG_NAME, "w") as f, draccus.config_type("json"): + draccus.dump(self, f, indent=4) + + @classmethod + def from_pretrained( + cls: Type["TrainPipelineConfig"], + pretrained_name_or_path: str | Path, + *, + force_download: bool = False, + resume_download: bool = None, + proxies: dict | None = None, + token: str | bool | None = None, + cache_dir: str | Path | None = None, + local_files_only: bool = False, + revision: str | None = None, + **policy_kwargs, + ) -> "TrainPipelineConfig": + model_id = str(pretrained_name_or_path) + config_file: str | None = None + if Path(model_id).is_dir(): + if TRAIN_CONFIG_NAME in os.listdir(model_id): + config_file = os.path.join(model_id, TRAIN_CONFIG_NAME) + else: + 
print(f"{TRAIN_CONFIG_NAME} not found in {Path(model_id).resolve()}") + else: + try: + config_file = hf_hub_download( + repo_id=model_id, + filename=TRAIN_CONFIG_NAME, + revision=revision, + cache_dir=cache_dir, + force_download=force_download, + proxies=proxies, + resume_download=resume_download, + token=token, + local_files_only=local_files_only, + ) + except HfHubHTTPError as e: + print(f"config.json not found on the HuggingFace Hub: {str(e)}") + + return draccus.parse(cls, config_file, args=[]) From 8e3777a70ac144c20078281d1e4dab18c4cb81af Mon Sep 17 00:00:00 2001 From: Simon Alibert Date: Thu, 23 Jan 2025 18:19:04 +0100 Subject: [PATCH 44/94] Udpate example 2 & 3 --- examples/2_evaluate_pretrained_policy.py | 32 ++--- examples/3_train_policy.py | 167 ++++++++++++++--------- 2 files changed, 116 insertions(+), 83 deletions(-) diff --git a/examples/2_evaluate_pretrained_policy.py b/examples/2_evaluate_pretrained_policy.py index 5c3640682e..45dcf8e717 100644 --- a/examples/2_evaluate_pretrained_policy.py +++ b/examples/2_evaluate_pretrained_policy.py @@ -22,26 +22,16 @@ output_directory = Path("outputs/eval/example_pusht_diffusion") output_directory.mkdir(parents=True, exist_ok=True) +# Select your device +device = "cuda" + # Provide the [hugging face repo id](https://huggingface.co/lerobot/diffusion_pusht): pretrained_policy_path = "lerobot/diffusion_pusht" # OR a path to a local outputs/train folder. # pretrained_policy_path = Path("outputs/train/example_pusht_diffusion") -# TODO(alibert, rcadene): fix this file -policy = DiffusionPolicy.from_pretrained(pretrained_policy_path) -policy.eval() - -# Check if GPU is available -if torch.cuda.is_available(): - device = torch.device("cuda") - print("GPU is available. Device set to:", device) -else: - device = torch.device("cpu") - print(f"GPU is not available. Device set to: {device}. Inference will be slower than on GPU.") - # Decrease the number of reverse-diffusion steps (trades off a bit of quality for 10x speed) - policy.diffusion.num_inference_steps = 10 - -policy.to(device) +# TODO: remove revision before merge +policy = DiffusionPolicy.from_pretrained(pretrained_policy_path, revision="remove_hydra", map_location=device) # Initialize evaluation environment to render two observation types: # an image of the scene and state/position of the agent. 
The environment @@ -52,7 +42,17 @@ max_episode_steps=300, ) -# Reset the policy and environmens to prepare for rollout +# We can verify that the shapes of the features expected by the policy match the ones from the observations +# produced by the environment +print(policy.config.input_features) +print(env.observation_space) + +# Similarly, we can check that the actions produced by the policy will match the actions expected by the +# environment +print(policy.config.output_features) +print(env.action_space) + +# Reset the policy and environments to prepare for rollout policy.reset() numpy_observation, info = env.reset(seed=42) diff --git a/examples/3_train_policy.py b/examples/3_train_policy.py index c52ee32191..daf5abb33a 100644 --- a/examples/3_train_policy.py +++ b/examples/3_train_policy.py @@ -8,73 +8,106 @@ import torch -from lerobot.common.datasets.lerobot_dataset import LeRobotDataset +from lerobot.common.datasets.lerobot_dataset import LeRobotDataset, LeRobotDatasetMetadata +from lerobot.common.datasets.utils import dataset_to_policy_features from lerobot.common.policies.diffusion.configuration_diffusion import DiffusionConfig from lerobot.common.policies.diffusion.modeling_diffusion import DiffusionPolicy +from lerobot.configs.types import FeatureType -# Create a directory to store the training checkpoint. -output_directory = Path("outputs/train/example_pusht_diffusion") -output_directory.mkdir(parents=True, exist_ok=True) - -# Number of offline training steps (we'll only do offline training for this example.) -# Adjust as you prefer. 5000 steps are needed to get something worth evaluating. -training_steps = 5000 -device = torch.device("cuda") -log_freq = 250 - -# Set up the dataset. -delta_timestamps = { - # Load the previous image and state at -0.1 seconds before current frame, - # then load current image and state corresponding to 0.0 second. - "observation.image": [-0.1, 0.0], - "observation.state": [-0.1, 0.0], - # Load the previous action (-0.1), the next action to be executed (0.0), - # and 14 future actions with a 0.1 seconds spacing. All these actions will be - # used to supervise the policy. - "action": [-0.1, 0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4], -} -dataset = LeRobotDataset("lerobot/pusht", delta_timestamps=delta_timestamps) - -# Set up the the policy. -# Policies are initialized with a configuration class, in this case `DiffusionConfig`. -# For this example, no arguments need to be passed because the defaults are set up for PushT. -# If you're doing something different, you will likely need to change at least some of the defaults. -cfg = DiffusionConfig() -cfg.parse_features_from_dataset(dataset.meta) -policy = DiffusionPolicy(cfg, dataset_stats=dataset.meta.stats) -policy.train() -policy.to(device) - -optimizer = torch.optim.Adam(policy.parameters(), lr=1e-4) - -# Create dataloader for offline training. -dataloader = torch.utils.data.DataLoader( - dataset, - num_workers=4, - batch_size=64, - shuffle=True, - pin_memory=device != torch.device("cpu"), - drop_last=True, -) - -# Run training loop. -step = 0 -done = False -while not done: - for batch in dataloader: - batch = {k: v.to(device, non_blocking=True) for k, v in batch.items()} - output_dict = policy.forward(batch) - loss = output_dict["loss"] - loss.backward() - optimizer.step() - optimizer.zero_grad() - - if step % log_freq == 0: - print(f"step: {step} loss: {loss.item():.3f}") - step += 1 - if step >= training_steps: - done = True - break - -# Save a policy checkpoint. 
-policy.save_pretrained(output_directory) + +def main(): + # Create a directory to store the training checkpoint. + output_directory = Path("outputs/train/example_pusht_diffusion") + output_directory.mkdir(parents=True, exist_ok=True) + + # # Select your device + device = torch.device("cuda") + + # Number of offline training steps (we'll only do offline training for this example.) + # Adjust as you prefer. 5000 steps are needed to get something worth evaluating. + training_steps = 5000 + log_freq = 1 + + # When starting from scratch (i.e. not from a pretrained policy), we need to specify 2 things before + # creating the policy: + # - input/output shapes: to properly size the policy + # - dataset stats: for normalization and denormalization of input/outputs + dataset_metadata = LeRobotDatasetMetadata("lerobot/pusht") + features = dataset_to_policy_features(dataset_metadata.features) + output_features = {key: ft for key, ft in features.items() if ft.type is FeatureType.ACTION} + input_features = {key: ft for key, ft in features.items() if key not in output_features} + + # Policies are initialized with a configuration class, in this case `DiffusionConfig`. For this example, + # we'll just use the defaults and so no arguments other than input/output features need to be passed. + cfg = DiffusionConfig(input_features=input_features, output_features=output_features) + + # We can now instantiate our policy with this config and the dataset stats. + policy = DiffusionPolicy(cfg, dataset_stats=dataset_metadata.stats) + policy.train() + policy.to(device) + + # Another policy-dataset interaction is with the delta_timestamps. Each policy expects a given number frames + # which can differ for inputs, outputs and rewards (if there are some). + delta_timestamps = { + "observation.image": [i / dataset_metadata.fps for i in cfg.observation_delta_indices], + "observation.state": [i / dataset_metadata.fps for i in cfg.observation_delta_indices], + "action": [i / dataset_metadata.fps for i in cfg.action_delta_indices], + } + + # In this case with the standard configuration for Diffusion Policy, it is equivalent to this: + delta_timestamps = { + # Load the previous image and state at -0.1 seconds before current frame, + # then load current image and state corresponding to 0.0 second. + "observation.image": [-0.1, 0.0], + "observation.state": [-0.1, 0.0], + # Load the previous action (-0.1), the next action to be executed (0.0), + # and 14 future actions with a 0.1 seconds spacing. All these actions will be + # used to supervise the policy. + "action": [-0.1, 0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4], + } + + # We can then instantiate the dataset with these delta_timestamps configuration. + dataset = LeRobotDataset("lerobot/pusht", delta_timestamps=delta_timestamps) + + # TODO: keep? + # from lerobot.common.optim.factory import make_optimizer_and_scheduler + # from lerobot.configs.train import TrainPipelineConfig + # train_cfg = TrainPipelineConfig.from_pretrained("lerobot/diffusion_pusht", revision="remove_hydra") + # optimizer, _ = make_optimizer_and_scheduler(train_cfg, policy) + + # Then we create our optimizer and dataloader for offline training. + optimizer = torch.optim.Adam(policy.parameters(), lr=1e-4) + dataloader = torch.utils.data.DataLoader( + dataset, + num_workers=4, + batch_size=64, + shuffle=True, + pin_memory=device.type != "cpu", + drop_last=True, + ) + + # Run training loop. 
+ step = 0 + done = False + while not done: + for batch in dataloader: + batch = {k: v.to(device, non_blocking=True) for k, v in batch.items()} + output_dict = policy.forward(batch) + loss = output_dict["loss"] + loss.backward() + optimizer.step() + optimizer.zero_grad() + + if step % log_freq == 0: + print(f"step: {step} loss: {loss.item():.3f}") + step += 1 + if step >= training_steps: + done = True + break + + # Save a policy checkpoint. + policy.save_pretrained(output_directory) + + +if __name__ == "__main__": + main() From 71212b8def1c8ff2b1e0751d71d735fa0588e13d Mon Sep 17 00:00:00 2001 From: Simon Alibert Date: Thu, 23 Jan 2025 18:58:37 +0100 Subject: [PATCH 45/94] Update push_pretrained --- lerobot/scripts/push_pretrained.py | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/lerobot/scripts/push_pretrained.py b/lerobot/scripts/push_pretrained.py index 8472bf98a1..078cb0587d 100644 --- a/lerobot/scripts/push_pretrained.py +++ b/lerobot/scripts/push_pretrained.py @@ -30,7 +30,7 @@ from pathlib import Path import draccus -from huggingface_hub import create_repo, upload_folder +from huggingface_hub import HfApi @dataclass @@ -44,13 +44,22 @@ class PushPretrainedConfig: @draccus.wrap() def main(cfg: PushPretrainedConfig): - create_repo( + hub_api = HfApi() + hub_api.create_repo( repo_id=cfg.repo_id, private=cfg.private, repo_type="model", exist_ok=cfg.exist_ok, ) - upload_folder( + if cfg.branch: + hub_api.create_branch( + repo_id=cfg.repo_id, + branch=cfg.branch, + repo_type="model", + exist_ok=cfg.exist_ok, + ) + + hub_api.upload_folder( repo_id=cfg.repo_id, folder_path=cfg.pretrained_path, repo_type="model", From 136bf508579b0ca1c3ecfc37f5852e76dda2d242 Mon Sep 17 00:00:00 2001 From: Simon Alibert Date: Thu, 23 Jan 2025 19:25:19 +0100 Subject: [PATCH 46/94] Fix config_class --- lerobot/common/policies/act/modeling_act.py | 2 +- lerobot/common/policies/tdmpc/modeling_tdmpc.py | 2 +- lerobot/common/policies/vqbet/modeling_vqbet.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/lerobot/common/policies/act/modeling_act.py b/lerobot/common/policies/act/modeling_act.py index 773a9e3882..615f238f11 100644 --- a/lerobot/common/policies/act/modeling_act.py +++ b/lerobot/common/policies/act/modeling_act.py @@ -44,7 +44,7 @@ class ACTPolicy(PreTrainedPolicy): Hardware (paper: https://arxiv.org/abs/2304.13705, code: https://github.com/tonyzhaozh/act) """ - config_class: ACTConfig + config_class = ACTConfig name = "act" def __init__( diff --git a/lerobot/common/policies/tdmpc/modeling_tdmpc.py b/lerobot/common/policies/tdmpc/modeling_tdmpc.py index aa6b30b0ab..519bc22003 100644 --- a/lerobot/common/policies/tdmpc/modeling_tdmpc.py +++ b/lerobot/common/policies/tdmpc/modeling_tdmpc.py @@ -60,7 +60,7 @@ class TDMPCPolicy(PreTrainedPolicy): match our xarm environment. 
""" - config_class: TDMPCConfig + config_class = TDMPCConfig name = "tdmpc" def __init__(self, config: TDMPCConfig, dataset_stats: dict[str, dict[str, Tensor]] | None = None): diff --git a/lerobot/common/policies/vqbet/modeling_vqbet.py b/lerobot/common/policies/vqbet/modeling_vqbet.py index d250e54ace..c4d4a46dd0 100644 --- a/lerobot/common/policies/vqbet/modeling_vqbet.py +++ b/lerobot/common/policies/vqbet/modeling_vqbet.py @@ -41,7 +41,7 @@ class VQBeTPolicy(PreTrainedPolicy): VQ-BeT Policy as per "Behavior Generation with Latent Actions" """ - config_class: VQBeTConfig + config_class = VQBeTConfig name = "vqbet" def __init__( From 17358d61018cf47ab36c6442134edfde4b96f597 Mon Sep 17 00:00:00 2001 From: Simon Alibert Date: Thu, 23 Jan 2025 19:34:22 +0100 Subject: [PATCH 47/94] Fix from_pretrained kwargs --- lerobot/common/policies/factory.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lerobot/common/policies/factory.py b/lerobot/common/policies/factory.py index 0508d25dde..0152b78323 100644 --- a/lerobot/common/policies/factory.py +++ b/lerobot/common/policies/factory.py @@ -120,7 +120,7 @@ def make_policy( if cfg.pretrained_path: # Load a pretrained policy and override the config if needed (for example, if there are inference-time # hyperparameters that we want to vary). - kwargs["pretrained_model_name_or_path"] = cfg.pretrained_path + kwargs["pretrained_name_or_path"] = cfg.pretrained_path policy = policy_cls.from_pretrained(**kwargs) else: # Make a fresh policy. From 0862e2085d750f28a5aff132bc83b38357e0722f Mon Sep 17 00:00:00 2001 From: Simon Alibert Date: Thu, 23 Jan 2025 20:02:22 +0100 Subject: [PATCH 48/94] Remove policy_protocol --- lerobot/common/logger.py | 6 +- lerobot/common/optim/factory.py | 4 +- lerobot/common/policies/__init__.py | 1 - lerobot/common/policies/factory.py | 6 +- lerobot/common/policies/policy_protocol.py | 80 ---------------------- lerobot/common/utils/utils.py | 4 ++ lerobot/scripts/eval.py | 8 +-- lerobot/scripts/train.py | 4 +- tests/test_policies.py | 10 +-- 9 files changed, 20 insertions(+), 103 deletions(-) delete mode 100644 lerobot/common/policies/policy_protocol.py diff --git a/lerobot/common/logger.py b/lerobot/common/logger.py index 229bcd3801..9583c9110c 100644 --- a/lerobot/common/logger.py +++ b/lerobot/common/logger.py @@ -32,7 +32,7 @@ from torch.optim import Optimizer from torch.optim.lr_scheduler import LRScheduler -from lerobot.common.policies.policy_protocol import Policy +from lerobot.common.policies.pretrained import PreTrainedPolicy from lerobot.common.utils.utils import get_global_random_state from lerobot.configs.train import TRAIN_CONFIG_NAME, TrainPipelineConfig from lerobot.configs.types import FeatureType, NormalizationMode @@ -150,7 +150,7 @@ def get_last_pretrained_model_dir(cls, log_dir: str | Path) -> Path: """ return cls.get_last_checkpoint_dir(log_dir) / cls.pretrained_model_dir_name - def save_model(self, save_dir: Path, policy: Policy, wandb_artifact_name: str | None = None): + def save_model(self, save_dir: Path, policy: PreTrainedPolicy, wandb_artifact_name: str | None = None): """Save the weights of the Policy model using PyTorchModelHubMixin. The weights are saved in a folder called "pretrained_model" under the checkpoint directory. 
@@ -196,7 +196,7 @@ def save_checkpoint( self, train_step: int, identifier: str, - policy: Policy, + policy: PreTrainedPolicy, optimizer: Optimizer | None = None, scheduler: LRScheduler | None = None, ): diff --git a/lerobot/common/optim/factory.py b/lerobot/common/optim/factory.py index 5d8fdb57af..c0ebc7b62a 100644 --- a/lerobot/common/optim/factory.py +++ b/lerobot/common/optim/factory.py @@ -21,13 +21,13 @@ from torch.optim.lr_scheduler import LRScheduler from lerobot.common.logger import TRAINING_STATE -from lerobot.common.policies import Policy +from lerobot.common.policies.pretrained import PreTrainedPolicy from lerobot.common.utils.utils import get_global_random_state, set_global_random_state from lerobot.configs.train import TrainPipelineConfig def make_optimizer_and_scheduler( - cfg: TrainPipelineConfig, policy: Policy + cfg: TrainPipelineConfig, policy: PreTrainedPolicy ) -> tuple[Optimizer, LRScheduler | None]: params = policy.get_optim_params() if cfg.use_policy_training_preset else policy.parameters() optimizer = cfg.optimizer.build(params) diff --git a/lerobot/common/policies/__init__.py b/lerobot/common/policies/__init__.py index 5d1e24e551..58db9849f3 100644 --- a/lerobot/common/policies/__init__.py +++ b/lerobot/common/policies/__init__.py @@ -1,5 +1,4 @@ from .act.configuration_act import ACTConfig as ACTConfig from .diffusion.configuration_diffusion import DiffusionConfig as DiffusionConfig -from .policy_protocol import Policy as Policy from .tdmpc.configuration_tdmpc import TDMPCConfig as TDMPCConfig from .vqbet.configuration_vqbet import VQBeTConfig as VQBeTConfig diff --git a/lerobot/common/policies/factory.py b/lerobot/common/policies/factory.py index 0152b78323..875c018751 100644 --- a/lerobot/common/policies/factory.py +++ b/lerobot/common/policies/factory.py @@ -23,14 +23,14 @@ from lerobot.common.envs.utils import env_to_policy_features from lerobot.common.policies.act.configuration_act import ACTConfig from lerobot.common.policies.diffusion.configuration_diffusion import DiffusionConfig -from lerobot.common.policies.policy_protocol import Policy +from lerobot.common.policies.pretrained import PreTrainedPolicy from lerobot.common.policies.tdmpc.configuration_tdmpc import TDMPCConfig from lerobot.common.policies.vqbet.configuration_vqbet import VQBeTConfig from lerobot.configs.policies import PretrainedConfig from lerobot.configs.types import FeatureType -def get_policy_class(name: str) -> Policy: +def get_policy_class(name: str) -> PreTrainedPolicy: """Get the policy's class and config class given a name (matching the policy class' `name` attribute).""" if name == "tdmpc": from lerobot.common.policies.tdmpc.modeling_tdmpc import TDMPCPolicy @@ -71,7 +71,7 @@ def make_policy( ds_meta: LeRobotDatasetMetadata | None = None, env: gym.Env | None = None, env_cfg: EnvConfig | None = None, -) -> Policy: +) -> PreTrainedPolicy: """Make an instance of a policy class. Args: diff --git a/lerobot/common/policies/policy_protocol.py b/lerobot/common/policies/policy_protocol.py deleted file mode 100644 index 7e25b8f0e9..0000000000 --- a/lerobot/common/policies/policy_protocol.py +++ /dev/null @@ -1,80 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2024 The HuggingFace Inc. team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""A protocol that all policies should follow. - -This provides a mechanism for type-hinting and isinstance checks without requiring the policies classes -subclass a base class. - -The protocol structure, method signatures, and docstrings should be used by developers as a reference for -how to implement new policies. -""" - -from typing import Protocol, runtime_checkable - -from torch import Tensor - - -@runtime_checkable -class Policy(Protocol): - """The required interface for implementing a policy. - - We also expect all policies to subclass torch.nn.Module and PyTorchModelHubMixin. - """ - - name: str - - def __init__(self, cfg, dataset_stats: dict[str, dict[str, Tensor]] | None = None): - """ - Args: - cfg: Policy configuration class instance or None, in which case the default instantiation of the - configuration class is used. - dataset_stats: Dataset statistics to be used for normalization. - """ - - def get_optim_params(self) -> dict: - """ - Returns the policy-specific parameters dict to be passed on to the optimizer. - """ - - def reset(self): - """To be called whenever the environment is reset. - - Does things like clearing caches. - """ - - def forward(self, batch: dict[str, Tensor]) -> dict: - """Run the batch through the model and compute the loss for training or validation. - - Returns a dictionary with "loss" and potentially other information. Apart from "loss" which is a Tensor, all - other items should be logging-friendly, native Python types. - """ - - def select_action(self, batch: dict[str, Tensor]) -> Tensor: - """Return one action to run in the environment (potentially in batch mode). - - When the model uses a history of observations, or outputs a sequence of actions, this method deals - with caching. - """ - - -@runtime_checkable -class PolicyWithUpdate(Policy, Protocol): - def update(self): - """An update method that is to be called after a training optimization step. - - Implements an additional updates the model parameters may need (for example, doing an EMA step for a - target model, or incrementing an internal buffer). 
- """ diff --git a/lerobot/common/utils/utils.py b/lerobot/common/utils/utils.py index cbd5e8b391..a85712c54d 100644 --- a/lerobot/common/utils/utils.py +++ b/lerobot/common/utils/utils.py @@ -210,3 +210,7 @@ def get_channel_first_image_shape(image_shape: tuple) -> tuple: raise ValueError(image_shape) return shape + + +def has_method(cls: Any, method_name: str): + return hasattr(cls, method_name) and callable(getattr(cls, method_name)) diff --git a/lerobot/scripts/eval.py b/lerobot/scripts/eval.py index 4054dcd825..c67ac31b2a 100644 --- a/lerobot/scripts/eval.py +++ b/lerobot/scripts/eval.py @@ -63,7 +63,7 @@ from lerobot.common.envs.utils import preprocess_observation from lerobot.common.logger import log_output_dir from lerobot.common.policies.factory import make_policy -from lerobot.common.policies.policy_protocol import Policy +from lerobot.common.policies.pretrained import PreTrainedPolicy from lerobot.common.policies.utils import get_device_from_parameters from lerobot.common.utils.io_utils import write_video from lerobot.common.utils.utils import ( @@ -78,7 +78,7 @@ def rollout( env: gym.vector.VectorEnv, - policy: Policy, + policy: PreTrainedPolicy, seeds: list[int] | None = None, return_observations: bool = False, render_callback: Callable[[gym.vector.VectorEnv], None] | None = None, @@ -205,7 +205,7 @@ def rollout( def eval_policy( env: gym.vector.VectorEnv, - policy: torch.nn.Module, + policy: PreTrainedPolicy, n_episodes: int, max_episodes_rendered: int = 0, videos_dir: Path | None = None, @@ -229,7 +229,7 @@ def eval_policy( if max_episodes_rendered > 0 and not videos_dir: raise ValueError("If max_episodes_rendered > 0, videos_dir must be provided.") - assert isinstance(policy, Policy) + assert isinstance(policy, PreTrainedPolicy) start = time.time() policy.eval() diff --git a/lerobot/scripts/train.py b/lerobot/scripts/train.py index 9933666b52..a7d3fa543f 100644 --- a/lerobot/scripts/train.py +++ b/lerobot/scripts/train.py @@ -35,11 +35,11 @@ from lerobot.common.logger import Logger, log_output_dir from lerobot.common.optim.factory import load_training_state, make_optimizer_and_scheduler from lerobot.common.policies.factory import make_policy -from lerobot.common.policies.policy_protocol import PolicyWithUpdate from lerobot.common.policies.utils import get_device_from_parameters from lerobot.common.utils.utils import ( format_big_number, get_safe_torch_device, + has_method, init_logging, set_global_seed, ) @@ -89,7 +89,7 @@ def update_policy( if lr_scheduler is not None: lr_scheduler.step() - if isinstance(policy, PolicyWithUpdate): + if has_method(policy, "update"): # To possibly update an internal buffer (for instance an Exponential Moving Average like in TDMPC). 
policy.update() diff --git a/tests/test_policies.py b/tests/test_policies.py index 6366f399a9..bf6eabcc7c 100644 --- a/tests/test_policies.py +++ b/tests/test_policies.py @@ -20,7 +20,6 @@ import einops import pytest import torch -from huggingface_hub import PyTorchModelHubMixin from safetensors.torch import load_file from lerobot import available_policies @@ -36,7 +35,7 @@ make_policy_config, ) from lerobot.common.policies.normalize import Normalize, Unnormalize -from lerobot.common.policies.policy_protocol import Policy +from lerobot.common.policies.pretrained import PreTrainedPolicy from lerobot.common.utils.utils import seeded_context from lerobot.configs.default import DatasetConfig from lerobot.configs.train import TrainPipelineConfig @@ -150,12 +149,7 @@ def test_policy(ds_repo_id, env_name, env_kwargs, policy_name, policy_kwargs): # Check that we can make the policy object. dataset = make_dataset(train_cfg) policy = make_policy(train_cfg.policy, ds_meta=dataset.meta, device=DEVICE) - # Check that the policy follows the required protocol. - assert isinstance( - policy, Policy - ), f"The policy does not follow the required protocol. Please see {Policy.__module__}.{Policy.__name__}." - assert isinstance(policy, torch.nn.Module) - assert isinstance(policy, PyTorchModelHubMixin) + assert isinstance(policy, PreTrainedPolicy) # Check that we run select_actions and get the appropriate output. env = make_env(train_cfg.env, n_envs=2) From 25f90205d55dc16a72194abcf9ab134c8d4a5b31 Mon Sep 17 00:00:00 2001 From: Simon Alibert Date: Thu, 23 Jan 2025 20:12:16 +0100 Subject: [PATCH 49/94] Camelize PretrainedConfig --- lerobot/common/datasets/factory.py | 6 +++--- lerobot/common/policies/act/configuration_act.py | 6 +++--- .../policies/diffusion/configuration_diffusion.py | 6 +++--- lerobot/common/policies/factory.py | 6 +++--- lerobot/common/policies/pretrained.py | 12 ++++++------ lerobot/common/policies/tdmpc/configuration_tdmpc.py | 6 +++--- lerobot/common/policies/vqbet/configuration_vqbet.py | 6 +++--- lerobot/common/robot_devices/control_configs.py | 6 +++--- lerobot/configs/eval.py | 6 +++--- lerobot/configs/policies.py | 6 +++--- lerobot/configs/train.py | 6 +++--- lerobot/scripts/push_pretrained.py | 4 ++-- tests/test_control_robot.py | 4 ++-- 13 files changed, 40 insertions(+), 40 deletions(-) diff --git a/lerobot/common/datasets/factory.py b/lerobot/common/datasets/factory.py index a507c7d479..1b5863466d 100644 --- a/lerobot/common/datasets/factory.py +++ b/lerobot/common/datasets/factory.py @@ -24,7 +24,7 @@ MultiLeRobotDataset, ) from lerobot.common.datasets.transforms import ImageTransforms -from lerobot.configs.policies import PretrainedConfig +from lerobot.configs.policies import PreTrainedConfig from lerobot.configs.train import TrainPipelineConfig IMAGENET_STATS = { @@ -34,7 +34,7 @@ def resolve_delta_timestamps( - cfg: PretrainedConfig, ds_meta: LeRobotDatasetMetadata + cfg: PreTrainedConfig, ds_meta: LeRobotDatasetMetadata ) -> dict[str, list] | None: """Resolves delta_timestamps config key (in-place) by using `eval`. @@ -59,7 +59,7 @@ def resolve_delta_timestamps( def make_dataset(cfg: TrainPipelineConfig) -> LeRobotDataset | MultiLeRobotDataset: """ Args: - cfg: A TrainPipelineConfig config which contains a DatasetConfig and a PretrainedConfig. + cfg: A TrainPipelineConfig config which contains a DatasetConfig and a PreTrainedConfig. Returns: A LeRobotDataset. 
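With the `Policy`/`PolicyWithUpdate` protocols removed in [PATCH 48/94] above, the training loop now duck-types the optional `update()` hook through the small `has_method` helper added to `lerobot/common/utils/utils.py`. A minimal sketch of the pattern (the `PlainPolicy`/`EMAPolicy` classes are hypothetical stand-ins, not LeRobot code):

```python
from typing import Any


def has_method(obj: Any, method_name: str) -> bool:
    # Same check as the helper added above.
    return hasattr(obj, method_name) and callable(getattr(obj, method_name))


class PlainPolicy:
    """No post-optimization hook."""


class EMAPolicy:
    """Hypothetical policy that keeps an EMA target network, like TD-MPC."""

    def update(self):
        print("EMA step on target network")


for policy in (PlainPolicy(), EMAPolicy()):
    if has_method(policy, "update"):
        # Mirrors the call site after the optimizer step in lerobot/scripts/train.py.
        policy.update()
```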
diff --git a/lerobot/common/policies/act/configuration_act.py b/lerobot/common/policies/act/configuration_act.py index 858cd70d36..4f724c1259 100644 --- a/lerobot/common/policies/act/configuration_act.py +++ b/lerobot/common/policies/act/configuration_act.py @@ -16,13 +16,13 @@ from dataclasses import dataclass, field from lerobot.common.optim.optimizers import AdamWConfig -from lerobot.configs.policies import PretrainedConfig +from lerobot.configs.policies import PreTrainedConfig from lerobot.configs.types import NormalizationMode -@PretrainedConfig.register_subclass("act") +@PreTrainedConfig.register_subclass("act") @dataclass -class ACTConfig(PretrainedConfig): +class ACTConfig(PreTrainedConfig): """Configuration class for the Action Chunking Transformers policy. Defaults are configured for training on bimanual Aloha tasks like "insertion" or "transfer". diff --git a/lerobot/common/policies/diffusion/configuration_diffusion.py b/lerobot/common/policies/diffusion/configuration_diffusion.py index b92c9974c9..31d5dc8b09 100644 --- a/lerobot/common/policies/diffusion/configuration_diffusion.py +++ b/lerobot/common/policies/diffusion/configuration_diffusion.py @@ -18,13 +18,13 @@ from lerobot.common.optim.optimizers import AdamConfig from lerobot.common.optim.schedulers import DiffuserSchedulerConfig -from lerobot.configs.policies import PretrainedConfig +from lerobot.configs.policies import PreTrainedConfig from lerobot.configs.types import NormalizationMode -@PretrainedConfig.register_subclass("diffusion") +@PreTrainedConfig.register_subclass("diffusion") @dataclass -class DiffusionConfig(PretrainedConfig): +class DiffusionConfig(PreTrainedConfig): """Configuration class for DiffusionPolicy. Defaults are configured for training with PushT providing proprioceptive and single camera observations. 
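The `PreTrainedConfig.register_subclass(...)` decorators renamed above are what back CLI selection such as `--policy.type=act`: each policy config registers itself on draccus' choice registry under a type tag. A rough sketch of how a new policy config could plug into the same mechanism, assuming a LeRobot environment at this point in the series (the `MyPolicyConfig` name and its fields are made up, and a complete config would also implement `PreTrainedConfig`'s abstract members):

```python
from dataclasses import dataclass, field

from lerobot.configs.policies import PreTrainedConfig
from lerobot.configs.types import NormalizationMode


@PreTrainedConfig.register_subclass("my_policy")  # would be selectable via --policy.type=my_policy
@dataclass
class MyPolicyConfig(PreTrainedConfig):
    # Hypothetical hyperparameters, for illustration only.
    n_obs_steps: int = 1
    hidden_dim: int = 256
    normalization_mapping: dict[str, NormalizationMode] = field(
        default_factory=lambda: {
            "STATE": NormalizationMode.MEAN_STD,
            "ACTION": NormalizationMode.MIN_MAX,
        }
    )
```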
diff --git a/lerobot/common/policies/factory.py b/lerobot/common/policies/factory.py index 875c018751..896ef0a2a8 100644 --- a/lerobot/common/policies/factory.py +++ b/lerobot/common/policies/factory.py @@ -26,7 +26,7 @@ from lerobot.common.policies.pretrained import PreTrainedPolicy from lerobot.common.policies.tdmpc.configuration_tdmpc import TDMPCConfig from lerobot.common.policies.vqbet.configuration_vqbet import VQBeTConfig -from lerobot.configs.policies import PretrainedConfig +from lerobot.configs.policies import PreTrainedConfig from lerobot.configs.types import FeatureType @@ -52,7 +52,7 @@ def get_policy_class(name: str) -> PreTrainedPolicy: raise NotImplementedError(f"Policy with name {name} is not implemented.") -def make_policy_config(policy_type: str, **kwargs) -> PretrainedConfig: +def make_policy_config(policy_type: str, **kwargs) -> PreTrainedConfig: if policy_type == "tdmpc": return TDMPCConfig(**kwargs) elif policy_type == "diffusion": @@ -66,7 +66,7 @@ def make_policy_config(policy_type: str, **kwargs) -> PretrainedConfig: def make_policy( - cfg: PretrainedConfig, + cfg: PreTrainedConfig, device: str, ds_meta: LeRobotDatasetMetadata | None = None, env: gym.Env | None = None, diff --git a/lerobot/common/policies/pretrained.py b/lerobot/common/policies/pretrained.py index ff3b3854a0..288f139b3d 100644 --- a/lerobot/common/policies/pretrained.py +++ b/lerobot/common/policies/pretrained.py @@ -13,7 +13,7 @@ from torch import Tensor, nn from lerobot.common.utils.hub import HubMixin -from lerobot.configs.policies import PretrainedConfig +from lerobot.configs.policies import PreTrainedConfig T = TypeVar("T", bound="PreTrainedPolicy") @@ -37,12 +37,12 @@ class PreTrainedPolicy(nn.Module, HubMixin, abc.ABC): config_class: None name: None - def __init__(self, config: PretrainedConfig, *inputs, **kwargs): + def __init__(self, config: PreTrainedConfig, *inputs, **kwargs): super().__init__() - if not isinstance(config, PretrainedConfig): + if not isinstance(config, PreTrainedConfig): raise ValueError( f"Parameter config in `{self.__class__.__name__}(config)` should be an instance of class " - "`PretrainedConfig`. To create a model from a pretrained model use " + "`PreTrainedConfig`. To create a model from a pretrained model use " f"`model = {self.__class__.__name__}.from_pretrained(PRETRAINED_MODEL_NAME)`" ) self.config = config @@ -64,7 +64,7 @@ def from_pretrained( cls: Type[T], pretrained_name_or_path: str | Path, *, - config: PretrainedConfig | None = None, + config: PreTrainedConfig | None = None, force_download: bool = False, resume_download: bool | None = None, proxies: dict | None = None, @@ -81,7 +81,7 @@ def from_pretrained( deactivated). To train it, you should first set it back in training mode with `policy.train()`. 
""" if config is None: - config = PretrainedConfig.from_pretrained( + config = PreTrainedConfig.from_pretrained( pretrained_name_or_path=pretrained_name_or_path, force_download=force_download, resume_download=resume_download, diff --git a/lerobot/common/policies/tdmpc/configuration_tdmpc.py b/lerobot/common/policies/tdmpc/configuration_tdmpc.py index 0d2b046ff4..5b128ec552 100644 --- a/lerobot/common/policies/tdmpc/configuration_tdmpc.py +++ b/lerobot/common/policies/tdmpc/configuration_tdmpc.py @@ -17,13 +17,13 @@ from dataclasses import dataclass, field from lerobot.common.optim.optimizers import AdamConfig -from lerobot.configs.policies import PretrainedConfig +from lerobot.configs.policies import PreTrainedConfig from lerobot.configs.types import NormalizationMode -@PretrainedConfig.register_subclass("tdmpc") +@PreTrainedConfig.register_subclass("tdmpc") @dataclass -class TDMPCConfig(PretrainedConfig): +class TDMPCConfig(PreTrainedConfig): """Configuration class for TDMPCPolicy. Defaults are configured for training with xarm_lift_medium_replay providing proprioceptive and single diff --git a/lerobot/common/policies/vqbet/configuration_vqbet.py b/lerobot/common/policies/vqbet/configuration_vqbet.py index c2a3ca6960..2f3617cc68 100644 --- a/lerobot/common/policies/vqbet/configuration_vqbet.py +++ b/lerobot/common/policies/vqbet/configuration_vqbet.py @@ -20,13 +20,13 @@ from lerobot.common.optim.optimizers import AdamConfig from lerobot.common.optim.schedulers import VQBeTSchedulerConfig -from lerobot.configs.policies import PretrainedConfig +from lerobot.configs.policies import PreTrainedConfig from lerobot.configs.types import NormalizationMode -@PretrainedConfig.register_subclass("vqbet") +@PreTrainedConfig.register_subclass("vqbet") @dataclass -class VQBeTConfig(PretrainedConfig): +class VQBeTConfig(PreTrainedConfig): """Configuration class for VQ-BeT. Defaults are configured for training with PushT providing proprioceptive and single camera observations. diff --git a/lerobot/common/robot_devices/control_configs.py b/lerobot/common/robot_devices/control_configs.py index 30c4984901..24d4546161 100644 --- a/lerobot/common/robot_devices/control_configs.py +++ b/lerobot/common/robot_devices/control_configs.py @@ -5,7 +5,7 @@ from lerobot.common.robot_devices.robots.configs import RobotConfig from lerobot.configs import parser -from lerobot.configs.policies import PretrainedConfig +from lerobot.configs.policies import PreTrainedConfig @dataclass @@ -39,7 +39,7 @@ class RecordControlConfig(ControlConfig): single_task: str # Root directory where the dataset will be stored (e.g. 'dataset/path'). root: str | Path | None = None - policy: PretrainedConfig | None = None + policy: PreTrainedConfig | None = None # TODO(rcadene, aliberts): By default, use device and use_amp values from policy checkpoint. device: str | None = None # cuda | cpu | mps # `use_amp` determines whether to use Automatic Mixed Precision (AMP) for training and evaluation. 
With AMP, @@ -89,7 +89,7 @@ def __post_init__(self): policy_path = parser.get_path_arg("policy") if policy_path: cli_overrides = parser.get_cli_overrides("policy") - self.policy = PretrainedConfig.from_pretrained(policy_path, cli_overrides=cli_overrides) + self.policy = PreTrainedConfig.from_pretrained(policy_path, cli_overrides=cli_overrides) self.policy.pretrained_path = policy_path if self.policy is not None: diff --git a/lerobot/configs/eval.py b/lerobot/configs/eval.py index f14a8bbae6..2148a1c536 100644 --- a/lerobot/configs/eval.py +++ b/lerobot/configs/eval.py @@ -4,7 +4,7 @@ from lerobot.common import envs, policies # noqa: F401 from lerobot.configs import parser -from lerobot.configs.policies import PretrainedConfig +from lerobot.configs.policies import PreTrainedConfig @dataclass @@ -34,7 +34,7 @@ class EvalPipelineConfig: # (useful for debugging). This argument is mutually exclusive with `--config`. env: envs.EnvConfig eval: EvalConfig = field(default_factory=EvalConfig) - policy: PretrainedConfig | None = None + policy: PreTrainedConfig | None = None output_dir: Path | None = None job_name: str | None = None # TODO(rcadene, aliberts): By default, use device and use_amp values from policy checkpoint. @@ -55,7 +55,7 @@ def __post_init__(self): policy_path = parser.get_path_arg("policy") if policy_path: cli_overrides = parser.get_cli_overrides("policy") - self.policy = PretrainedConfig.from_pretrained(policy_path, cli_overrides=cli_overrides) + self.policy = PreTrainedConfig.from_pretrained(policy_path, cli_overrides=cli_overrides) self.policy.pretrained_path = policy_path if not self.job_name: diff --git a/lerobot/configs/policies.py b/lerobot/configs/policies.py index de1a977be7..1ba9d0387f 100644 --- a/lerobot/configs/policies.py +++ b/lerobot/configs/policies.py @@ -14,12 +14,12 @@ from lerobot.common.utils.hub import HubMixin from lerobot.configs.types import FeatureType, NormalizationMode, PolicyFeature -# Generic variable that is either PretrainedConfig or a subclass thereof -T = TypeVar("T", bound="PretrainedConfig") +# Generic variable that is either PreTrainedConfig or a subclass thereof +T = TypeVar("T", bound="PreTrainedConfig") @dataclass -class PretrainedConfig(draccus.ChoiceRegistry, HubMixin, abc.ABC): +class PreTrainedConfig(draccus.ChoiceRegistry, HubMixin, abc.ABC): """ Base configuration class for policy models. diff --git a/lerobot/configs/train.py b/lerobot/configs/train.py index 911c16a626..e567f5b9a2 100644 --- a/lerobot/configs/train.py +++ b/lerobot/configs/train.py @@ -15,7 +15,7 @@ from lerobot.configs import parser from lerobot.configs.default import DatasetConfig, WandBConfig from lerobot.configs.eval import EvalConfig -from lerobot.configs.policies import PretrainedConfig +from lerobot.configs.policies import PreTrainedConfig TRAIN_CONFIG_NAME = "train_config.json" @@ -73,7 +73,7 @@ class OnlineConfig: class TrainPipelineConfig(HubMixin): dataset: DatasetConfig env: envs.EnvConfig | None = None - policy: PretrainedConfig | None = None + policy: PreTrainedConfig | None = None # Set `dir` to where you would like to save all of the run outputs. If you run another training session # with the same value for `dir` its contents will be overwritten unless you set `resume` to true. 
output_dir: Path | None = None @@ -127,7 +127,7 @@ def __post_init__(self): if policy_path: # Only load the policy config cli_overrides = parser.get_cli_overrides("policy") - self.policy = PretrainedConfig.from_pretrained(policy_path, cli_overrides=cli_overrides) + self.policy = PreTrainedConfig.from_pretrained(policy_path, cli_overrides=cli_overrides) self.policy.pretrained_path = policy_path if not self.job_name: diff --git a/lerobot/scripts/push_pretrained.py b/lerobot/scripts/push_pretrained.py index 078cb0587d..e3c683f96f 100644 --- a/lerobot/scripts/push_pretrained.py +++ b/lerobot/scripts/push_pretrained.py @@ -34,7 +34,7 @@ @dataclass -class PushPretrainedConfig: +class PushPreTrainedConfig: pretrained_path: Path repo_id: str branch: str | None = None @@ -43,7 +43,7 @@ class PushPretrainedConfig: @draccus.wrap() -def main(cfg: PushPretrainedConfig): +def main(cfg: PushPreTrainedConfig): hub_api = HfApi() hub_api.create_repo( repo_id=cfg.repo_id, diff --git a/tests/test_control_robot.py b/tests/test_control_robot.py index be170071fe..e80b507c08 100644 --- a/tests/test_control_robot.py +++ b/tests/test_control_robot.py @@ -39,7 +39,7 @@ TeleoperateControlConfig, ) from lerobot.configs.default import DatasetConfig -from lerobot.configs.policies import PretrainedConfig +from lerobot.configs.policies import PreTrainedConfig from lerobot.configs.train import TrainPipelineConfig from lerobot.scripts.control_robot import calibrate, record, replay, teleoperate from tests.test_robots import make_robot @@ -243,7 +243,7 @@ def test_record_and_replay_and_policy(tmpdir, request, robot_type, mock): use_amp=False, ) - rec_eval_cfg.policy = PretrainedConfig.from_pretrained(pretrained_policy_path) + rec_eval_cfg.policy = PreTrainedConfig.from_pretrained(pretrained_policy_path) rec_eval_cfg.policy.pretrained_path = pretrained_policy_path dataset = record(robot, rec_eval_cfg) From 4e7c4ddb51efd79f900c87075369a2a61f5dacd9 Mon Sep 17 00:00:00 2001 From: Remi Date: Fri, 24 Jan 2025 14:12:41 +0100 Subject: [PATCH 50/94] Additional fix while retraining policies (#629) Co-authored-by: Simon Alibert --- Makefile | 9 ++- lerobot/common/envs/configs.py | 2 + lerobot/common/policies/normalize.py | 13 +++-- .../policies/tdmpc/configuration_tdmpc.py | 8 +-- .../policies/vqbet/configuration_vqbet.py | 2 +- lerobot/configs/train.py | 13 +++++ lerobot/configs/types.py | 1 + lerobot/scripts/train.py | 57 ++++++++++++------- 8 files changed, 70 insertions(+), 35 deletions(-) diff --git a/Makefile b/Makefile index 073b4183ac..02ac76020e 100644 --- a/Makefile +++ b/Makefile @@ -131,13 +131,12 @@ test-tdmpc-ete-eval: --eval.batch_size=1 \ --device=$(DEVICE) -# FIXME: currently broken -# test-tdmpc-ete-train-with-online: -# python lerobot/scripts/train.py \ +test-tdmpc-ete-train-with-online: + python lerobot/scripts/train.py \ --policy.type=tdmpc \ --env.type=pusht \ --env.obs_type=environment_state_agent_pos \ - --env.episode_length=10 \ + --env.episode_length=5 \ --dataset.repo_id=lerobot/pusht_keypoints \ --dataset.image_transforms.enable=true \ --dataset.episodes='[0]' \ @@ -147,7 +146,7 @@ test-tdmpc-ete-eval: --online.rollout_n_episodes=2 \ --online.rollout_batch_size=2 \ --online.steps_between_rollouts=10 \ - --online.buffer_capacity=15 \ + --online.buffer_capacity=1000 \ --online.env_seed=10000 \ --save_checkpoint=false \ --save_freq=10 \ diff --git a/lerobot/common/envs/configs.py b/lerobot/common/envs/configs.py index ac96f5c59b..6259ca94a2 100644 --- a/lerobot/common/envs/configs.py +++ 
b/lerobot/common/envs/configs.py @@ -99,6 +99,7 @@ def gym_kwargs(self) -> dict: "render_mode": self.render_mode, "visualization_width": self.visualization_width, "visualization_height": self.visualization_height, + "max_episode_steps": self.episode_length, } @@ -137,4 +138,5 @@ def gym_kwargs(self) -> dict: "render_mode": self.render_mode, "visualization_width": self.visualization_width, "visualization_height": self.visualization_height, + "max_episode_steps": self.episode_length, } diff --git a/lerobot/common/policies/normalize.py b/lerobot/common/policies/normalize.py index d8f021d938..0d188d8157 100644 --- a/lerobot/common/policies/normalize.py +++ b/lerobot/common/policies/normalize.py @@ -37,8 +37,8 @@ def create_stats_buffers( stats_buffers = {} for key, ft in features.items(): - norm_mode = norm_map.get(ft.type, None) - if norm_mode is None: + norm_mode = norm_map.get(ft.type, NormalizationMode.IDENTITY) + if norm_mode is NormalizationMode.IDENTITY: continue assert isinstance(norm_mode, NormalizationMode) @@ -140,8 +140,8 @@ def __init__( def forward(self, batch: dict[str, Tensor]) -> dict[str, Tensor]: batch = dict(batch) # shallow copy avoids mutating the input batch for key, ft in self.features.items(): - norm_mode = self.norm_map.get(ft.type, None) - if norm_mode is None: + norm_mode = self.norm_map.get(ft.type, NormalizationMode.IDENTITY) + if norm_mode is NormalizationMode.IDENTITY: continue buffer = getattr(self, "buffer_" + key.replace(".", "_")) @@ -210,7 +210,10 @@ def __init__( def forward(self, batch: dict[str, Tensor]) -> dict[str, Tensor]: batch = dict(batch) # shallow copy avoids mutating the input batch for key, ft in self.features.items(): - norm_mode = self.norm_map.get(ft.type, None) + norm_mode = self.norm_map.get(ft.type, NormalizationMode.IDENTITY) + if norm_mode is NormalizationMode.IDENTITY: + continue + buffer = getattr(self, "buffer_" + key.replace(".", "_")) if norm_mode is NormalizationMode.MEAN_STD: diff --git a/lerobot/common/policies/tdmpc/configuration_tdmpc.py b/lerobot/common/policies/tdmpc/configuration_tdmpc.py index 5b128ec552..c3e8aee687 100644 --- a/lerobot/common/policies/tdmpc/configuration_tdmpc.py +++ b/lerobot/common/policies/tdmpc/configuration_tdmpc.py @@ -112,11 +112,11 @@ class TDMPCConfig(PreTrainedConfig): horizon: int = 5 n_action_steps: int = 1 - normalization_mapping: dict[str, NormalizationMode | None] = field( + normalization_mapping: dict[str, NormalizationMode] = field( default_factory=lambda: { - "VISUAL": None, - "STATE": None, - "ENV": None, + "VISUAL": NormalizationMode.IDENTITY, + "STATE": NormalizationMode.IDENTITY, + "ENV": NormalizationMode.IDENTITY, "ACTION": NormalizationMode.MIN_MAX, } ) diff --git a/lerobot/common/policies/vqbet/configuration_vqbet.py b/lerobot/common/policies/vqbet/configuration_vqbet.py index 2f3617cc68..47007e8231 100644 --- a/lerobot/common/policies/vqbet/configuration_vqbet.py +++ b/lerobot/common/policies/vqbet/configuration_vqbet.py @@ -98,7 +98,7 @@ class VQBeTConfig(PreTrainedConfig): normalization_mapping: dict[str, NormalizationMode] = field( default_factory=lambda: { - "VISUAL": NormalizationMode.MEAN_STD, + "VISUAL": NormalizationMode.IDENTITY, "STATE": NormalizationMode.MIN_MAX, "ACTION": NormalizationMode.MIN_MAX, } diff --git a/lerobot/configs/train.py b/lerobot/configs/train.py index e567f5b9a2..b540f2f0a7 100644 --- a/lerobot/configs/train.py +++ b/lerobot/configs/train.py @@ -68,6 +68,19 @@ class OnlineConfig: # + eval + environment rendering simultaneously. 
do_rollout_async: bool = False + def __post_init__(self): + if self.steps == 0: + return + + if self.steps_between_rollouts is None: + raise ValueError( + "'steps_between_rollouts' must be set to a positive integer, but it is currently None." + ) + if self.env_seed is None: + raise ValueError("'env_seed' must be set to a positive integer, but it is currently None.") + if self.buffer_capacity is None: + raise ValueError("'buffer_capacity' must be set to a positive integer, but it is currently None.") + @dataclass class TrainPipelineConfig(HubMixin): diff --git a/lerobot/configs/types.py b/lerobot/configs/types.py index f31f437b1e..0ca45a1975 100644 --- a/lerobot/configs/types.py +++ b/lerobot/configs/types.py @@ -15,6 +15,7 @@ class FeatureType(str, Enum): class NormalizationMode(str, Enum): MIN_MAX = "MIN_MAX" MEAN_STD = "MEAN_STD" + IDENTITY = "IDENTITY" class DictLike(Protocol): diff --git a/lerobot/scripts/train.py b/lerobot/scripts/train.py index a7d3fa543f..78facdeea6 100644 --- a/lerobot/scripts/train.py +++ b/lerobot/scripts/train.py @@ -354,15 +354,17 @@ def evaluate_and_checkpoint_if_needed(step: int, is_online: bool): online_buffer_path, data_spec={ **{ - ft.key: {"shape": ft.shape, "dtype": np.dtype("float32")} - for ft in policy.config.input_features + key: {"shape": ft.shape, "dtype": np.dtype("float32")} + for key, ft in policy.config.input_features.items() }, **{ - ft.key: {"shape": ft.shape, "dtype": np.dtype("float32")} - for ft in policy.config.output_features + key: {"shape": ft.shape, "dtype": np.dtype("float32")} + for key, ft in policy.config.output_features.items() }, "next.reward": {"shape": (), "dtype": np.dtype("float32")}, "next.done": {"shape": (), "dtype": np.dtype("?")}, + "task_index": {"shape": (), "dtype": np.dtype("int64")}, + # FIXME: 'next.success' is expected by pusht env but not xarm "next.success": {"shape": (), "dtype": np.dtype("?")}, }, buffer_capacity=cfg.online.buffer_capacity, @@ -400,12 +402,14 @@ def evaluate_and_checkpoint_if_needed(step: int, is_online: bool): ) dl_iter = cycle(dataloader) - # Lock and thread pool executor for asynchronous online rollouts. When asynchronous mode is disabled, - # these are still used but effectively do nothing. - lock = Lock() - # Note: 1 worker because we only ever want to run one set of online rollouts at a time. Batch - # parallelization of rollouts is handled within the job. - executor = ThreadPoolExecutor(max_workers=1) + if cfg.online.do_rollout_async: + # Lock and thread pool executor for asynchronous online rollouts. + lock = Lock() + # Note: 1 worker because we only ever want to run one set of online rollouts at a time. Batch + # parallelization of rollouts is handled within the job. 
+ executor = ThreadPoolExecutor(max_workers=1) + else: + lock = None online_step = 0 online_rollout_s = 0 # time take to do online rollout @@ -424,10 +428,13 @@ def evaluate_and_checkpoint_if_needed(step: int, is_online: bool): def sample_trajectory_and_update_buffer(): nonlocal rollout_start_seed - with lock: + + with lock if lock is not None else nullcontext(): online_rollout_policy.load_state_dict(policy.state_dict()) + online_rollout_policy.eval() start_rollout_time = time.perf_counter() + with torch.no_grad(): eval_info = eval_policy( online_env, @@ -440,7 +447,14 @@ def sample_trajectory_and_update_buffer(): ) online_rollout_s = time.perf_counter() - start_rollout_time - with lock: + if len(offline_dataset.meta.tasks) > 1: + raise NotImplementedError("Add support for multi task.") + + # Hack to add a task to the online_dataset (0 is the first task of the offline_dataset) + total_num_frames = eval_info["episodes"]["index"].shape[0] + eval_info["episodes"]["task_index"] = torch.tensor([0] * total_num_frames, dtype=torch.int64) + + with lock if lock is not None else nullcontext(): start_update_buffer_time = time.perf_counter() online_dataset.add_data(eval_info["episodes"]) @@ -463,11 +477,14 @@ def sample_trajectory_and_update_buffer(): return online_rollout_s, update_online_buffer_s - future = executor.submit(sample_trajectory_and_update_buffer) - # If we aren't doing async rollouts, or if we haven't yet gotten enough examples in our buffer, wait - # here until the rollout and buffer update is done, before proceeding to the policy update steps. - if not cfg.online.do_rollout_async or len(online_dataset) <= cfg.online.buffer_seed_size: - online_rollout_s, update_online_buffer_s = future.result() + if lock is None: + online_rollout_s, update_online_buffer_s = sample_trajectory_and_update_buffer() + else: + future = executor.submit(sample_trajectory_and_update_buffer) + # If we aren't doing async rollouts, or if we haven't yet gotten enough examples in our buffer, wait + # here until the rollout and buffer update is done, before proceeding to the policy update steps. + if len(online_dataset) <= cfg.online.buffer_seed_size: + online_rollout_s, update_online_buffer_s = future.result() if len(online_dataset) <= cfg.online.buffer_seed_size: logging.info(f"Seeding online buffer: {len(online_dataset)}/{cfg.online.buffer_seed_size}") @@ -475,7 +492,7 @@ def sample_trajectory_and_update_buffer(): policy.train() for _ in range(cfg.online.steps_between_rollouts): - with lock: + with lock if lock is not None else nullcontext(): start_time = time.perf_counter() batch = next(dl_iter) dataloading_s = time.perf_counter() - start_time @@ -498,7 +515,7 @@ def sample_trajectory_and_update_buffer(): train_info["online_rollout_s"] = online_rollout_s train_info["update_online_buffer_s"] = update_online_buffer_s train_info["await_update_online_buffer_s"] = await_update_online_buffer_s - with lock: + with lock if lock is not None else nullcontext(): train_info["online_buffer_size"] = len(online_dataset) if step % cfg.log_freq == 0: @@ -513,7 +530,7 @@ def sample_trajectory_and_update_buffer(): # If we're doing async rollouts, we should now wait until we've completed them before proceeding # to do the next batch of rollouts. 
- if future.running(): + if cfg.online.do_rollout_async and future.running(): start = time.perf_counter() online_rollout_s, update_online_buffer_s = future.result() await_update_online_buffer_s = time.perf_counter() - start From 48d1817d7de7a2eb3c8383b00a660470a5b43b50 Mon Sep 17 00:00:00 2001 From: Simon Alibert Date: Fri, 24 Jan 2025 14:19:25 +0100 Subject: [PATCH 51/94] Actually reactivate tdmpc online test --- Makefile | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/Makefile b/Makefile index 02ac76020e..d5c4d5e795 100644 --- a/Makefile +++ b/Makefile @@ -26,9 +26,8 @@ test-end-to-end: ${MAKE} DEVICE=$(DEVICE) test-diffusion-ete-eval ${MAKE} DEVICE=$(DEVICE) test-tdmpc-ete-train ${MAKE} DEVICE=$(DEVICE) test-tdmpc-ete-eval + ${MAKE} DEVICE=$(DEVICE) test-tdmpc-ete-train-with-online -# ${MAKE} DEVICE=$(DEVICE) test-default-ete-eval -# ${MAKE} DEVICE=$(DEVICE) test-tdmpc-ete-train-with-online # ${MAKE} DEVICE=$(DEVICE) test-act-pusht-tutorial test-act-ete-train: From 1ee59ed0a04ef351374dc1f4a231ac750e41c112 Mon Sep 17 00:00:00 2001 From: Simon Alibert Date: Fri, 24 Jan 2025 18:31:29 +0100 Subject: [PATCH 52/94] Update example 4 --- examples/4_train_policy_with_script.md | 276 +++++++++++++------------ 1 file changed, 149 insertions(+), 127 deletions(-) diff --git a/examples/4_train_policy_with_script.md b/examples/4_train_policy_with_script.md index 2e443d57bd..8de6bad089 100644 --- a/examples/4_train_policy_with_script.md +++ b/examples/4_train_policy_with_script.md @@ -1,193 +1,212 @@ -This tutorial will explain the training script, how to use it, and particularly the use of Hydra to configure everything needed for the training run. +This tutorial will explain the training script, how to use it, and particularly how to configure everything needed for the training run. +> **Note:** The following assume you're running these commands on a machine equiped with a cuda GPU. If you don't have one (or if you're using a Mac), you can add `--device=cpu` (`--device=mps` respectively). However, be advised that the code executes much slower on cpu. + ## The training script LeRobot offers a training script at [`lerobot/scripts/train.py`](../../lerobot/scripts/train.py). At a high level it does the following: -- Loads a Hydra configuration file for the following steps (more on Hydra in a moment). -- Makes a simulation environment. -- Makes a dataset corresponding to that simulation environment. -- Makes a policy. +- Initialize/load a configuration for the following steps using. +- Instantiates a dataset. +- (Optional) Instantiates a simulation environment corresponding to that dataset. +- Instantiates a policy. - Runs a standard training loop with forward pass, backward pass, optimization step, and occasional logging, evaluation (of the policy on the environment), and checkpointing. -## Basics of how we use Hydra - -Explaining the ins and outs of [Hydra](https://hydra.cc/docs/intro/) is beyond the scope of this document, but here we'll share the main points you need to know. - -First, `lerobot/configs` has a directory structure like this: +## Overview of the configuration system -``` -. 
-├── default.yaml -├── env -│ ├── aloha.yaml -│ ├── pusht.yaml -│ └── xarm.yaml -└── policy - ├── act.yaml - ├── diffusion.yaml - └── tdmpc.yaml +In the training script, the main function `train` expects a `TrainPipelineConfig` object: +```python +# train.py +@parser.wrap() +def train(cfg: TrainPipelineConfig): ``` -**_For brevity, in the rest of this document we'll drop the leading `lerobot/configs` path. So `default.yaml` really refers to `lerobot/configs/default.yaml`._** +You can inspect the `TrainPipelineConfig` defined in [`lerobot/configs/train.py`](../../lerobot/configs/train.py) (which is heavily commented and meant to be a reference to understand any option) -When you run the training script with +When running the script, inputs for the command line are parsed thanks to the `@parser.wrap()` decorator and an instance of this class is automatically generated. Under the hood, this is done with [Draccus](https://github.com/dlwh/draccus) which is a tool dedicated for this purpose. If you're familiar with Hydra, Draccus can similarly load configurations from config files (.json, .yaml) and also override their values through command line inputs. Unlike Hydra, these configurations are pre-defined in the code through dataclasses rather than being defined entirely in config files. This allows for more rigorous serialization/deserialization, typing, and to manipulate configuration as objects directly in the code and not as dictionnaries or namespaces (which enables nice features in an IDE such as autocomplete, jump-to-def, etc.) +Let's have a look at a simplified example. Amongst other attributes, the training config has the following attributes: ```python -python lerobot/scripts/train.py +@dataclass +class TrainPipelineConfig: + dataset: DatasetConfig + env: envs.EnvConfig | None = None + policy: PreTrainedConfig | None = None ``` - -Hydra is set up to read `default.yaml` (via the `@hydra.main` decorator). If you take a look at the `@hydra.main`'s arguments you will see `config_path="../configs", config_name="default"`. At the top of `default.yaml`, is a `defaults` section which looks likes this: - -```yaml -defaults: - - _self_ - - env: pusht - - policy: diffusion +in which `DatasetConfig` for example is defined as such: +```python +@dataclass +class DatasetConfig: + repo_id: str + episodes: list[int] | None = None + video_backend: str = "pyav" ``` -This logic tells Hydra to incorporate configuration parameters from `env/pusht.yaml` and `policy/diffusion.yaml`. _Note: Be aware of the order as any configuration parameters with the same name will be overidden. Thus, `default.yaml` is overridden by `env/pusht.yaml` which is overidden by `policy/diffusion.yaml`_. +This creates a hierarchical relationship where, for exemple assuming we have a `cfg` instance of `TrainPipelineConfig`, we can access the `repo_id` value with `cfg.dataset.repo_id`. +From the command line, we can specify this value with using a very similar syntax `--dataset.repo_id=repo/id`. -Then, `default.yaml` also contains common configuration parameters such as `device: cuda` or `use_amp: false` (for enabling fp16 training). Some other parameters are set to `???` which indicates that they are expected to be set in additional yaml files. For instance, `training.offline_steps: ???` in `default.yaml` is set to `200000` in `diffusion.yaml`. +By default, every field takes its default value specified in the dataclass. 
If a field doesn't have a default value, it needs to be specified either from the command line or from a config file – which path is also given in the command line (more in this below). In the example above, the `dataset` field doesn't have a default value which means it must be specified. -Thanks to this `defaults` section in `default.yaml`, if you want to train Diffusion Policy with PushT, you really only need to run: - -```bash -python lerobot/scripts/train.py -``` -However, you can be more explicit and launch the exact same Diffusion Policy training on PushT with: +## Specifying values from the CLI +Let's say that we want to train [Diffusion Policy](../../lerobot/common/policies/diffusion) on the [pusht](https://huggingface.co/datasets/lerobot/pusht) dataset, using the [gym_pusht](https://github.com/huggingface/gym-pusht) environment for evaluation. The command to do so would look like this: ```bash -python lerobot/scripts/train.py policy=diffusion env=pusht +python lerobot/scripts/train.py \ + --dataset.repo_id=lerobot/pusht \ + --policy.type=diffusion \ + --env.type=pusht ``` -This way of overriding defaults via the CLI is especially useful when you want to change the policy and/or environment. For instance, you can train ACT on the default Aloha environment with: +Let's break this down: +- To specify the dataset, we just need to specify its `repo_id` on the hub which is the only required argument in the `DatasetConfig`. The rest of the fields have default values and in this case we are fine with those so we can just add the option `--dataset.repo_id=lerobot/pusht`. +- To specify the policy, we can just select diffusion policy using `--policy` appended with `.type`. Here, `.type` is a special argument which allows us to select config classes inheriting from `draccus.ChoiceRegistry` and that have been decorated with the `register_subclass()` method. To have a better explanation of this feature, have a look at this [Draccus demo](https://github.com/dlwh/draccus?tab=readme-ov-file#more-flexible-configuration-with-choice-types). In our code, we use this mechanism mainly to select policies, environments, robots, and some other components like optimizers. The policies available to select are located in [lerobot/common/policies](../../lerobot/common/policies) +- Similarly, we select the environment with `--env.type=pusht`. The different environment configs are available in [`lerobot/common/envs/configs.py`](../../lerobot/common/envs/configs.py) +Let's see another example. Let's say you've been training [ACT](../../lerobot/common/policies/act) on [lerobot/aloha_sim_insertion_human](https://huggingface.co/datasets/lerobot/aloha_sim_insertion_human) using the [gym-aloha](https://github.com/huggingface/gym-aloha) environment for evaluation with: ```bash -python lerobot/scripts/train.py policy=act env=aloha +python lerobot/scripts/train.py \ + --policy.type=act \ + --dataset.repo_id=lerobot/aloha_sim_insertion_human \ + --env.type=aloha \ + --output_dir=outputs/train/act_aloha_insertion ``` +> Notice we added `--output_dir` to explicitly tell where to write outputs from this run (checkpoints, training state, configs etc.). This is not mandatory and if you don't specify it, a default directory will be created from the current date and time, env.type and policy.type. This will typically look like `outputs/train/2025-01-24/16-10-05_aloha_act`. -There are two things to note here: -- Config overrides are passed as `param_name=param_value`. -- Here we have overridden the defaults section. 
`policy=act` tells Hydra to use `policy/act.yaml`, and `env=aloha` tells Hydra to use `env/aloha.yaml`. - -_As an aside: we've set up all of our configurations so that they reproduce state-of-the-art results from papers in the literature._ - -## Overriding configuration parameters in the CLI - -Now let's say that we want to train on a different task in the Aloha environment. If you look in `env/aloha.yaml` you will see something like: - -```yaml -# lerobot/configs/env/aloha.yaml -env: - task: AlohaInsertion-v0 +We now want to train a different policy for aloha on another task. We'll change the dataset and use [lerobot/aloha_sim_transfer_cube_human](https://huggingface.co/datasets/lerobot/aloha_sim_transfer_cube_human) instead. Of course, we also need to change the task of the environment as well to match this other task. +Looking at the [`AlohaEnv`](../../lerobot/common/envs/configs.py) config, the task is `"AlohaInsertion-v0"` by default, which corresponds to the task we trained on in the command above. The [gym-aloha](https://github.com/huggingface/gym-aloha?tab=readme-ov-file#description) environment also has the `AlohaTransferCube-v0` task which corresponds to this other task we want to train on. Putting this together, we can train this new policy on this different task using: +```bash +python lerobot/scripts/train.py \ + --policy.type=act \ + --dataset.repo_id=lerobot/aloha_sim_transfer_cube_human \ + --env.type=aloha \ + --env.task=AlohaTransferCube-v0 \ + --output_dir=outputs/train/act_aloha_transfer +``` + +## Loading from a config file + +Now, let's assume that we want to reproduce the run just above. That run has produced a `train_config.json` file in its checkpoints, which serializes the `TrainPipelineConfig` instance it used: +```json +{ + "dataset": { + "repo_id": "lerobot/aloha_sim_transfer_cube_human", + "episodes": null, + ... + }, + "env": { + "type": "aloha", + "task": "AlohaTransferCube-v0", + "fps": 50, + ... + }, + "policy": { + "type": "act", + "n_obs_steps": 1, + ... + }, + ... +} +``` + +We can then simply load the config values from this file using: +```bash +python lerobot/scripts/train.py --config_path=outputs/train/act_aloha_transfer/checkpoints/last/pretrained_model/train_config.json \ + --output_dir=outputs/train/act_aloha_transfer_2 ``` +`--config_path` is also a special argument which allows to initialize the config from a local config file. -And if you look in `policy/act.yaml` you will see something like: - -```yaml -# lerobot/configs/policy/act.yaml -dataset_repo_id: lerobot/aloha_sim_insertion_human +Similarly to Hydra, we can still override some parameters in the CLI if we want to, e.g.: +```bash +python lerobot/scripts/train.py --config_path=outputs/train/act_aloha_transfer/checkpoints/last/pretrained_model/train_config.json \ + --output_dir=outputs/train/act_aloha_transfer_2 + --policy.n_action_steps=80 ``` +> Note: While `--output_dir` is not required in general, in this case we need to specify it since it will otherwise take the value from the `train_config.json` (which is `outputs/train/act_aloha_transfer`). In order to prevent accidental deletion of previous run checkpoints, we raise an error if you're trying to write in an existing directory. This is not the case when resuming a run, which is what you'll learn next. -But our Aloha environment actually supports a cube transfer task as well. To train for this task, you could manually modify the two yaml configuration files respectively. 
+## Resume training -First, we'd need to switch to using the cube transfer task for the ALOHA environment. +Being able to resume a training run is important in case it crashed or aborted for any reason. We'll demonstrate how to that here. -```diff -# lerobot/configs/env/aloha.yaml -env: -- task: AlohaInsertion-v0 -+ task: AlohaTransferCube-v0 +Let's reuse the command from the previous run and add a few more options: +```bash +python lerobot/scripts/train.py \ + --policy.type=act \ + --dataset.repo_id=lerobot/aloha_sim_transfer_cube_human \ + --env.type=aloha \ + --env.task=AlohaTransferCube-v0 \ + --log_freq=25 \ + --save_freq=100 \ + --output_dir=outputs/train/run_resumption ``` -Then, we'd also need to switch to using the cube transfer dataset. - -```diff -# lerobot/configs/policy/act.yaml --dataset_repo_id: lerobot/aloha_sim_insertion_human -+dataset_repo_id: lerobot/aloha_sim_transfer_cube_human +Here we've taken care to set up the log frequency and checkpointing frequency to low numbers so we can showcase resumption. You should be able to see some logging and have a first checkpoint within 1 minute (depending on hardware). Wait for the first checkpoint to happen, you should see a line that looks like this in your terminal: ``` - -Then, you'd be able to run: - -```bash -python lerobot/scripts/train.py policy=act env=aloha +INFO 2025-01-24 16:10:56 ts/train.py:263 Checkpoint policy after step 100 ``` - -and you'd be training and evaluating on the cube transfer task. - -An alternative approach to editing the yaml configuration files, would be to override the defaults via the command line: - +Now let's simulate a crash by killing the process (hit `ctrl`+`c`). We can then simply resume this run from the last checkpoint available with: ```bash python lerobot/scripts/train.py \ - policy=act \ - dataset_repo_id=lerobot/aloha_sim_transfer_cube_human \ - env=aloha \ - env.task=AlohaTransferCube-v0 + --config_path=outputs/train/run_resumption/checkpoints/last/pretrained_model/train_config.json \ + --resume=true ``` -There's something new here. Notice the `.` delimiter used to traverse the configuration hierarchy. _But be aware that the `defaults` section is an exception. As you saw above, we didn't need to write `defaults.policy=act` in the CLI. `policy=act` was enough._ - -Putting all that knowledge together, here's the command that was used to train https://huggingface.co/lerobot/act_aloha_sim_transfer_cube_human. +You should see from the logging that your training picks up from where it left off. +Another reason for which you might want to resume a run is simply to extend training and add more training steps. The number of training steps is set by the option `--offline.steps`, which is 100 000 by default. 
+You could double the number of steps of the previous run with: ```bash python lerobot/scripts/train.py \ - hydra.run.dir=outputs/train/act_aloha_sim_transfer_cube_human \ - device=cuda - env=aloha \ - env.task=AlohaTransferCube-v0 \ - dataset_repo_id=lerobot/aloha_sim_transfer_cube_human \ - policy=act \ - training.eval_freq=10000 \ - training.log_freq=250 \ - training.offline_steps=100000 \ - training.save_model=true \ - training.save_freq=25000 \ - eval.n_episodes=50 \ - eval.batch_size=50 \ - wandb.enable=false \ + --config_path=outputs/train/run_resumption/checkpoints/last/pretrained_model/train_config.json \ + --resume=true \ + --offline.steps=200000 ``` -There's one new thing here: `hydra.run.dir=outputs/train/act_aloha_sim_transfer_cube_human`, which specifies where to save the training output. - -## Using a configuration file not in `lerobot/configs` - -Above we discusses the our training script is set up such that Hydra looks for `default.yaml` in `lerobot/configs`. But, if you have a configuration file elsewhere in your filesystem you may use: - +## Outputs of a run +In the output directory, there will be a folder called `checkpoints` with the following structure: ```bash -python lerobot/scripts/train.py --config-dir PARENT/PATH --config-name FILE_NAME_WITHOUT_EXTENSION +outputs/train/run_resumption/checkpoints +├── 000100 # checkpoint_dir for training step 100 +│   ├── pretrained_model +│   │   ├── config.json # pretrained policy config +│   │   ├── model.safetensors # model weights +│   │   ├── train_config.json # train config +│ │ └── README.md # model card +│   └── training_state.pth # optimizer/scheduler/rng state and training step +. +└── last -> /Users/simon/projects/lerobot/outputs/train/run_resumption/checkpoints/000500 # symlink to the last available checkpoint ``` -Note: here we use regular syntax for providing CLI arguments to a Python script, not Hydra's `param_name=param_value` syntax. +## Fine-tuning a pre-trained policy -As a concrete example, this becomes particularly handy when you have a folder with training outputs, and would like to re-run the training. For example, say you previously ran the training script with one of the earlier commands and have `outputs/train/my_experiment/checkpoints/pretrained_model/config.yaml`. This `config.yaml` file will have the full set of configuration parameters within it. To run the training with the same configuration again, do: +In addition to the features currently in Draccus, we've added a special `.path` argument for the policy, which allows to load a policy as you would with `PreTrainedPolicy.from_pretrained()`. In that case, `path` can be a local directory that contains a checkpoint or a repo_id pointing to a pretrained policy on the hub. +For example, we could fine-tune a [policy pre-trained on the aloha transfer task](https://huggingface.co/lerobot/act_aloha_sim_transfer_cube_human) on the aloha insertion task. +We can achieve this with: ```bash -python lerobot/scripts/train.py --config-dir outputs/train/my_experiment/checkpoints/last/pretrained_model --config-name config +python lerobot/scripts/train.py \ + --policy.path=lerobot/act_aloha_sim_transfer_cube_human \ + --dataset.repo_id=lerobot/aloha_sim_insertion_human \ + --env.type=aloha \ + --env.task=AlohaInsertion-v0 ``` -Note that you may still use the regular syntax for config parameter overrides (eg: by adding `training.offline_steps=200000`). 
+When doing so, keep in mind that the features of the fine-tuning dataset would have to match the input/output features of the pretrained policy. ## Typical logs and metrics -When you start the training process, you will first see your full configuration being printed in the terminal. You can check it to make sure that you config it correctly and your config is not overrided by other files. The final configuration will also be saved with the checkpoint. +When you start the training process, you will first see your full configuration being printed in the terminal. You can check it to make sure that you configured your run correctly. The final configuration will also be saved with the checkpoint. After that, you will see training log like this one: - ``` INFO 2024-08-14 13:35:12 ts/train.py:192 step:0 smpl:64 ep:1 epch:0.00 loss:1.112 grdn:15.387 lr:2.0e-07 updt_s:1.738 data_s:4.774 ``` - -or evaluation log like: - +or evaluation log: ``` INFO 2024-08-14 13:38:45 ts/train.py:226 step:100 smpl:6K ep:52 epch:0.25 ∑rwrd:20.693 success:0.0% eval_s:120.266 ``` These logs will also be saved in wandb if `wandb.enable` is set to `true`. Here are the meaning of some abbreviations: - - `smpl`: number of samples seen during training. - `ep`: number of episodes seen during training. An episode contains multiple samples in a complete manipulation task. - `epch`: number of time all unique samples are seen (epoch). @@ -202,12 +221,15 @@ Some metrics are useful for initial performance profiling. For example, if you f --- -So far we've seen how to train Diffusion Policy for PushT and ACT for ALOHA. Now, what if we want to train ACT for PushT? Well, there are aspects of the ACT configuration that are specific to the ALOHA environments, and these happen to be incompatible with PushT. Therefore, trying to run the following will almost certainly raise an exception of sorts (eg: feature dimension mismatch): - +We've seen how to train Diffusion Policy for PushT and ACT for ALOHA. What if we want to train ACT for PushT? ```bash -python lerobot/scripts/train.py policy=act env=pusht dataset_repo_id=lerobot/pusht +python lerobot/scripts/train.py \ + --policy.type=act \ + --env.type=pusht \ + --dataset.repo_id=lerobot/pusht ``` -Please, head on over to our [advanced tutorial on adapting policy configuration to various environments](./advanced/train_act_pusht/train_act_pusht.md) to learn more. +Now that you know the basics of how to train a policy, you might want to know how to apply this knowledge to actual robots, or how to record your own datasets and train policies on your specific task? +If that's the case, head over to the next tutorial [`7_get_started_with_real_robot.md`](./7_get_started_with_real_robot.md). Or in the meantime, happy coding! 
🤗 From 3b1e64bb35d6907e1dad9f83548a29e162b76991 Mon Sep 17 00:00:00 2001 From: Simon Alibert Date: Fri, 24 Jan 2025 18:33:25 +0100 Subject: [PATCH 53/94] Remove advanced example 1 --- Makefile | 21 ----- .../advanced/1_train_act_pusht/act_pusht.yaml | 87 ------------------- .../1_train_act_pusht/train_act_pusht.md | 70 --------------- 3 files changed, 178 deletions(-) delete mode 100644 examples/advanced/1_train_act_pusht/act_pusht.yaml delete mode 100644 examples/advanced/1_train_act_pusht/train_act_pusht.md diff --git a/Makefile b/Makefile index d5c4d5e795..53305474a0 100644 --- a/Makefile +++ b/Makefile @@ -28,8 +28,6 @@ test-end-to-end: ${MAKE} DEVICE=$(DEVICE) test-tdmpc-ete-eval ${MAKE} DEVICE=$(DEVICE) test-tdmpc-ete-train-with-online -# ${MAKE} DEVICE=$(DEVICE) test-act-pusht-tutorial - test-act-ete-train: python lerobot/scripts/train.py \ --policy.type=act \ @@ -155,22 +153,3 @@ test-tdmpc-ete-train-with-online: --eval.batch_size=1 \ --device=$(DEVICE) \ --output_dir=tests/outputs/tdmpc_online/ - -# TODO: do we keep this one? -# test-act-pusht-tutorial: -# cp examples/advanced/1_train_act_pusht/act_pusht.yaml lerobot/configs/policy/created_by_Makefile.yaml -# python lerobot/scripts/train.py \ -# policy=created_by_Makefile.yaml \ -# env=pusht \ -# wandb.enable=False \ -# training.offline_steps=2 \ -# eval.n_episodes=1 \ -# eval.batch_size=1 \ -# env.episode_length=2 \ -# device=$(DEVICE) \ -# training.save_model=true \ -# training.save_freq=2 \ -# training.batch_size=2 \ -# training.image_transforms.enable=true \ -# hydra.run.dir=tests/outputs/act_pusht/ -# rm lerobot/configs/policy/created_by_Makefile.yaml diff --git a/examples/advanced/1_train_act_pusht/act_pusht.yaml b/examples/advanced/1_train_act_pusht/act_pusht.yaml deleted file mode 100644 index 4963e11c02..0000000000 --- a/examples/advanced/1_train_act_pusht/act_pusht.yaml +++ /dev/null @@ -1,87 +0,0 @@ -# @package _global_ - -# Change the seed to match what PushT eval uses -# (to avoid evaluating on seeds used for generating the training data). -seed: 100000 -# Change the dataset repository to the PushT one. -dataset_repo_id: lerobot/pusht - -override_dataset_stats: - observation.image: - # stats from imagenet, since we use a pretrained vision model - mean: [[[0.485]], [[0.456]], [[0.406]]] # (c,1,1) - std: [[[0.229]], [[0.224]], [[0.225]]] # (c,1,1) - -training: - offline_steps: 80000 - online_steps: 0 - eval_freq: 10000 - save_freq: 100000 - log_freq: 250 - save_model: true - - batch_size: 8 - lr: 1e-5 - lr_backbone: 1e-5 - weight_decay: 1e-4 - grad_clip_norm: 10 - online_steps_between_rollouts: 1 - - delta_timestamps: - action: "[i / ${fps} for i in range(${policy.chunk_size})]" - -eval: - n_episodes: 50 - batch_size: 50 - -# See `configuration_act.py` for more details. -policy: - name: act - - # Input / output structure. - n_obs_steps: 1 - chunk_size: 100 # chunk_size - n_action_steps: 100 - - input_shapes: - observation.image: [3, 96, 96] - observation.state: ["${env.state_dim}"] - output_shapes: - action: ["${env.action_dim}"] - - # Normalization / Unnormalization - input_normalization_modes: - observation.image: mean_std - # Use min_max normalization just because it's more standard. - observation.state: min_max - output_normalization_modes: - # Use min_max normalization just because it's more standard. - action: min_max - - # Architecture. - # Vision backbone. - vision_backbone: resnet18 - pretrained_backbone_weights: ResNet18_Weights.IMAGENET1K_V1 - replace_final_stride_with_dilation: false - # Transformer layers. 
- pre_norm: false - dim_model: 512 - n_heads: 8 - dim_feedforward: 3200 - feedforward_activation: relu - n_encoder_layers: 4 - # Note: Although the original ACT implementation has 7 for `n_decoder_layers`, there is a bug in the code - # that means only the first layer is used. Here we match the original implementation by setting this to 1. - # See this issue https://github.com/tonyzhaozh/act/issues/25#issue-2258740521. - n_decoder_layers: 1 - # VAE. - use_vae: true - latent_dim: 32 - n_vae_encoder_layers: 4 - - # Inference. - temporal_ensemble_coeff: null - - # Training and loss computation. - dropout: 0.1 - kl_weight: 10.0 diff --git a/examples/advanced/1_train_act_pusht/train_act_pusht.md b/examples/advanced/1_train_act_pusht/train_act_pusht.md deleted file mode 100644 index 0258c9916b..0000000000 --- a/examples/advanced/1_train_act_pusht/train_act_pusht.md +++ /dev/null @@ -1,70 +0,0 @@ -In this tutorial we will learn how to adapt a policy configuration to be compatible with a new environment and dataset. As a concrete example, we will adapt the default configuration for ACT to be compatible with the PushT environment and dataset. - -If you haven't already read our tutorial on the [training script and configuration tooling](../4_train_policy_with_script.md) please do so prior to tackling this tutorial. - -Let's get started! - -Suppose we want to train ACT for PushT. Well, there are aspects of the ACT configuration that are specific to the ALOHA environments, and these happen to be incompatible with PushT. Therefore, trying to run the following will almost certainly raise an exception of sorts (eg: feature dimension mismatch): - -```bash -python lerobot/scripts/train.py policy=act env=pusht dataset_repo_id=lerobot/pusht -``` - -We need to adapt the parameters of the ACT policy configuration to the PushT environment. The most important ones are the image keys. - -ALOHA's datasets and environments typically use a variable number of cameras. In `lerobot/configs/policy/act.yaml` you may notice two relevant sections. Here we show you the minimal diff needed to adjust to PushT: - -```diff -override_dataset_stats: -- observation.images.top: -+ observation.image: - # stats from imagenet, since we use a pretrained vision model - mean: [[[0.485]], [[0.456]], [[0.406]]] # (c,1,1) - std: [[[0.229]], [[0.224]], [[0.225]]] # (c,1,1) - -policy: - input_shapes: -- observation.images.top: [3, 480, 640] -+ observation.image: [3, 96, 96] - observation.state: ["${env.state_dim}"] - output_shapes: - action: ["${env.action_dim}"] - - input_normalization_modes: -- observation.images.top: mean_std -+ observation.image: mean_std - observation.state: min_max - output_normalization_modes: - action: min_max -``` - -Here we've accounted for the following: -- PushT uses "observation.image" for its image key. -- PushT provides smaller images. - -_Side note: technically we could override these via the CLI, but with many changes it gets a bit messy, and we also have a bit of a challenge in that we're using `.` in our observation keys which is treated by Hydra as a hierarchical separator_. - -For your convenience, we provide [`act_pusht.yaml`](./act_pusht.yaml) in this directory. It contains the diff above, plus some other (optional) ones that are explained within. 
Please copy it into `lerobot/configs/policy` with: - -```bash -cp examples/advanced/1_train_act_pusht/act_pusht.yaml lerobot/configs/policy/act_pusht.yaml -``` - -(remember from a [previous tutorial](../4_train_policy_with_script.md) that Hydra will look in the `lerobot/configs` directory). Now try running the following. - - -```bash -python lerobot/scripts/train.py policy=act_pusht env=pusht -``` - -Notice that this is much the same as the command that failed at the start of the tutorial, only: -- Now we are using `policy=act_pusht` to point to our new configuration file. -- We can drop `dataset_repo_id=lerobot/pusht` as the change is incorporated in our new configuration file. - -Hurrah! You're now training ACT for the PushT environment. - ---- - -The bottom line of this tutorial is that when training policies for different environments and datasets you will need to understand what parts of the policy configuration are specific to those and make changes accordingly. - -Happy coding! 🤗 From a5ab25aed084f4d1cff5ad4bc744cc1c1c4a3a8c Mon Sep 17 00:00:00 2001 From: Simon Alibert Date: Fri, 24 Jan 2025 18:34:44 +0100 Subject: [PATCH 54/94] Remove example 5 --- examples/5_resume_training.md | 37 ----------------------------------- 1 file changed, 37 deletions(-) delete mode 100644 examples/5_resume_training.md diff --git a/examples/5_resume_training.md b/examples/5_resume_training.md deleted file mode 100644 index 6e8d684dae..0000000000 --- a/examples/5_resume_training.md +++ /dev/null @@ -1,37 +0,0 @@ -This tutorial explains how to resume a training run that you've started with the training script. If you don't know how our training script and configuration system works, please read [4_train_policy_with_script.md](./4_train_policy_with_script.md) first. - -## Basic training resumption - -Let's consider the example of training ACT for one of the ALOHA tasks. Here's a command that can achieve that: - -```bash -python lerobot/scripts/train.py \ - hydra.run.dir=outputs/train/run_resumption \ - policy=act \ - dataset_repo_id=lerobot/aloha_sim_transfer_cube_human \ - env=aloha \ - env.task=AlohaTransferCube-v0 \ - training.log_freq=25 \ - training.save_checkpoint=true \ - training.save_freq=100 -``` - -Here we're using the default dataset and environment for ACT, and we've taken care to set up the log frequency and checkpointing frequency to low numbers so we can test resumption. You should be able to see some logging and have a first checkpoint within 1 minute. Please interrupt the training after the first checkpoint. - -To resume, all that we have to do is run the training script, providing the run directory, and the resume option: - -```bash -python lerobot/scripts/train.py \ - hydra.run.dir=outputs/train/run_resumption \ - resume=true -``` - -You should see from the logging that your training picks up from where it left off. - -Note that with `resume=true`, the configuration file from the last checkpoint in the training output directory is loaded. So it doesn't matter that we haven't provided all the other configuration parameters from our previous command (although there may be warnings to notify you that your command has a different configuration than than the checkpoint). - ---- - -Now you should know how to resume your training run in case it gets interrupted or you want to extend a finished training run. - -Happy coding! 
🤗 From 5d112c6a0ddfb83e36cf1a5c3741ba283871d8aa Mon Sep 17 00:00:00 2001 From: Simon Alibert Date: Fri, 24 Jan 2025 18:36:02 +0100 Subject: [PATCH 55/94] Move example 6 to advanced --- .../1_add_image_transforms.py} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename examples/{6_add_image_transforms.py => advanced/1_add_image_transforms.py} (100%) diff --git a/examples/6_add_image_transforms.py b/examples/advanced/1_add_image_transforms.py similarity index 100% rename from examples/6_add_image_transforms.py rename to examples/advanced/1_add_image_transforms.py From 220f81802d9a17bc1632c060fa3e73168eb4c687 Mon Sep 17 00:00:00 2001 From: Simon Alibert Date: Fri, 24 Jan 2025 18:37:05 +0100 Subject: [PATCH 56/94] Use HubMixin.save_pretrained --- lerobot/common/logger.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/lerobot/common/logger.py b/lerobot/common/logger.py index 9583c9110c..adcb869b13 100644 --- a/lerobot/common/logger.py +++ b/lerobot/common/logger.py @@ -34,7 +34,7 @@ from lerobot.common.policies.pretrained import PreTrainedPolicy from lerobot.common.utils.utils import get_global_random_state -from lerobot.configs.train import TRAIN_CONFIG_NAME, TrainPipelineConfig +from lerobot.configs.train import TrainPipelineConfig from lerobot.configs.types import FeatureType, NormalizationMode PRETRAINED_MODEL = "pretrained_model" @@ -162,8 +162,7 @@ def save_model(self, save_dir: Path, policy: PreTrainedPolicy, wandb_artifact_na register_features_types() policy.save_pretrained(save_dir) # Also save the full config for the env configuration. - with open(save_dir / TRAIN_CONFIG_NAME, "w") as f: - draccus.dump(self._cfg, f, indent=4) + self._cfg.save_pretrained(save_dir) if self._wandb and not self._cfg.wandb.disable_artifact: # note wandb artifact does not accept ":" or "/" in its name artifact = self._wandb.Artifact(wandb_artifact_name, type="model") From d2d536a3c360919278c1bf2af81a8cfca5376f28 Mon Sep 17 00:00:00 2001 From: Simon Alibert Date: Sat, 25 Jan 2025 13:33:11 +0100 Subject: [PATCH 57/94] Enable config_path to be a repo_id --- lerobot/configs/eval.py | 7 ++--- lerobot/configs/parser.py | 59 ++++++++++++++++++++++++++++----------- lerobot/configs/train.py | 16 +++++++---- 3 files changed, 56 insertions(+), 26 deletions(-) diff --git a/lerobot/configs/eval.py b/lerobot/configs/eval.py index 2148a1c536..abb29beef9 100644 --- a/lerobot/configs/eval.py +++ b/lerobot/configs/eval.py @@ -1,5 +1,5 @@ import datetime as dt -from dataclasses import Field, dataclass, field, fields +from dataclasses import dataclass, field from pathlib import Path from lerobot.common import envs, policies # noqa: F401 @@ -75,7 +75,6 @@ def __post_init__(self): raise ValueError("Set 'use_amp' to True or False.") @classmethod - def __get_path_fields__(cls) -> list[Field]: + def __get_path_fields__(cls) -> list[str]: """This enables the parser to load config from the policy using `--policy.path=local/dir`""" - path_fields = ["policy"] - return [f for f in fields(cls) if f.name in path_fields] + return ["policy"] diff --git a/lerobot/configs/parser.py b/lerobot/configs/parser.py index 9087957493..ee78487785 100644 --- a/lerobot/configs/parser.py +++ b/lerobot/configs/parser.py @@ -1,13 +1,14 @@ import inspect import sys from argparse import ArgumentError -from dataclasses import Field from functools import wraps from pathlib import Path from typing import Sequence import draccus +from lerobot.common.utils.utils import has_method + PATH_KEY = "path" draccus.set_config_type("json") @@ 
-52,27 +53,49 @@ def get_type_arg(field_name: str, args: Sequence[str] | None = None) -> str | No return parse_arg(f"{field_name}.{draccus.CHOICE_TYPE_KEY}", args) -def preprocess_path_args(fields: Field | list[Field], args: Sequence[str] | None = None) -> list[str]: - if isinstance(fields, Field): - fields = [fields] +def filter_arg(field_to_filter: str, args: Sequence[str] | None = None) -> list[str]: + return [arg for arg in args if not arg.startswith(f"--{field_to_filter}=")] + + +def filter_path_args(fields_to_filter: str | list[str], args: Sequence[str] | None = None) -> list[str]: + """ + Filters command-line arguments related to fields with specific path arguments. + + Args: + fields_to_filter (str | list[str]): A single str or a list of str whose arguments need to be filtered. + args (Sequence[str] | None): The sequence of command-line arguments to be filtered. + Defaults to None. + + Returns: + list[str]: A filtered list of arguments, with arguments related to the specified + fields removed. + + Raises: + ArgumentError: If both a path argument (e.g., `--field_name.path`) and a type + argument (e.g., `--field_name.type`) are specified for the same field. + """ + if isinstance(fields_to_filter, str): + fields_to_filter = [fields_to_filter] filtered_args = args - for field in fields: - if get_path_arg(field.name, args): - if get_type_arg(field.name, args): + for field in fields_to_filter: + if get_path_arg(field, args): + if get_type_arg(field, args): raise ArgumentError( argument=None, - message=f"Cannot specify both --{field.name}.{PATH_KEY} and --{field.name}.{draccus.CHOICE_TYPE_KEY}", + message=f"Cannot specify both --{field}.{PATH_KEY} and --{field}.{draccus.CHOICE_TYPE_KEY}", ) - filtered_args = [arg for arg in filtered_args if not arg.startswith(f"--{field.name}.")] + filtered_args = [arg for arg in filtered_args if not arg.startswith(f"--{field}.")] return filtered_args def wrap(config_path: Path | None = None): """ - HACK: Similar to draccus.wrap but adds a preprocessing of CLI args in order to overload specific parts of - the config from a config file at that particular nesting level. + HACK: Similar to draccus.wrap but does two additional things: + - Will remove '.path' arguments from CLI in order to process them later on. 
+ - If a 'config_path' is passed and the main config class has a 'from_pretrained' method, will + initialize it from there to allow to fetch configs from the hub directly """ def wrapper_outer(fn): @@ -85,11 +108,15 @@ def wrapper_inner(*args, **kwargs): args = args[1:] else: cli_args = sys.argv[1:] - get_path_fields = getattr(argtype, "__get_path_fields__", None) - if get_path_fields: - path_fields = get_path_fields() - cli_args = preprocess_path_args(path_fields, cli_args) - cfg = draccus.parse(config_class=argtype, config_path=config_path, args=cli_args) + config_path_cli = parse_arg("config_path", cli_args) + if has_method(argtype, "__get_path_fields__"): + path_fields = argtype.__get_path_fields__() + cli_args = filter_path_args(path_fields, cli_args) + if has_method(argtype, "from_pretrained") and config_path_cli: + cli_args = filter_arg("config_path", cli_args) + cfg = argtype.from_pretrained(config_path_cli, cli_args=cli_args) + else: + cfg = draccus.parse(config_class=argtype, config_path=config_path, args=cli_args) response = fn(cfg, *args, **kwargs) return response diff --git a/lerobot/configs/train.py b/lerobot/configs/train.py index b540f2f0a7..124195fd59 100644 --- a/lerobot/configs/train.py +++ b/lerobot/configs/train.py @@ -1,6 +1,6 @@ import datetime as dt import os -from dataclasses import Field, dataclass, field, fields +from dataclasses import dataclass, field from pathlib import Path from typing import Type @@ -132,6 +132,8 @@ def __post_init__(self): if self.resume: # The entire train config is already loaded, we just need to get the checkpoint dir config_path = parser.parse_arg("config_path") + if not config_path: + raise ValueError("A config_path is expected when resuming a run.") policy_path = Path(config_path).parent self.policy.pretrained_path = policy_path self.checkpoint_path = policy_path.parent @@ -169,10 +171,9 @@ def __post_init__(self): self.scheduler = self.policy.get_scheduler_preset() @classmethod - def __get_path_fields__(cls) -> list[Field]: + def __get_path_fields__(cls) -> list[str]: """This enables the parser to load config from the policy using `--policy.path=local/dir`""" - path_fields = ["policy"] - return [f for f in fields(cls) if f.name in path_fields] + return ["policy"] def _save_pretrained(self, save_directory: Path) -> None: with open(save_directory / TRAIN_CONFIG_NAME, "w") as f, draccus.config_type("json"): @@ -190,7 +191,7 @@ def from_pretrained( cache_dir: str | Path | None = None, local_files_only: bool = False, revision: str | None = None, - **policy_kwargs, + **kwargs, ) -> "TrainPipelineConfig": model_id = str(pretrained_name_or_path) config_file: str | None = None @@ -199,6 +200,8 @@ def from_pretrained( config_file = os.path.join(model_id, TRAIN_CONFIG_NAME) else: print(f"{TRAIN_CONFIG_NAME} not found in {Path(model_id).resolve()}") + elif Path(model_id).is_file(): + config_file = model_id else: try: config_file = hf_hub_download( @@ -215,4 +218,5 @@ def from_pretrained( except HfHubHTTPError as e: print(f"config.json not found on the HuggingFace Hub: {str(e)}") - return draccus.parse(cls, config_file, args=[]) + cli_args = kwargs.pop("cli_args", []) + return draccus.parse(cls, config_file, args=cli_args) From 6c4bc32c1b6e1539d619e26cd427051bcdd3bc98 Mon Sep 17 00:00:00 2001 From: Simon Alibert Date: Sat, 25 Jan 2025 13:33:47 +0100 Subject: [PATCH 58/94] Dry has_method --- lerobot/common/robot_devices/control_utils.py | 6 +----- lerobot/common/utils/utils.py | 2 +- lerobot/scripts/control_robot.py | 3 +-- 3 files changed, 3 
insertions(+), 8 deletions(-) diff --git a/lerobot/common/robot_devices/control_utils.py b/lerobot/common/robot_devices/control_utils.py index 413a897095..9368b89d64 100644 --- a/lerobot/common/robot_devices/control_utils.py +++ b/lerobot/common/robot_devices/control_utils.py @@ -21,7 +21,7 @@ from lerobot.common.datasets.utils import get_features_from_robot from lerobot.common.robot_devices.robots.utils import Robot from lerobot.common.robot_devices.utils import busy_wait -from lerobot.common.utils.utils import get_safe_torch_device +from lerobot.common.utils.utils import get_safe_torch_device, has_method def log_control_info(robot: Robot, dt_s, episode_index=None, frame_index=None, fps=None): @@ -87,10 +87,6 @@ def is_headless(): return True -def has_method(_object: object, method_name: str): - return hasattr(_object, method_name) and callable(getattr(_object, method_name)) - - def predict_action(observation, policy, device, use_amp): observation = copy(observation) with ( diff --git a/lerobot/common/utils/utils.py b/lerobot/common/utils/utils.py index a85712c54d..235b30cb36 100644 --- a/lerobot/common/utils/utils.py +++ b/lerobot/common/utils/utils.py @@ -212,5 +212,5 @@ def get_channel_first_image_shape(image_shape: tuple) -> tuple: return shape -def has_method(cls: Any, method_name: str): +def has_method(cls: object, method_name: str): return hasattr(cls, method_name) and callable(getattr(cls, method_name)) diff --git a/lerobot/scripts/control_robot.py b/lerobot/scripts/control_robot.py index 9fa9f22801..c7401ba8e6 100644 --- a/lerobot/scripts/control_robot.py +++ b/lerobot/scripts/control_robot.py @@ -128,7 +128,6 @@ ) from lerobot.common.robot_devices.control_utils import ( control_loop, - has_method, init_keyboard_listener, log_control_info, record_episode, @@ -140,7 +139,7 @@ ) from lerobot.common.robot_devices.robots.utils import Robot, make_robot_from_config from lerobot.common.robot_devices.utils import busy_wait, safe_disconnect -from lerobot.common.utils.utils import init_logging, log_say +from lerobot.common.utils.utils import has_method, init_logging, log_say ######################################################################################## # Control modes From 119b269fd33c0bee45fe90896178c8582419ed3a Mon Sep 17 00:00:00 2001 From: Simon Alibert Date: Sat, 25 Jan 2025 13:35:00 +0100 Subject: [PATCH 59/94] Update example 4 --- examples/4_train_policy_with_script.md | 48 ++++++++++++++++++++++++-- 1 file changed, 45 insertions(+), 3 deletions(-) diff --git a/examples/4_train_policy_with_script.md b/examples/4_train_policy_with_script.md index 8de6bad089..1d6e0576d2 100644 --- a/examples/4_train_policy_with_script.md +++ b/examples/4_train_policy_with_script.md @@ -124,6 +124,13 @@ python lerobot/scripts/train.py --config_path=outputs/train/act_aloha_transfer/c ``` > Note: While `--output_dir` is not required in general, in this case we need to specify it since it will otherwise take the value from the `train_config.json` (which is `outputs/train/act_aloha_transfer`). In order to prevent accidental deletion of previous run checkpoints, we raise an error if you're trying to write in an existing directory. This is not the case when resuming a run, which is what you'll learn next. +`--config_path` can also accept the repo_id of a repo on the hub that contains a `train_config.json` file, e.g. 
running: +```bash +python lerobot/scripts/train.py --config_path=lerobot/diffusion_pusht +``` +will start a training run with the same configuration used for training [lerobot/diffusion_pusht](https://huggingface.co/lerobot/diffusion_pusht) + + ## Resume training Being able to resume a training run is important in case it crashed or aborted for any reason. We'll demonstrate how to that here. @@ -181,8 +188,7 @@ outputs/train/run_resumption/checkpoints In addition to the features currently in Draccus, we've added a special `.path` argument for the policy, which allows to load a policy as you would with `PreTrainedPolicy.from_pretrained()`. In that case, `path` can be a local directory that contains a checkpoint or a repo_id pointing to a pretrained policy on the hub. -For example, we could fine-tune a [policy pre-trained on the aloha transfer task](https://huggingface.co/lerobot/act_aloha_sim_transfer_cube_human) on the aloha insertion task. -We can achieve this with: +For example, we could fine-tune a [policy pre-trained on the aloha transfer task](https://huggingface.co/lerobot/act_aloha_sim_transfer_cube_human) on the aloha insertion task. We can achieve this with: ```bash python lerobot/scripts/train.py \ --policy.path=lerobot/act_aloha_sim_transfer_cube_human \ @@ -219,6 +225,42 @@ These logs will also be saved in wandb if `wandb.enable` is set to `true`. Here Some metrics are useful for initial performance profiling. For example, if you find the current GPU utilization is low via the `nvidia-smi` command and `data_s` sometimes is too high, you may need to modify batch size or number of dataloading workers to accelerate dataloading. We also recommend [pytorch profiler](https://github.com/huggingface/lerobot?tab=readme-ov-file#improve-your-code-with-profiling) for detailed performance probing. +## In short + +We'll summarize here the main use cases to remember from this tutorial. + +#### Train a policy from scratch – CLI +```bash +python lerobot/scripts/train.py \ + --policy.type=act \ # <- select 'act' policy + --env.type=pusht \ # <- select 'pusht' environment + --dataset.repo_id=lerobot/pusht # <- train on this dataset +``` + +#### Train a policy from scratch - config file + CLI +```bash +python lerobot/scripts/train.py \ + --config_path=path/to/train_config.json \ # <- can also be a repo_id + --policy.n_action_steps=80 # <- you may still override values +``` + +#### Resume/continue a training run +```bash +python lerobot/scripts/train.py \ + --config_path=checkpoint/pretrained_model/train_config.json \ + --resume=true \ + --offline.steps=200000 # <- you can change some training parameters +``` + +#### Fine-tuning +```bash +python lerobot/scripts/train.py \ + --policy.path=lerobot/act_aloha_sim_transfer_cube_human \ # <- can also be a local path to a checkpoint + --dataset.repo_id=lerobot/aloha_sim_insertion_human \ + --env.type=aloha \ + --env.task=AlohaInsertion-v0 +``` + --- We've seen how to train Diffusion Policy for PushT and ACT for ALOHA. What if we want to train ACT for PushT? @@ -232,4 +274,4 @@ python lerobot/scripts/train.py \ Now that you know the basics of how to train a policy, you might want to know how to apply this knowledge to actual robots, or how to record your own datasets and train policies on your specific task? If that's the case, head over to the next tutorial [`7_get_started_with_real_robot.md`](./7_get_started_with_real_robot.md). -Or in the meantime, happy coding! 🤗 +Or in the meantime, happy training! 
🤗 From 2d7d533337027da80b630cea0ea04cc6a112a8e1 Mon Sep 17 00:00:00 2001 From: Simon Alibert Date: Sat, 25 Jan 2025 13:38:06 +0100 Subject: [PATCH 60/94] Update README --- README.md | 71 +++++++++++++------------------------------------------ 1 file changed, 16 insertions(+), 55 deletions(-) diff --git a/README.md b/README.md index 849a14de55..9da708f8b9 100644 --- a/README.md +++ b/README.md @@ -222,87 +222,48 @@ Check out [example 2](./examples/2_evaluate_pretrained_policy.py) that illustrat We also provide a more capable script to parallelize the evaluation over multiple environments during the same rollout. Here is an example with a pretrained model hosted on [lerobot/diffusion_pusht](https://huggingface.co/lerobot/diffusion_pusht): ```bash python lerobot/scripts/eval.py \ - -p lerobot/diffusion_pusht \ - eval.n_episodes=10 \ - eval.batch_size=10 + --policy.path=lerobot/diffusion_pusht \ + --env.type=pusht \ + --eval.batch_size=10 \ + --eval.n_episodes=10 \ + --use_amp=false \ + --device=cuda ``` Note: After training your own policy, you can re-evaluate the checkpoints with: ```bash -python lerobot/scripts/eval.py -p {OUTPUT_DIR}/checkpoints/last/pretrained_model +python lerobot/scripts/eval.py --policy.path={OUTPUT_DIR}/checkpoints/last/pretrained_model ``` See `python lerobot/scripts/eval.py --help` for more instructions. ### Train your own policy -Check out [example 3](./examples/3_train_policy.py) that illustrates how to train a model using our core library in python, and [example 4](./examples/4_train_policy_with_script.md) that shows how to use our training script from command line. +Check out [example 3](./examples/3_train_policy.py) that illustrate how to train a model using our core library in python, and [example 4](./examples/4_train_policy_with_script.md) that shows how to use our training script from command line. -In general, you can use our training script to easily train any policy. Here is an example of training the ACT policy on trajectories collected by humans on the Aloha simulation environment for the insertion task: +To use wandb for logging training and evaluation curves, make sure you've run `wandb login` as a one-time setup step. Then, when running the training command above, enable WandB in the configuration by adding `--wandb.enable=true`. -```bash -python lerobot/scripts/train.py \ - policy=act \ - env=aloha \ - env.task=AlohaInsertion-v0 \ - dataset_repo_id=lerobot/aloha_sim_insertion_human \ -``` - -The experiment directory is automatically generated and will show up in yellow in your terminal. It looks like `outputs/train/2024-05-05/20-21-12_aloha_act_default`. 
You can manually specify an experiment directory by adding this argument to the `train.py` python command: -```bash - hydra.run.dir=your/new/experiment/dir -``` - -In the experiment directory there will be a folder called `checkpoints` which will have the following structure: - -```bash -checkpoints -├── 000250 # checkpoint_dir for training step 250 -│ ├── pretrained_model # Hugging Face pretrained model dir -│ │ ├── config.json # Hugging Face pretrained model config -│ │ ├── config.yaml # consolidated Hydra config -│ │ ├── model.safetensors # model weights -│ │ └── README.md # Hugging Face model card -│ └── training_state.pth # optimizer/scheduler/rng state and training step -``` - -To resume training from a checkpoint, you can add these to the `train.py` python command: -```bash - hydra.run.dir=your/original/experiment/dir resume=true -``` - -It will load the pretrained model, optimizer and scheduler states for training. For more information please see our tutorial on training resumption [here](https://github.com/huggingface/lerobot/blob/main/examples/5_resume_training.md). - -To use wandb for logging training and evaluation curves, make sure you've run `wandb login` as a one-time setup step. Then, when running the training command above, enable WandB in the configuration by adding: - -```bash - wandb.enable=true -``` - -A link to the wandb logs for the run will also show up in yellow in your terminal. Here is an example of what they look like in your browser. Please also check [here](https://github.com/huggingface/lerobot/blob/main/examples/4_train_policy_with_script.md#typical-logs-and-metrics) for the explanation of some commonly used metrics in logs. +A link to the wandb logs for the run will also show up in yellow in your terminal. Here is an example of what they look like in your browser. Please also check [here](./examples/4_train_policy_with_script.md#typical-logs-and-metrics) for the explanation of some commonly used metrics in logs. ![](media/wandb.png) -Note: For efficiency, during training every checkpoint is evaluated on a low number of episodes. You may use `eval.n_episodes=500` to evaluate on more episodes than the default. Or, after training, you may want to re-evaluate your best checkpoints on more episodes or change the evaluation settings. See `python lerobot/scripts/eval.py --help` for more instructions. +Note: For efficiency, during training every checkpoint is evaluated on a low number of episodes. You may use `--eval.n_episodes=500` to evaluate on more episodes than the default. Or, after training, you may want to re-evaluate your best checkpoints on more episodes or change the evaluation settings. See `python lerobot/scripts/eval.py --help` for more instructions. #### Reproduce state-of-the-art (SOTA) -We have organized our configuration files (found under [`lerobot/configs`](./lerobot/configs)) such that they reproduce SOTA results from a given model variant in their respective original works. Simply running: - +We provide some pretrained policies on our [hub page](https://huggingface.co/lerobot) that can achieve state-of-the-art performances. +You can reproduce their training by loading the config from their run. Simply running: ```bash -python lerobot/scripts/train.py policy=diffusion env=pusht +python lerobot/scripts/train.py --config_path=lerobot/diffusion_pusht ``` - reproduces SOTA results for Diffusion Policy on the PushT task. -Pretrained policies, along with reproduction details, can be found under the "Models" section of https://huggingface.co/lerobot. 
- ## Contribute If you would like to contribute to 🤗 LeRobot, please check out our [contribution guide](https://github.com/huggingface/lerobot/blob/main/CONTRIBUTING.md). -### Add a new dataset + ### Add a pretrained policy From 457e4bcff360241e53af5c456d9f903aa47bfb61 Mon Sep 17 00:00:00 2001 From: Simon Alibert Date: Sat, 25 Jan 2025 13:50:02 +0100 Subject: [PATCH 61/94] Cleanup pyproject.toml --- pyproject.toml | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index bdfe8274a0..016ebb74a8 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -69,9 +69,7 @@ pyrender = {git = "https://github.com/mmatl/pyrender.git", markers = "sys_platfo hello-robot-stretch-body = {version = ">=0.7.27", markers = "sys_platform == 'linux'", optional = true} pyserial = {version = ">=3.5", optional = true} jsonlines = ">=4.0.0" -# draccus = ">=0.9.4" # replace when https://github.com/dlwh/draccus/pull/29 is in release -draccus = {git = "https://github.com/dlwh/draccus.git"} -# draccus = { path = "../draccus/", develop = true} +draccus = {git = "https://github.com/dlwh/draccus.git"} # replace with draccus = ">=0.9.4" when https://github.com/dlwh/draccus/pull/29 is in release [tool.poetry.extras] From ca3e362bcd8f8d53755bdbca8223a9412b97ffd5 Mon Sep 17 00:00:00 2001 From: Simon Alibert Date: Sat, 25 Jan 2025 20:22:46 +0100 Subject: [PATCH 62/94] Update eval docstring --- lerobot/scripts/eval.py | 25 +++++++++++++++---------- 1 file changed, 15 insertions(+), 10 deletions(-) diff --git a/lerobot/scripts/eval.py b/lerobot/scripts/eval.py index c67ac31b2a..1716ea43bd 100644 --- a/lerobot/scripts/eval.py +++ b/lerobot/scripts/eval.py @@ -21,24 +21,29 @@ for 10 episodes. ``` -python lerobot/scripts/eval.py -p lerobot/diffusion_pusht eval.n_episodes=10 +python lerobot/scripts/eval.py \ + --policy.path=lerobot/diffusion_pusht \ + --env.type=pusht \ + --eval.batch_size=10 \ + --eval.n_episodes=10 \ + --use_amp=false \ + --device=cuda ``` OR, you want to evaluate a model checkpoint from the LeRobot training script for 10 episodes. - ``` python lerobot/scripts/eval.py \ - -p outputs/train/diffusion_pusht/checkpoints/005000/pretrained_model \ - eval.n_episodes=10 + --policy.path=outputs/train/diffusion_pusht/checkpoints/005000/pretrained_model \ + --env.type=pusht \ + --eval.batch_size=10 \ + --eval.n_episodes=10 \ + --use_amp=false \ + --device=cuda ``` -Note that in both examples, the repo/folder should contain at least `config.json`, `config.yaml` and -`model.safetensors`. +Note that in both examples, the repo/folder should contain at least `config.json` and `model.safetensors` files. -Note the formatting for providing the number of episodes. Generally, you may provide any number of arguments -with `qualified.parameter.name=value`. In this case, the parameter eval.n_episodes appears as `n_episodes` -nested under `eval` in the `config.yaml` found at -https://huggingface.co/lerobot/diffusion_pusht/tree/main. 
+You can learn about the CLI options for this script in the `EvalPipelineConfig` in lerobot/configs/eval.py
 """
 
 import json
 
From d3ef145cae75da865063819936f86ef428dc6c37 Mon Sep 17 00:00:00 2001
From: Simon Alibert
Date: Sat, 25 Jan 2025 20:25:29 +0100
Subject: [PATCH 63/94] Update README

---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index 9da708f8b9..83ee54920d 100644
--- a/README.md
+++ b/README.md
@@ -291,7 +291,7 @@ Once you have trained a policy you may upload it to the Hugging Face hub using a
 You first need to find the checkpoint folder located inside your experiment directory (e.g. `outputs/train/2024-05-05/20-21-12_aloha_act_default/checkpoints/002500`). Within that there is a `pretrained_model` directory which should contain:
 - `config.json`: A serialized version of the policy configuration (following the policy's dataclass config).
 - `model.safetensors`: A set of `torch.nn.Module` parameters, saved in [Hugging Face Safetensors](https://huggingface.co/docs/safetensors/index) format.
-- `config.yaml`: A consolidated Hydra training configuration containing the policy, environment, and dataset configs. The policy configuration should match `config.json` exactly. The environment config is useful for anyone who wants to evaluate your policy. The dataset config just serves as a paper trail for reproducibility.
+- `train_config.json`: A consolidated configuration containing all parameters used for training. The policy configuration should match `config.json` exactly. This is useful for anyone who wants to evaluate your policy or for reproducibility.
 
 To upload these to the hub, run the following:
 ```bash

From 2bf3d752a4e2d55f4fc9fb49658bab8f193abe04 Mon Sep 17 00:00:00 2001
From: Simon Alibert
Date: Sat, 25 Jan 2025 20:59:07 +0100
Subject: [PATCH 64/94] Clean example 4

---
 examples/4_train_policy_with_script.md | 10 +---------
 1 file changed, 1 insertion(+), 9 deletions(-)

diff --git a/examples/4_train_policy_with_script.md b/examples/4_train_policy_with_script.md
index 1d6e0576d2..0a8e53ab78 100644
--- a/examples/4_train_policy_with_script.md
+++ b/examples/4_train_policy_with_script.md
@@ -181,7 +181,7 @@ outputs/train/run_resumption/checkpoints
 │   │   └── README.md # model card
 │   └── training_state.pth # optimizer/scheduler/rng state and training step
 .
-└── last -> /Users/simon/projects/lerobot/outputs/train/run_resumption/checkpoints/000500 # symlink to the last available checkpoint
+└── last # symlink to the last available checkpoint
 ```
 
 ## Fine-tuning a pre-trained policy
@@ -263,14 +263,6 @@ python lerobot/scripts/train.py \
 
 ---
 
-We've seen how to train Diffusion Policy for PushT and ACT for ALOHA. What if we want to train ACT for PushT?
-```bash
-python lerobot/scripts/train.py \
-  --policy.type=act \
-  --env.type=pusht \
-  --dataset.repo_id=lerobot/pusht
-```
-
 Now that you know the basics of how to train a policy, you might want to know how to apply this knowledge to actual robots, or how to record your own datasets and train policies on your specific task?
 If that's the case, head over to the next tutorial [`7_get_started_with_real_robot.md`](./7_get_started_with_real_robot.md).
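As an aside to the two patches above, the `--config_path` mechanism they document is backed by `TrainPipelineConfig.from_pretrained()` shown in [PATCH 57/94] above. The snippet below is a small usage sketch, not part of any patch: the checkpoint path and the `lerobot/diffusion_pusht` repo id are reused from the examples above, and the attribute names printed at the end are assumptions based on the config dataclasses in this series.

```python
# Sketch only (not part of the patch series): the programmatic equivalent of
# passing --config_path on the CLI, assuming TrainPipelineConfig.from_pretrained()
# as shown in PATCH 57/94.
from lerobot.configs.train import TrainPipelineConfig

# Load the consolidated train_config.json from a local checkpoint directory ...
cfg = TrainPipelineConfig.from_pretrained(
    "outputs/train/run_resumption/checkpoints/last/pretrained_model"
)

# ... or from a repo on the hub, forwarding CLI-style overrides to draccus.
cfg = TrainPipelineConfig.from_pretrained(
    "lerobot/diffusion_pusht", cli_args=["--offline.steps=200000"]
)

# Attribute names below are assumed from the config dataclasses in this series.
print(cfg.dataset.repo_id, cfg.offline.steps)
```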
From 105e9b8cd46e2a1cf0af1a11fff3b1be3b7ba981 Mon Sep 17 00:00:00 2001 From: Simon Alibert Date: Sat, 25 Jan 2025 22:35:23 +0100 Subject: [PATCH 65/94] Update README --- README.md | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/README.md b/README.md index 83ee54920d..5125ace593 100644 --- a/README.md +++ b/README.md @@ -122,10 +122,7 @@ wandb login ├── examples # contains demonstration examples, start here to learn about LeRobot | └── advanced # contains even more examples for those who have mastered the basics ├── lerobot -| ├── configs # contains hydra yaml files with all options that you can override in the command line -| | ├── default.yaml # selected by default, it loads pusht environment and diffusion policy -| | ├── env # various sim environments and their datasets: aloha.yaml, pusht.yaml, xarm.yaml -| | └── policy # various policies: act.yaml, diffusion.yaml, tdmpc.yaml +| ├── configs # contains config classes with all options that you can override in the command line | ├── common # contains classes and utilities | | ├── datasets # various datasets of human demonstrations: aloha, pusht, xarm | | ├── envs # various sim environments: aloha, pusht, xarm From b0771b7c2e6e4074afcecc567c861a5fb363e428 Mon Sep 17 00:00:00 2001 From: Simon Alibert Date: Sun, 26 Jan 2025 10:35:41 +0100 Subject: [PATCH 66/94] Make 'last' checkpoint symlink relative --- lerobot/common/logger.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/lerobot/common/logger.py b/lerobot/common/logger.py index adcb869b13..d93e5f4e2d 100644 --- a/lerobot/common/logger.py +++ b/lerobot/common/logger.py @@ -210,7 +210,9 @@ def save_checkpoint( checkpoint_dir / self.pretrained_model_dir_name, policy, wandb_artifact_name=wandb_artifact_name ) self.save_training_state(checkpoint_dir, train_step, optimizer, scheduler) - os.symlink(checkpoint_dir.absolute(), self.last_checkpoint_dir) + + relative_target = checkpoint_dir.relative_to(self.last_checkpoint_dir.parent) + self.last_checkpoint_dir.symlink_to(relative_target) def log_dict(self, d: dict, step: int, mode: str = "train"): assert mode in {"train", "eval"} From 8ac94290b1b961f6905fcf765a80764ccfcd1126 Mon Sep 17 00:00:00 2001 From: Simon Alibert Date: Sun, 26 Jan 2025 16:20:18 +0100 Subject: [PATCH 67/94] Simplify example 4 --- examples/4_train_policy_with_script.md | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/examples/4_train_policy_with_script.md b/examples/4_train_policy_with_script.md index 0a8e53ab78..45136355e4 100644 --- a/examples/4_train_policy_with_script.md +++ b/examples/4_train_policy_with_script.md @@ -111,14 +111,16 @@ Now, let's assume that we want to reproduce the run just above. That run has pro We can then simply load the config values from this file using: ```bash -python lerobot/scripts/train.py --config_path=outputs/train/act_aloha_transfer/checkpoints/last/pretrained_model/train_config.json \ +python lerobot/scripts/train.py \ + --config_path=outputs/train/act_aloha_transfer/checkpoints/last/pretrained_model/ \ --output_dir=outputs/train/act_aloha_transfer_2 ``` -`--config_path` is also a special argument which allows to initialize the config from a local config file. +`--config_path` is also a special argument which allows to initialize the config from a local config file. It can point to a directory that contains `train_config.json` or to the config file itself directly. 
Similarly to Hydra, we can still override some parameters in the CLI if we want to, e.g.: ```bash -python lerobot/scripts/train.py --config_path=outputs/train/act_aloha_transfer/checkpoints/last/pretrained_model/train_config.json \ +python lerobot/scripts/train.py \ + --config_path=outputs/train/act_aloha_transfer/checkpoints/last/pretrained_model/ \ --output_dir=outputs/train/act_aloha_transfer_2 --policy.n_action_steps=80 ``` @@ -154,17 +156,16 @@ INFO 2025-01-24 16:10:56 ts/train.py:263 Checkpoint policy after step 100 Now let's simulate a crash by killing the process (hit `ctrl`+`c`). We can then simply resume this run from the last checkpoint available with: ```bash python lerobot/scripts/train.py \ - --config_path=outputs/train/run_resumption/checkpoints/last/pretrained_model/train_config.json \ + --config_path=outputs/train/run_resumption/checkpoints/last/pretrained_model/ \ --resume=true ``` - You should see from the logging that your training picks up from where it left off. Another reason for which you might want to resume a run is simply to extend training and add more training steps. The number of training steps is set by the option `--offline.steps`, which is 100 000 by default. You could double the number of steps of the previous run with: ```bash python lerobot/scripts/train.py \ - --config_path=outputs/train/run_resumption/checkpoints/last/pretrained_model/train_config.json \ + --config_path=outputs/train/run_resumption/checkpoints/last/pretrained_model/ \ --resume=true \ --offline.steps=200000 ``` @@ -240,14 +241,14 @@ python lerobot/scripts/train.py \ #### Train a policy from scratch - config file + CLI ```bash python lerobot/scripts/train.py \ - --config_path=path/to/train_config.json \ # <- can also be a repo_id + --config_path=path/to/pretrained_model \ # <- can also be a repo_id --policy.n_action_steps=80 # <- you may still override values ``` #### Resume/continue a training run ```bash python lerobot/scripts/train.py \ - --config_path=checkpoint/pretrained_model/train_config.json \ + --config_path=checkpoint/pretrained_model/ \ --resume=true \ --offline.steps=200000 # <- you can change some training parameters ``` From 080d8b03ba03c6399e681a4b98814d8ce4f99e0b Mon Sep 17 00:00:00 2001 From: Simon Alibert Date: Sun, 26 Jan 2025 23:26:16 +0100 Subject: [PATCH 68/94] Update docstrings --- lerobot/common/logger.py | 1 - lerobot/common/policies/factory.py | 39 +++++++++++++++++++----------- lerobot/scripts/eval.py | 1 - 3 files changed, 25 insertions(+), 16 deletions(-) diff --git a/lerobot/common/logger.py b/lerobot/common/logger.py index d93e5f4e2d..5f863f68bb 100644 --- a/lerobot/common/logger.py +++ b/lerobot/common/logger.py @@ -75,7 +75,6 @@ class Logger: The logger creates the following directory structure: provided_log_dir - ├── .hydra # hydra's configuration cache ├── checkpoints │ ├── specific_checkpoint_name │ │ ├── pretrained_model # Hugging Face pretrained model directory diff --git a/lerobot/common/policies/factory.py b/lerobot/common/policies/factory.py index 896ef0a2a8..455565a196 100644 --- a/lerobot/common/policies/factory.py +++ b/lerobot/common/policies/factory.py @@ -14,7 +14,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import gymnasium as gym
+import logging
+
 from torch import nn
 
 from lerobot.common.datasets.lerobot_dataset import LeRobotDatasetMetadata
@@ -69,21 +70,30 @@ def make_policy(
     cfg: PreTrainedConfig,
     device: str,
     ds_meta: LeRobotDatasetMetadata | None = None,
-    env: gym.Env | None = None,
     env_cfg: EnvConfig | None = None,
 ) -> PreTrainedPolicy:
     """Make an instance of a policy class.
 
+    This function exists because (for now) we need to parse features from either a dataset or an environment
+    in order to properly dimension and instantiate a policy for that dataset or environment.
+
     Args:
-        cfg (MainConfig): A MainConfig instance (see scripts). If `pretrained_policy_name_or_path` is
-            provided, only `cfg.policy.type` is used while everything else is ignored.
-        ds_meta (LeRobotDatasetMetadata): Dataset metadata to take input/output shapes and statistics to use
-            for (un)normalization of inputs/outputs in the policy.
-        pretrained_policy_name_or_path: Either the repo ID of a model hosted on the Hub or a path to a
-            directory containing weights saved using `Policy.save_pretrained`. Note that providing this
-            argument overrides everything in `hydra_cfg.policy` apart from `hydra_cfg.policy.type`.
+        cfg (PreTrainedConfig): The config of the policy to make. If `pretrained_path` is set, the policy will
+            be loaded with the weights from that path.
+        device (str): the device to load the policy onto.
+        ds_meta (LeRobotDatasetMetadata | None, optional): Dataset metadata to take input/output shapes and
+            statistics to use for (un)normalization of inputs/outputs in the policy. Defaults to None.
+        env_cfg (EnvConfig | None, optional): The config of a gym environment to parse features from. Must be
+            provided if ds_meta is not. Defaults to None.
+
+    Raises:
+        ValueError: if neither ds_meta nor env_cfg is provided, or if both are.
+        NotImplementedError: if the policy.type is 'vqbet' and the device is 'mps' (due to an incompatibility).
+
+    Returns:
+        PreTrainedPolicy: the instantiated policy, with pretrained weights loaded when `cfg.pretrained_path` is set.
     """
-    if not (ds_meta is None) ^ (env is None and env_cfg is None):
+    if not (ds_meta is None) ^ (env_cfg is None):
         raise ValueError("Either one of a dataset metadata or a sim env must be provided.")
 
     # NOTE: Currently, if you try to run vqbet with mps backend, you'll get this error.
@@ -106,10 +116,11 @@ def make_policy(
         features = dataset_to_policy_features(ds_meta.features)
         kwargs["dataset_stats"] = ds_meta.stats
     else:
-        if not cfg.pretrained_path or not cfg.output_features or not cfg.input_features:
-            raise NotImplementedError(
-                "The policy must have already existing features in its config when initializing it "
-                "with an environment."
+        if not cfg.pretrained_path:
+            logging.warning(
+                "You are instantiating a policy from scratch and its features are parsed from an environment "
+                "rather than a dataset. Normalization modules inside the policy will have infinite values "
+                "by default without stats from a dataset."
) features = env_to_policy_features(env_cfg) diff --git a/lerobot/scripts/eval.py b/lerobot/scripts/eval.py index 1716ea43bd..30b8003a9a 100644 --- a/lerobot/scripts/eval.py +++ b/lerobot/scripts/eval.py @@ -465,7 +465,6 @@ def eval(cfg: EvalPipelineConfig): policy = make_policy( cfg=cfg.policy, device=device, - env=env, env_cfg=cfg.env, ) policy.eval() From c53167cbb2831cb7170f39e31da000937bc5d45f Mon Sep 17 00:00:00 2001 From: Simon Alibert Date: Sun, 26 Jan 2025 23:27:38 +0100 Subject: [PATCH 69/94] Change default device selection, add warnings & errors --- lerobot/common/utils/utils.py | 13 ++++++++++ lerobot/configs/default.py | 20 +++++++++++++++ lerobot/configs/eval.py | 46 +++++++++++++++++++---------------- lerobot/configs/train.py | 12 ++++++--- 4 files changed, 67 insertions(+), 24 deletions(-) diff --git a/lerobot/common/utils/utils.py b/lerobot/common/utils/utils.py index 235b30cb36..329b8b8a21 100644 --- a/lerobot/common/utils/utils.py +++ b/lerobot/common/utils/utils.py @@ -40,6 +40,19 @@ def inside_slurm(): return "SLURM_JOB_ID" in os.environ +def auto_select_torch_device() -> torch.device: + """Tries to select automatically a torch device.""" + if torch.cuda.is_available(): + logging.log("Cuda backend detected, using cuda.") + return torch.device("cuda") + elif torch.backends.mps.is_available(): + logging.log("Metal backend detected, using cuda.") + return torch.device("mps") + else: + logging.warning("No accelerated backend detected. Using default cpu, this will be slow.") + return torch.device("cpu") + + def get_safe_torch_device(try_device: str, log: bool = False) -> torch.device: """Given a string, return a torch.device with checks on whether the device is available.""" match try_device: diff --git a/lerobot/configs/default.py b/lerobot/configs/default.py index 86987021d5..5dd2f898a0 100644 --- a/lerobot/configs/default.py +++ b/lerobot/configs/default.py @@ -44,3 +44,23 @@ class WandBConfig: project: str = "lerobot" entity: str | None = None notes: str | None = None + + +@dataclass +class EvalConfig: + n_episodes: int = 50 + # `batch_size` specifies the number of environments to use in a gym.vector.VectorEnv. + batch_size: int = 50 + # `use_async_envs` specifies whether to use asynchronous environments (multiprocessing). + use_async_envs: bool = False + + def __post_init__(self): + if self.batch_size > self.n_episodes: + raise ValueError( + "The eval batch size is greater than the number of eval episodes " + f"({self.batch_size} > {self.n_episodes}). As a result, {self.batch_size} " + f"eval environments will be instantiated, but only {self.n_episodes} will be used. " + "This might significantly slow down evaluation. To fix this, you should update your command " + f"to increase the number of episodes to match the batch size (e.g. `eval.n_episodes={self.batch_size}`), " + f"or lower the batch size (e.g. `eval.batch_size={self.n_episodes}`)." 
+ ) diff --git a/lerobot/configs/eval.py b/lerobot/configs/eval.py index abb29beef9..a815d57d1b 100644 --- a/lerobot/configs/eval.py +++ b/lerobot/configs/eval.py @@ -1,30 +1,14 @@ import datetime as dt +import logging from dataclasses import dataclass, field from pathlib import Path from lerobot.common import envs, policies # noqa: F401 +from lerobot.common.utils.utils import auto_select_torch_device from lerobot.configs import parser +from lerobot.configs.default import EvalConfig from lerobot.configs.policies import PreTrainedConfig - - -@dataclass -class EvalConfig: - n_episodes: int = 50 - # `batch_size` specifies the number of environments to use in a gym.vector.VectorEnv. - batch_size: int = 50 - # `use_async_envs` specifies whether to use asynchronous environments (multiprocessing). - use_async_envs: bool = False - - def __post_init__(self): - if self.batch_size > self.n_episodes: - raise ValueError( - "The eval batch size is greater than the number of eval episodes " - f"({self.batch_size} > {self.n_episodes}). As a result, {self.batch_size} " - f"eval environments will be instantiated, but only {self.n_episodes} will be used. " - "This might significantly slow down evaluation. To fix this, you should update your command " - f"to increase the number of episodes to match the batch size (e.g. `eval.n_episodes={self.batch_size}`), " - f"or lower the batch size (e.g. `eval.batch_size={self.n_episodes}`)." - ) +from lerobot.configs.train import TrainPipelineConfig @dataclass @@ -41,10 +25,15 @@ class EvalPipelineConfig: device: str | None = None # cuda | cpu | mps # `use_amp` determines whether to use Automatic Mixed Precision (AMP) for training and evaluation. With AMP, # automatic gradient scaling is used. - use_amp: bool | None = None + use_amp: bool = False seed: int | None = 1000 def __post_init__(self): + if not self.device: + logging.warning("No device specified, trying to infer device automatically") + device = auto_select_torch_device() + self.device = device.type + if self.use_amp and self.device not in ["cuda", "cpu"]: raise NotImplementedError( "Automatic Mixed Precision (amp) is only available for 'cuda' and 'cpu' devices. " @@ -57,6 +46,21 @@ def __post_init__(self): cli_overrides = parser.get_cli_overrides("policy") self.policy = PreTrainedConfig.from_pretrained(policy_path, cli_overrides=cli_overrides) self.policy.pretrained_path = policy_path + pretrained_cfg = TrainPipelineConfig.from_pretrained(policy_path) + if self.use_amp != pretrained_cfg.use_amp: + raise ValueError( + f"The policy you are trying to load has been trained with use_amp={pretrained_cfg.use_amp} " + f"but you're trying to evaluate it with use_amp={self.use_amp}" + ) + if self.device == "mps" and pretrained_cfg.device == "cuda": + logging.warning( + "You are loading a policy that has been trained with a Cuda kernel on a Metal backend." + "This is lilely to produced unexpected results due to differences between these two kernels." + ) + else: + logging.warning( + "No pretrained path was provided, evaluated policy will be built from scratch (random weights)." 
+ ) if not self.job_name: if self.env is None: diff --git a/lerobot/configs/train.py b/lerobot/configs/train.py index 124195fd59..41030e4dc7 100644 --- a/lerobot/configs/train.py +++ b/lerobot/configs/train.py @@ -1,4 +1,5 @@ import datetime as dt +import logging import os from dataclasses import dataclass, field from pathlib import Path @@ -12,9 +13,9 @@ from lerobot.common.optim import OptimizerConfig from lerobot.common.optim.schedulers import LRSchedulerConfig from lerobot.common.utils.hub import HubMixin +from lerobot.common.utils.utils import auto_select_torch_device from lerobot.configs import parser -from lerobot.configs.default import DatasetConfig, WandBConfig -from lerobot.configs.eval import EvalConfig +from lerobot.configs.default import DatasetConfig, EvalConfig, WandBConfig from lerobot.configs.policies import PreTrainedConfig TRAIN_CONFIG_NAME = "train_config.json" @@ -96,7 +97,7 @@ class TrainPipelineConfig(HubMixin): # Note that when resuming a run, the default behavior is to use the configuration from the checkpoint, # regardless of what's provided with the training command at the time of resumption. resume: bool = False - device: str = "cuda" # | cpu | mp + device: str | None = None # cuda | cpu | mp # `use_amp` determines whether to use Automatic Mixed Precision (AMP) for training and evaluation. With AMP, # automatic gradient scaling is used. use_amp: bool = False @@ -122,6 +123,11 @@ class TrainPipelineConfig(HubMixin): def __post_init__(self): self.checkpoint_path = None + if not self.device: + logging.warning("No device specified, trying to infer device automatically") + device = auto_select_torch_device() + self.device = device.type + if self.use_amp and self.device not in ["cuda", "cpu"]: raise NotImplementedError( "Automatic Mixed Precision (amp) is only available for 'cuda' and 'cpu' devices. " From 967ae993fe4cacc52bdc7674a8b27116696bcd2d Mon Sep 17 00:00:00 2001 From: Simon Alibert Date: Mon, 27 Jan 2025 17:43:03 +0100 Subject: [PATCH 70/94] Use validate instead of __post_init__ --- lerobot/configs/eval.py | 8 ++++---- lerobot/configs/train.py | 9 +++++++-- lerobot/scripts/train.py | 2 ++ 3 files changed, 13 insertions(+), 6 deletions(-) diff --git a/lerobot/configs/eval.py b/lerobot/configs/eval.py index a815d57d1b..e019cde7a4 100644 --- a/lerobot/configs/eval.py +++ b/lerobot/configs/eval.py @@ -46,13 +46,13 @@ def __post_init__(self): cli_overrides = parser.get_cli_overrides("policy") self.policy = PreTrainedConfig.from_pretrained(policy_path, cli_overrides=cli_overrides) self.policy.pretrained_path = policy_path - pretrained_cfg = TrainPipelineConfig.from_pretrained(policy_path) - if self.use_amp != pretrained_cfg.use_amp: + train_cfg = TrainPipelineConfig.from_pretrained(policy_path, validate=False) + if self.use_amp != train_cfg.use_amp: raise ValueError( - f"The policy you are trying to load has been trained with use_amp={pretrained_cfg.use_amp} " + f"The policy you are trying to load has been trained with use_amp={train_cfg.use_amp} " f"but you're trying to evaluate it with use_amp={self.use_amp}" ) - if self.device == "mps" and pretrained_cfg.device == "cuda": + if self.device == "mps" and train_cfg.device == "cuda": logging.warning( "You are loading a policy that has been trained with a Cuda kernel on a Metal backend." "This is lilely to produced unexpected results due to differences between these two kernels." 
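For reference, here is a standalone sketch of the device auto-selection behavior that the eval and train config changes above rely on. It is not part of the series; compared with `auto_select_torch_device()` as added in `lerobot/common/utils/utils.py`, it calls `logging.info()` (the stdlib `logging.log()` expects a level as its first argument) and reports `mps` rather than `cuda` on the Metal branch.

```python
# Standalone sketch (not part of the patches) of the intended device auto-selection.
import logging

import torch


def auto_select_torch_device() -> torch.device:
    """Prefer cuda, then mps, and fall back to cpu."""
    if torch.cuda.is_available():
        logging.info("Cuda backend detected, using cuda.")
        return torch.device("cuda")
    if torch.backends.mps.is_available():
        logging.info("Metal backend detected, using mps.")
        return torch.device("mps")
    logging.warning("No accelerated backend detected. Using default cpu, this will be slow.")
    return torch.device("cpu")


# Used as the fallback when no --device is passed on the CLI.
device = auto_select_torch_device()
```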
diff --git a/lerobot/configs/train.py b/lerobot/configs/train.py index 41030e4dc7..870f421c72 100644 --- a/lerobot/configs/train.py +++ b/lerobot/configs/train.py @@ -123,6 +123,7 @@ class TrainPipelineConfig(HubMixin): def __post_init__(self): self.checkpoint_path = None + def validate(self): if not self.device: logging.warning("No device specified, trying to infer device automatically") device = auto_select_torch_device() @@ -197,6 +198,7 @@ def from_pretrained( cache_dir: str | Path | None = None, local_files_only: bool = False, revision: str | None = None, + validate: bool = True, **kwargs, ) -> "TrainPipelineConfig": model_id = str(pretrained_name_or_path) @@ -224,5 +226,8 @@ def from_pretrained( except HfHubHTTPError as e: print(f"config.json not found on the HuggingFace Hub: {str(e)}") - cli_args = kwargs.pop("cli_args", []) - return draccus.parse(cls, config_file, args=cli_args) + cfg = draccus.parse(cls, config_file, args=[]) + if validate: + cfg.validate() + + return cfg diff --git a/lerobot/scripts/train.py b/lerobot/scripts/train.py index 78facdeea6..765218765b 100644 --- a/lerobot/scripts/train.py +++ b/lerobot/scripts/train.py @@ -182,6 +182,8 @@ def log_eval_info(logger, info, step, cfg, dataset, is_online): @parser.wrap() def train(cfg: TrainPipelineConfig): + cfg.validate() + init_logging() logging.info(pformat(asdict(cfg))) From 56a6f5814f81b98bf33ecf412ce817e36fe70f46 Mon Sep 17 00:00:00 2001 From: Simon Alibert Date: Mon, 27 Jan 2025 17:47:25 +0100 Subject: [PATCH 71/94] Fix --- lerobot/configs/train.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lerobot/configs/train.py b/lerobot/configs/train.py index 870f421c72..ab0579a2f0 100644 --- a/lerobot/configs/train.py +++ b/lerobot/configs/train.py @@ -226,7 +226,8 @@ def from_pretrained( except HfHubHTTPError as e: print(f"config.json not found on the HuggingFace Hub: {str(e)}") - cfg = draccus.parse(cls, config_file, args=[]) + cli_args = kwargs.pop("cli_args", []) + cfg = draccus.parse(cls, config_file, args=cli_args) if validate: cfg.validate() From 8cca5bd02fc6b9cc024604851e7782e47a3b9428 Mon Sep 17 00:00:00 2001 From: Simon Alibert Date: Mon, 27 Jan 2025 17:53:06 +0100 Subject: [PATCH 72/94] Remove validate --- lerobot/configs/eval.py | 2 +- lerobot/configs/train.py | 3 --- 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/lerobot/configs/eval.py b/lerobot/configs/eval.py index e019cde7a4..c95929e136 100644 --- a/lerobot/configs/eval.py +++ b/lerobot/configs/eval.py @@ -46,7 +46,7 @@ def __post_init__(self): cli_overrides = parser.get_cli_overrides("policy") self.policy = PreTrainedConfig.from_pretrained(policy_path, cli_overrides=cli_overrides) self.policy.pretrained_path = policy_path - train_cfg = TrainPipelineConfig.from_pretrained(policy_path, validate=False) + train_cfg = TrainPipelineConfig.from_pretrained(policy_path) if self.use_amp != train_cfg.use_amp: raise ValueError( f"The policy you are trying to load has been trained with use_amp={train_cfg.use_amp} " diff --git a/lerobot/configs/train.py b/lerobot/configs/train.py index ab0579a2f0..b1342500c9 100644 --- a/lerobot/configs/train.py +++ b/lerobot/configs/train.py @@ -198,7 +198,6 @@ def from_pretrained( cache_dir: str | Path | None = None, local_files_only: bool = False, revision: str | None = None, - validate: bool = True, **kwargs, ) -> "TrainPipelineConfig": model_id = str(pretrained_name_or_path) @@ -228,7 +227,5 @@ def from_pretrained( cli_args = kwargs.pop("cli_args", []) cfg = draccus.parse(cls, config_file, 
args=cli_args) - if validate: - cfg.validate() return cfg From d06505f210990f5f6dc2efc1d168aa4f511d9395 Mon Sep 17 00:00:00 2001 From: Simon Alibert Date: Mon, 27 Jan 2025 18:56:12 +0100 Subject: [PATCH 73/94] Fix tests --- tests/scripts/save_policy_to_safetensors.py | 1 + tests/test_policies.py | 1 + 2 files changed, 2 insertions(+) diff --git a/tests/scripts/save_policy_to_safetensors.py b/tests/scripts/save_policy_to_safetensors.py index b921b4738a..61e7b10b9d 100644 --- a/tests/scripts/save_policy_to_safetensors.py +++ b/tests/scripts/save_policy_to_safetensors.py @@ -38,6 +38,7 @@ def get_policy_stats(ds_repo_id, env_name, policy_name, policy_kwargs, train_kwa device="cpu", **train_kwargs, ) + train_cfg.validate() # Needed for auto-setting some parameters dataset = make_dataset(train_cfg) policy = make_policy(train_cfg.policy, ds_meta=dataset.meta, device=train_cfg.device) diff --git a/tests/test_policies.py b/tests/test_policies.py index bf6eabcc7c..f4b4d6dac1 100644 --- a/tests/test_policies.py +++ b/tests/test_policies.py @@ -214,6 +214,7 @@ def test_act_backbone_lr(): policy=make_policy_config("act", optimizer_lr=0.01, optimizer_lr_backbone=0.001), device=DEVICE, ) + cfg.validate() # Needed for auto-setting some parameters assert cfg.policy.optimizer_lr == 0.01 assert cfg.policy.optimizer_lr_backbone == 0.001 From 85c6f3ae07ee159d5033a08ed61ebf059b032dea Mon Sep 17 00:00:00 2001 From: Simon Alibert Date: Tue, 28 Jan 2025 08:40:58 +0100 Subject: [PATCH 74/94] Skip push_dataset_to_hub tests --- tests/test_push_dataset_to_hub.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/test_push_dataset_to_hub.py b/tests/test_push_dataset_to_hub.py index ff630ab66c..a0c8d90837 100644 --- a/tests/test_push_dataset_to_hub.py +++ b/tests/test_push_dataset_to_hub.py @@ -229,11 +229,13 @@ def _mock_download_raw(raw_dir, repo_id): raise ValueError(repo_id) +@pytest.mark.skip("push_dataset_to_hub is deprecated") def test_push_dataset_to_hub_invalid_repo_id(tmpdir): with pytest.raises(ValueError): push_dataset_to_hub(Path(tmpdir), "raw_format", "invalid_repo_id") +@pytest.mark.skip("push_dataset_to_hub is deprecated") def test_push_dataset_to_hub_out_dir_force_override_false(tmpdir): tmpdir = Path(tmpdir) out_dir = tmpdir / "out" @@ -250,7 +252,7 @@ def test_push_dataset_to_hub_out_dir_force_override_false(tmpdir): ) -@pytest.mark.skip("TODO after v2 migration / removing hydra") +@pytest.mark.skip("push_dataset_to_hub is deprecated") @pytest.mark.parametrize( "required_packages, raw_format, repo_id, make_test_data", [ @@ -318,6 +320,7 @@ def test_push_dataset_to_hub_format(required_packages, tmpdir, raw_format, repo_ assert torch.equal(test_dataset.episode_data_index[k], lerobot_dataset.episode_data_index[k][:1]) +@pytest.mark.skip("push_dataset_to_hub is deprecated") @pytest.mark.parametrize( "raw_format, repo_id", [ @@ -329,9 +332,6 @@ def test_push_dataset_to_hub_format(required_packages, tmpdir, raw_format, repo_ ("dora_parquet", "cadene/wrist_gripper"), ], ) -@pytest.mark.skip( - "Not compatible with our CI since it downloads raw datasets. 
Run with `python -m pytest --run-skipped tests/test_push_dataset_to_hub.py::test_push_dataset_to_hub_pusht_backward_compatibility`" -) def test_push_dataset_to_hub_pusht_backward_compatibility(tmpdir, raw_format, repo_id): _, dataset_id = repo_id.split("/") From ad458b6e06baf0b604fb919aedb02135f0f2788e Mon Sep 17 00:00:00 2001 From: Simon Alibert Date: Tue, 28 Jan 2025 09:25:56 +0100 Subject: [PATCH 75/94] Add exceptions --- lerobot/common/policies/pretrained.py | 30 ++++++++++++++++----------- lerobot/configs/policies.py | 4 +++- lerobot/configs/train.py | 4 +++- 3 files changed, 24 insertions(+), 14 deletions(-) diff --git a/lerobot/common/policies/pretrained.py b/lerobot/common/policies/pretrained.py index 288f139b3d..8476759464 100644 --- a/lerobot/common/policies/pretrained.py +++ b/lerobot/common/policies/pretrained.py @@ -8,6 +8,7 @@ import safetensors from huggingface_hub import hf_hub_download from huggingface_hub.constants import SAFETENSORS_SINGLE_FILE +from huggingface_hub.errors import HfHubHTTPError from safetensors.torch import load_model as load_model_as_safetensor from safetensors.torch import save_model as save_model_as_safetensor from torch import Tensor, nn @@ -99,18 +100,23 @@ def from_pretrained( model_file = os.path.join(model_id, SAFETENSORS_SINGLE_FILE) policy = cls._load_as_safetensor(instance, model_file, map_location, strict) else: - model_file = hf_hub_download( - repo_id=model_id, - filename=SAFETENSORS_SINGLE_FILE, - revision=revision, - cache_dir=cache_dir, - force_download=force_download, - proxies=proxies, - resume_download=resume_download, - token=token, - local_files_only=local_files_only, - ) - policy = cls._load_as_safetensor(instance, model_file, map_location, strict) + try: + model_file = hf_hub_download( + repo_id=model_id, + filename=SAFETENSORS_SINGLE_FILE, + revision=revision, + cache_dir=cache_dir, + force_download=force_download, + proxies=proxies, + resume_download=resume_download, + token=token, + local_files_only=local_files_only, + ) + policy = cls._load_as_safetensor(instance, model_file, map_location, strict) + except HfHubHTTPError as e: + raise FileNotFoundError( + f"{SAFETENSORS_SINGLE_FILE} not found on the HuggingFace Hub in {model_id}" + ) from e policy.to(map_location) policy.eval() diff --git a/lerobot/configs/policies.py b/lerobot/configs/policies.py index 1ba9d0387f..9b5a7c5c91 100644 --- a/lerobot/configs/policies.py +++ b/lerobot/configs/policies.py @@ -135,7 +135,9 @@ def from_pretrained( local_files_only=local_files_only, ) except HfHubHTTPError as e: - print(f"config.json not found on the HuggingFace Hub: {str(e)}") + raise FileNotFoundError( + f"{CONFIG_NAME} not found on the HuggingFace Hub in {model_id}" + ) from e # HACK: this is very ugly, ideally we'd like to be able to do that natively with draccus # something like --policy.path (in addition to --policy.type) diff --git a/lerobot/configs/train.py b/lerobot/configs/train.py index b1342500c9..d31e6ff20a 100644 --- a/lerobot/configs/train.py +++ b/lerobot/configs/train.py @@ -223,7 +223,9 @@ def from_pretrained( local_files_only=local_files_only, ) except HfHubHTTPError as e: - print(f"config.json not found on the HuggingFace Hub: {str(e)}") + raise FileNotFoundError( + f"{TRAIN_CONFIG_NAME} not found on the HuggingFace Hub in {model_id}" + ) from e cli_args = kwargs.pop("cli_args", []) cfg = draccus.parse(cls, config_file, args=cli_args) From 3551dc501596d95846529c4140b5ac1155f9a344 Mon Sep 17 00:00:00 2001 From: Simon Alibert Date: Tue, 28 Jan 2025 09:55:46 +0100 
Subject: [PATCH 76/94] Update factories docstrings --- lerobot/common/datasets/factory.py | 26 ++++++++++++++++++++------ lerobot/common/envs/factory.py | 18 +++++++++++++----- lerobot/common/optim/factory.py | 9 +++++++++ 3 files changed, 42 insertions(+), 11 deletions(-) diff --git a/lerobot/common/datasets/factory.py b/lerobot/common/datasets/factory.py index 1b5863466d..58ff400e46 100644 --- a/lerobot/common/datasets/factory.py +++ b/lerobot/common/datasets/factory.py @@ -36,10 +36,20 @@ def resolve_delta_timestamps( cfg: PreTrainedConfig, ds_meta: LeRobotDatasetMetadata ) -> dict[str, list] | None: - """Resolves delta_timestamps config key (in-place) by using `eval`. + """Resolves delta_timestamps by reading from the 'delta_indices' properties of the PreTrainedConfig. - Doesn't do anything if delta_timestamps is not specified or has already been resolve (as evidenced by - the data type of its values). + Args: + cfg (PreTrainedConfig): The PreTrainedConfig to read delta_indices from. + ds_meta (LeRobotDatasetMetadata): The dataset from which features and fps are used to build + delta_timestamps against. + + Returns: + dict[str, list] | None: A dictionary of delta_timestamps, e.g.: + { + "observation.state": [-0.04, -0.02, 0] + "observation.action": [-0.02, 0, 0.02] + } + returns `None` if the the resulting dict is empty. """ delta_timestamps = {} for key in ds_meta.features: @@ -57,12 +67,16 @@ def resolve_delta_timestamps( def make_dataset(cfg: TrainPipelineConfig) -> LeRobotDataset | MultiLeRobotDataset: - """ + """Handles the logic of setting up delta timestamps and image transforms before creating a dataset. + Args: - cfg: A TrainPipelineConfig config which contains a DatasetConfig and a PreTrainedConfig. + cfg (TrainPipelineConfig): A TrainPipelineConfig config which contains a DatasetConfig and a PreTrainedConfig. + + Raises: + NotImplementedError: The MultiLeRobotDataset is currently deactivated. Returns: - A LeRobotDataset. + LeRobotDataset | MultiLeRobotDataset """ image_transforms = ( ImageTransforms(cfg.dataset.image_transforms) if cfg.dataset.image_transforms.enable else None diff --git a/lerobot/common/envs/factory.py b/lerobot/common/envs/factory.py index f505f891a9..49239363c7 100644 --- a/lerobot/common/envs/factory.py +++ b/lerobot/common/envs/factory.py @@ -32,13 +32,21 @@ def make_env_config(env_type: str, **kwargs) -> EnvConfig: def make_env(cfg: EnvConfig, n_envs: int = 1, use_async_envs: bool = False) -> gym.vector.VectorEnv | None: - """Makes a gym vector environment according to the evaluation config. + """Makes a gym vector environment according to the config. - n_envs can be used to override eval.batch_size in the configuration. Must be at least 1. - """ - if cfg is None: - return + Args: + cfg (EnvConfig): the config of the environment to instantiate. + n_envs (int, optional): The number of parallelized env to return. Defaults to 1. + use_async_envs (bool, optional): Wether to return an AsyncVectorEnv or a SyncVectorEnv. Defaults to + False. + + Raises: + ValueError: if n_envs < 1 + ModuleNotFoundError: If the requested env package is not intalled + Returns: + gym.vector.VectorEnv: The parallelized gym.env instance. 
+ """ if n_envs < 1: raise ValueError("`n_envs must be at least 1") diff --git a/lerobot/common/optim/factory.py b/lerobot/common/optim/factory.py index c0ebc7b62a..9078284f6f 100644 --- a/lerobot/common/optim/factory.py +++ b/lerobot/common/optim/factory.py @@ -29,6 +29,15 @@ def make_optimizer_and_scheduler( cfg: TrainPipelineConfig, policy: PreTrainedPolicy ) -> tuple[Optimizer, LRScheduler | None]: + """Generates the optimizer and scheduler based on configs. + + Args: + cfg (TrainPipelineConfig): The training config that contains optimizer and scheduler configs + policy (PreTrainedPolicy): The policy config from which parameters and presets must be taken from. + + Returns: + tuple[Optimizer, LRScheduler | None]: The couple (Optimizer, Scheduler). Scheduler can be `None`. + """ params = policy.get_optim_params() if cfg.use_policy_training_preset else policy.parameters() optimizer = cfg.optimizer.build(params) lr_scheduler = cfg.scheduler.build(optimizer, cfg.offline.steps) if cfg.scheduler is not None else None From d86fc236900ed90088ca13e11b01a8851eef7e11 Mon Sep 17 00:00:00 2001 From: Simon Alibert Date: Tue, 28 Jan 2025 09:56:39 +0100 Subject: [PATCH 77/94] Update validations --- lerobot/configs/train.py | 7 +++++-- lerobot/scripts/train.py | 2 +- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/lerobot/configs/train.py b/lerobot/configs/train.py index d31e6ff20a..1021a3fcdd 100644 --- a/lerobot/configs/train.py +++ b/lerobot/configs/train.py @@ -168,8 +168,11 @@ def validate(self): train_dir = f"{now:%Y-%m-%d}/{now:%H-%M-%S}_{self.job_name}" self.output_dir = Path("outputs/train") / train_dir - if self.online.steps > 0 and isinstance(self.dataset.repo_id, list): - raise NotImplementedError("Online training with LeRobotMultiDataset is not implemented.") + if self.online.steps > 0: + if isinstance(self.dataset.repo_id, list): + raise NotImplementedError("Online training with LeRobotMultiDataset is not implemented.") + if self.env is None: + raise ValueError("An environment is required for online training") if not self.use_policy_training_preset and (self.optimizer is None or self.scheduler is None): raise ValueError("Optimizer and Scheduler must be set when the policy presets are not used.") diff --git a/lerobot/scripts/train.py b/lerobot/scripts/train.py index 765218765b..733e525ae1 100644 --- a/lerobot/scripts/train.py +++ b/lerobot/scripts/train.py @@ -206,7 +206,7 @@ def train(cfg: TrainPipelineConfig): # On real-world data, no need to create an environment as evaluations are done outside train.py, # using the eval.py instead, with gym_dora environment and dora-rs. 
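For reference, the arithmetic behind the delta_timestamps resolution documented in the previous patch is a division of per-feature frame offsets by the dataset fps. The helper below is an illustrative sketch, not the lerobot API: the real resolve_delta_timestamps takes the offsets from a PreTrainedConfig's delta_indices properties and the fps and feature names from LeRobotDatasetMetadata.

```python
# Illustrative helper (not the lerobot API): convert frame offsets into seconds
# relative to the current frame, returning None when nothing is requested.
def indices_to_timestamps(delta_indices: dict[str, list[int]], fps: int) -> dict[str, list[float]] | None:
    delta_timestamps = {key: [i / fps for i in indices] for key, indices in delta_indices.items()}
    return delta_timestamps or None


# Example: a policy that consumes the previous and current observation and predicts
# the current and next action, on a 50 fps dataset.
print(indices_to_timestamps({"observation.state": [-1, 0], "action": [0, 1]}, fps=50))
# {'observation.state': [-0.02, 0.0], 'action': [0.0, 0.02]}
```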
eval_env = None - if cfg.eval_freq > 0 or cfg.env is None: + if cfg.eval_freq > 0 and cfg.env is not None: logging.info("Creating env") eval_env = make_env(cfg.env, n_envs=cfg.eval.batch_size) From 940e9d8932bb2bd02abec3ae510f3d2dd2a0a23d Mon Sep 17 00:00:00 2001 From: Simon Alibert Date: Tue, 28 Jan 2025 11:37:38 +0100 Subject: [PATCH 78/94] Remove deprecated config files --- lerobot/configs/_deprecated/default.yaml | 130 ------------------ lerobot/configs/_deprecated/policy/act.yaml | 82 ----------- .../_deprecated/policy/act_aloha_real.yaml | 114 --------------- .../_deprecated/policy/act_koch_real.yaml | 102 -------------- .../_deprecated/policy/act_moss_real.yaml | 102 -------------- .../_deprecated/policy/act_so100_real.yaml | 102 -------------- .../configs/_deprecated/policy/diffusion.yaml | 104 -------------- .../policy/diffusion_pusht_keypoints.yaml | 110 --------------- lerobot/configs/_deprecated/policy/tdmpc.yaml | 93 ------------- .../policy/tdmpc_pusht_keypoints.yaml | 105 -------------- lerobot/configs/_deprecated/policy/vqbet.yaml | 103 -------------- lerobot/configs/_deprecated/robot/aloha.yaml | 117 ---------------- lerobot/configs/_deprecated/robot/koch.yaml | 53 ------- .../_deprecated/robot/koch_bimanual.yaml | 75 ---------- lerobot/configs/_deprecated/robot/moss.yaml | 56 -------- lerobot/configs/_deprecated/robot/so100.yaml | 56 -------- .../configs/_deprecated/robot/stretch.yaml | 33 ----- 17 files changed, 1537 deletions(-) delete mode 100644 lerobot/configs/_deprecated/default.yaml delete mode 100644 lerobot/configs/_deprecated/policy/act.yaml delete mode 100644 lerobot/configs/_deprecated/policy/act_aloha_real.yaml delete mode 100644 lerobot/configs/_deprecated/policy/act_koch_real.yaml delete mode 100644 lerobot/configs/_deprecated/policy/act_moss_real.yaml delete mode 100644 lerobot/configs/_deprecated/policy/act_so100_real.yaml delete mode 100644 lerobot/configs/_deprecated/policy/diffusion.yaml delete mode 100644 lerobot/configs/_deprecated/policy/diffusion_pusht_keypoints.yaml delete mode 100644 lerobot/configs/_deprecated/policy/tdmpc.yaml delete mode 100644 lerobot/configs/_deprecated/policy/tdmpc_pusht_keypoints.yaml delete mode 100644 lerobot/configs/_deprecated/policy/vqbet.yaml delete mode 100644 lerobot/configs/_deprecated/robot/aloha.yaml delete mode 100644 lerobot/configs/_deprecated/robot/koch.yaml delete mode 100644 lerobot/configs/_deprecated/robot/koch_bimanual.yaml delete mode 100644 lerobot/configs/_deprecated/robot/moss.yaml delete mode 100644 lerobot/configs/_deprecated/robot/so100.yaml delete mode 100644 lerobot/configs/_deprecated/robot/stretch.yaml diff --git a/lerobot/configs/_deprecated/default.yaml b/lerobot/configs/_deprecated/default.yaml deleted file mode 100644 index a3ff1d41b2..0000000000 --- a/lerobot/configs/_deprecated/default.yaml +++ /dev/null @@ -1,130 +0,0 @@ -defaults: - - _self_ - - env: pusht - - policy: diffusion - -hydra: - run: - # Set `dir` to where you would like to save all of the run outputs. If you run another training session - # with the same value for `dir` its contents will be overwritten unless you set `resume` to true. - dir: outputs/train/${now:%Y-%m-%d}/${now:%H-%M-%S}_${env.name}_${policy.name}_${hydra.job.name} - job: - name: default - -# Set `resume` to true to resume a previous run. In order for this to work, you will need to make sure -# `hydra.run.dir` is the directory of an existing run with at least one checkpoint in it. 
-# Note that when resuming a run, the default behavior is to use the configuration from the checkpoint, -# regardless of what's provided with the training command at the time of resumption. -resume: false -device: cuda # cpu -# `use_amp` determines whether to use Automatic Mixed Precision (AMP) for training and evaluation. With AMP, -# automatic gradient scaling is used. -use_amp: false -# `seed` is used for training (eg: model initialization, dataset shuffling) -# AND for the evaluation environments. -seed: ??? -# You may provide a list of datasets here. `train.py` creates them all and concatenates them. Note: only data -# keys common between the datasets are kept. Each dataset gets and additional transform that inserts the -# "dataset_index" into the returned item. The index mapping is made according to the order in which the -# datsets are provided. -dataset_repo_id: lerobot/pusht -video_backend: pyav - -training: - offline_steps: ??? - - # Number of workers for the offline training dataloader. - num_workers: 4 - - batch_size: ??? - - eval_freq: ??? - log_freq: 200 - save_checkpoint: true - # Checkpoint is saved every `save_freq` training iterations and after the last training step. - save_freq: ??? - - # Online training. Note that the online training loop adopts most of the options above apart from the - # dataloader options. Unless otherwise specified. - # The online training look looks something like: - # - # for i in range(online_steps): - # do_online_rollout_and_update_online_buffer() - # for j in range(online_steps_between_rollouts): - # batch = next(dataloader_with_offline_and_online_data) - # loss = policy(batch) - # loss.backward() - # optimizer.step() - # - online_steps: ??? - # How many episodes to collect at once when we reach the online rollout part of the training loop. - online_rollout_n_episodes: 1 - # The number of environments to use in the gym.vector.VectorEnv. This ends up also being the batch size for - # the policy. Ideally you should set this to by an even divisor or online_rollout_n_episodes. - online_rollout_batch_size: 1 - # How many optimization steps (forward, backward, optimizer step) to do between running rollouts. - online_steps_between_rollouts: null - # The proportion of online samples (vs offline samples) to include in the online training batches. - online_sampling_ratio: 0.5 - # First seed to use for the online rollout environment. Seeds for subsequent rollouts are incremented by 1. - online_env_seed: null - # Sets the maximum number of frames that are stored in the online buffer for online training. The buffer is - # FIFO. - online_buffer_capacity: null - # The minimum number of frames to have in the online buffer before commencing online training. - # If online_buffer_seed_size > online_rollout_n_episodes, the rollout will be run multiple times until the - # seed size condition is satisfied. - online_buffer_seed_size: 0 - # Whether to run the online rollouts asynchronously. This means we can run the online training steps in - # parallel with the rollouts. This might be advised if your GPU has the bandwidth to handle training - # + eval + environment rendering simultaneously. - do_online_rollout_async: false - - image_transforms: - # These transforms are all using standard torchvision.transforms.v2 - # You can find out how these transformations affect images here: - # https://pytorch.org/vision/0.18/auto_examples/transforms/plot_transforms_illustrations.html - # We use a custom RandomSubsetApply container to sample them. 
- # For each transform, the following parameters are available: - # weight: This represents the multinomial probability (with no replacement) - # used for sampling the transform. If the sum of the weights is not 1, - # they will be normalized. - # min_max: Lower & upper bound respectively used for sampling the transform's parameter - # (following uniform distribution) when it's applied. - # Set this flag to `true` to enable transforms during training - enable: false - # This is the maximum number of transforms (sampled from these below) that will be applied to each frame. - # It's an integer in the interval [1, number of available transforms]. - max_num_transforms: 3 - # By default, transforms are applied in Torchvision's suggested order (shown below). - # Set this to True to apply them in a random order. - random_order: false - brightness: - weight: 1 - min_max: [0.8, 1.2] - contrast: - weight: 1 - min_max: [0.8, 1.2] - saturation: - weight: 1 - min_max: [0.5, 1.5] - hue: - weight: 1 - min_max: [-0.05, 0.05] - sharpness: - weight: 1 - min_max: [0.8, 1.2] - -eval: - n_episodes: 1 - # `batch_size` specifies the number of environments to use in a gym.vector.VectorEnv. - batch_size: 1 - # `use_async_envs` specifies whether to use asynchronous environments (multiprocessing). - use_async_envs: false - -wandb: - enable: false - # Set to true to disable saving an artifact despite save_checkpoint == True - disable_artifact: false - project: lerobot - notes: "" diff --git a/lerobot/configs/_deprecated/policy/act.yaml b/lerobot/configs/_deprecated/policy/act.yaml deleted file mode 100644 index 28883936a6..0000000000 --- a/lerobot/configs/_deprecated/policy/act.yaml +++ /dev/null @@ -1,82 +0,0 @@ -# @package _global_ - -seed: 1000 -dataset_repo_id: lerobot/aloha_sim_insertion_human - -override_dataset_stats: - observation.images.top: - # stats from imagenet, since we use a pretrained vision model - mean: [[[0.485]], [[0.456]], [[0.406]]] # (c,1,1) - std: [[[0.229]], [[0.224]], [[0.225]]] # (c,1,1) - -training: - offline_steps: 100000 - online_steps: 0 - eval_freq: 20000 - save_freq: 20000 - save_checkpoint: true - - batch_size: 8 - lr: 1e-5 - lr_backbone: 1e-5 - weight_decay: 1e-4 - grad_clip_norm: 10 - online_steps_between_rollouts: 1 - - delta_timestamps: - action: "[i / ${fps} for i in range(${policy.chunk_size})]" - -eval: - n_episodes: 50 - batch_size: 50 - -# See `configuration_act.py` for more details. -policy: - name: act - - # Input / output structure. - n_obs_steps: 1 - chunk_size: 100 # chunk_size - n_action_steps: 100 - - input_shapes: - # TODO(rcadene, alexander-soare): add variables for height and width from the dataset/env? - observation.images.top: [3, 480, 640] - observation.state: ["${env.state_dim}"] - output_shapes: - action: ["${env.action_dim}"] - - # Normalization / Unnormalization - input_normalization_modes: - observation.images.top: mean_std - observation.state: mean_std - output_normalization_modes: - action: mean_std - - # Architecture. - # Vision backbone. - vision_backbone: resnet18 - pretrained_backbone_weights: ResNet18_Weights.IMAGENET1K_V1 - replace_final_stride_with_dilation: false - # Transformer layers. - pre_norm: false - dim_model: 512 - n_heads: 8 - dim_feedforward: 3200 - feedforward_activation: relu - n_encoder_layers: 4 - # Note: Although the original ACT implementation has 7 for `n_decoder_layers`, there is a bug in the code - # that means only the first layer is used. Here we match the original implementation by setting this to 1. 
- # See this issue https://github.com/tonyzhaozh/act/issues/25#issue-2258740521. - n_decoder_layers: 1 - # VAE. - use_vae: true - latent_dim: 32 - n_vae_encoder_layers: 4 - - # Inference. - temporal_ensemble_coeff: null - - # Training and loss computation. - dropout: 0.1 - kl_weight: 10.0 diff --git a/lerobot/configs/_deprecated/policy/act_aloha_real.yaml b/lerobot/configs/_deprecated/policy/act_aloha_real.yaml deleted file mode 100644 index 99d01bb202..0000000000 --- a/lerobot/configs/_deprecated/policy/act_aloha_real.yaml +++ /dev/null @@ -1,114 +0,0 @@ -# @package _global_ - -# Use `act_aloha_real.yaml` to train on real-world datasets collected on Aloha or Aloha-2 robots. -# Compared to `act.yaml`, it contains 4 cameras (i.e. cam_right_wrist, cam_left_wrist, cam_high, cam_low) instead of 1 camera (i.e. top). -# Also, `training.eval_freq` is set to -1. This config is used to evaluate checkpoints at a certain frequency of training steps. -# When it is set to -1, it deactivates evaluation. This is because real-world evaluation is done through our `control_robot.py` script. -# Look at the documentation in header of `control_robot.py` for more information on how to collect data , train and evaluate a policy. -# -# Example of usage for training and inference with `control_robot.py`: -# ```bash -# python lerobot/scripts/train.py \ -# policy=act_aloha_real \ -# env=aloha_real -# ``` - -seed: 1000 -dataset_repo_id: lerobot/aloha_static_vinh_cup - -override_dataset_stats: - observation.images.cam_right_wrist: - # stats from imagenet, since we use a pretrained vision model - mean: [[[0.485]], [[0.456]], [[0.406]]] # (c,1,1) - std: [[[0.229]], [[0.224]], [[0.225]]] # (c,1,1) - observation.images.cam_left_wrist: - # stats from imagenet, since we use a pretrained vision model - mean: [[[0.485]], [[0.456]], [[0.406]]] # (c,1,1) - std: [[[0.229]], [[0.224]], [[0.225]]] # (c,1,1) - observation.images.cam_high: - # stats from imagenet, since we use a pretrained vision model - mean: [[[0.485]], [[0.456]], [[0.406]]] # (c,1,1) - std: [[[0.229]], [[0.224]], [[0.225]]] # (c,1,1) - observation.images.cam_low: - # stats from imagenet, since we use a pretrained vision model - mean: [[[0.485]], [[0.456]], [[0.406]]] # (c,1,1) - std: [[[0.229]], [[0.224]], [[0.225]]] # (c,1,1) - -training: - offline_steps: 80000 - online_steps: 0 - eval_freq: -1 - save_freq: 10000 - log_freq: 100 - save_checkpoint: true - - batch_size: 8 - lr: 1e-5 - lr_backbone: 1e-5 - weight_decay: 1e-4 - grad_clip_norm: 10 - online_steps_between_rollouts: 1 - - delta_timestamps: - action: "[i / ${fps} for i in range(${policy.chunk_size})]" - -eval: - n_episodes: 50 - batch_size: 50 - -# See `configuration_act.py` for more details. -policy: - name: act - - # Input / output structure. - n_obs_steps: 1 - chunk_size: 100 - n_action_steps: 100 - - input_shapes: - # TODO(rcadene, alexander-soare): add variables for height and width from the dataset/env? 
- observation.images.cam_right_wrist: [3, 480, 640] - observation.images.cam_left_wrist: [3, 480, 640] - observation.images.cam_high: [3, 480, 640] - observation.images.cam_low: [3, 480, 640] - observation.state: ["${env.state_dim}"] - output_shapes: - action: ["${env.action_dim}"] - - # Normalization / Unnormalization - input_normalization_modes: - observation.images.cam_right_wrist: mean_std - observation.images.cam_left_wrist: mean_std - observation.images.cam_high: mean_std - observation.images.cam_low: mean_std - observation.state: mean_std - output_normalization_modes: - action: mean_std - - # Architecture. - # Vision backbone. - vision_backbone: resnet18 - pretrained_backbone_weights: ResNet18_Weights.IMAGENET1K_V1 - replace_final_stride_with_dilation: false - # Transformer layers. - pre_norm: false - dim_model: 512 - n_heads: 8 - dim_feedforward: 3200 - feedforward_activation: relu - n_encoder_layers: 4 - # Note: Although the original ACT implementation has 7 for `n_decoder_layers`, there is a bug in the code - # that means only the first layer is used. Here we match the original implementation by setting this to 1. - # See this issue https://github.com/tonyzhaozh/act/issues/25#issue-2258740521. - n_decoder_layers: 1 - # VAE. - use_vae: true - latent_dim: 32 - n_vae_encoder_layers: 4 - - # Inference. - temporal_ensemble_coeff: null - - # Training and loss computation. - dropout: 0.1 - kl_weight: 10.0 diff --git a/lerobot/configs/_deprecated/policy/act_koch_real.yaml b/lerobot/configs/_deprecated/policy/act_koch_real.yaml deleted file mode 100644 index 6ddebab14d..0000000000 --- a/lerobot/configs/_deprecated/policy/act_koch_real.yaml +++ /dev/null @@ -1,102 +0,0 @@ -# @package _global_ - -# Use `act_koch_real.yaml` to train on real-world datasets collected on Alexander Koch's robots. -# Compared to `act.yaml`, it contains 2 cameras (i.e. laptop, phone) instead of 1 camera (i.e. top). -# Also, `training.eval_freq` is set to -1. This config is used to evaluate checkpoints at a certain frequency of training steps. -# When it is set to -1, it deactivates evaluation. This is because real-world evaluation is done through our `control_robot.py` script. -# Look at the documentation in header of `control_robot.py` for more information on how to collect data , train and evaluate a policy. -# -# Example of usage for training: -# ```bash -# python lerobot/scripts/train.py \ -# policy=act_koch_real \ -# env=koch_real -# ``` - -seed: 1000 -dataset_repo_id: lerobot/koch_pick_place_lego - -override_dataset_stats: - observation.images.laptop: - # stats from imagenet, since we use a pretrained vision model - mean: [[[0.485]], [[0.456]], [[0.406]]] # (c,1,1) - std: [[[0.229]], [[0.224]], [[0.225]]] # (c,1,1) - observation.images.phone: - # stats from imagenet, since we use a pretrained vision model - mean: [[[0.485]], [[0.456]], [[0.406]]] # (c,1,1) - std: [[[0.229]], [[0.224]], [[0.225]]] # (c,1,1) - -training: - offline_steps: 80000 - online_steps: 0 - eval_freq: -1 - save_freq: 10000 - log_freq: 100 - save_checkpoint: true - - batch_size: 8 - lr: 1e-5 - lr_backbone: 1e-5 - weight_decay: 1e-4 - grad_clip_norm: 10 - online_steps_between_rollouts: 1 - - delta_timestamps: - action: "[i / ${fps} for i in range(${policy.chunk_size})]" - -eval: - n_episodes: 50 - batch_size: 50 - -# See `configuration_act.py` for more details. -policy: - name: act - - # Input / output structure. 
- n_obs_steps: 1 - chunk_size: 100 - n_action_steps: 100 - - input_shapes: - # TODO(rcadene, alexander-soare): add variables for height and width from the dataset/env? - observation.images.laptop: [3, 480, 640] - observation.images.phone: [3, 480, 640] - observation.state: ["${env.state_dim}"] - output_shapes: - action: ["${env.action_dim}"] - - # Normalization / Unnormalization - input_normalization_modes: - observation.images.laptop: mean_std - observation.images.phone: mean_std - observation.state: mean_std - output_normalization_modes: - action: mean_std - - # Architecture. - # Vision backbone. - vision_backbone: resnet18 - pretrained_backbone_weights: ResNet18_Weights.IMAGENET1K_V1 - replace_final_stride_with_dilation: false - # Transformer layers. - pre_norm: false - dim_model: 512 - n_heads: 8 - dim_feedforward: 3200 - feedforward_activation: relu - n_encoder_layers: 4 - # Note: Although the original ACT implementation has 7 for `n_decoder_layers`, there is a bug in the code - # that means only the first layer is used. Here we match the original implementation by setting this to 1. - # See this issue https://github.com/tonyzhaozh/act/issues/25#issue-2258740521. - n_decoder_layers: 1 - # VAE. - use_vae: true - latent_dim: 32 - n_vae_encoder_layers: 4 - - # Inference. - temporal_ensemble_coeff: null - - # Training and loss computation. - dropout: 0.1 - kl_weight: 10.0 diff --git a/lerobot/configs/_deprecated/policy/act_moss_real.yaml b/lerobot/configs/_deprecated/policy/act_moss_real.yaml deleted file mode 100644 index d996c3597d..0000000000 --- a/lerobot/configs/_deprecated/policy/act_moss_real.yaml +++ /dev/null @@ -1,102 +0,0 @@ -# @package _global_ - -# Use `act_koch_real.yaml` to train on real-world datasets collected on Alexander Koch's robots. -# Compared to `act.yaml`, it contains 2 cameras (i.e. laptop, phone) instead of 1 camera (i.e. top). -# Also, `training.eval_freq` is set to -1. This config is used to evaluate checkpoints at a certain frequency of training steps. -# When it is set to -1, it deactivates evaluation. This is because real-world evaluation is done through our `control_robot.py` script. -# Look at the documentation in header of `control_robot.py` for more information on how to collect data , train and evaluate a policy. -# -# Example of usage for training: -# ```bash -# python lerobot/scripts/train.py \ -# policy=act_koch_real \ -# env=koch_real -# ``` - -seed: 1000 -dataset_repo_id: lerobot/moss_pick_place_lego - -override_dataset_stats: - observation.images.laptop: - # stats from imagenet, since we use a pretrained vision model - mean: [[[0.485]], [[0.456]], [[0.406]]] # (c,1,1) - std: [[[0.229]], [[0.224]], [[0.225]]] # (c,1,1) - observation.images.phone: - # stats from imagenet, since we use a pretrained vision model - mean: [[[0.485]], [[0.456]], [[0.406]]] # (c,1,1) - std: [[[0.229]], [[0.224]], [[0.225]]] # (c,1,1) - -training: - offline_steps: 80000 - online_steps: 0 - eval_freq: -1 - save_freq: 10000 - log_freq: 100 - save_checkpoint: true - - batch_size: 8 - lr: 1e-5 - lr_backbone: 1e-5 - weight_decay: 1e-4 - grad_clip_norm: 10 - online_steps_between_rollouts: 1 - - delta_timestamps: - action: "[i / ${fps} for i in range(${policy.chunk_size})]" - -eval: - n_episodes: 50 - batch_size: 50 - -# See `configuration_act.py` for more details. -policy: - name: act - - # Input / output structure. 
- n_obs_steps: 1 - chunk_size: 100 - n_action_steps: 100 - - input_shapes: - # TODO(rcadene, alexander-soare): add variables for height and width from the dataset/env? - observation.images.laptop: [3, 480, 640] - observation.images.phone: [3, 480, 640] - observation.state: ["${env.state_dim}"] - output_shapes: - action: ["${env.action_dim}"] - - # Normalization / Unnormalization - input_normalization_modes: - observation.images.laptop: mean_std - observation.images.phone: mean_std - observation.state: mean_std - output_normalization_modes: - action: mean_std - - # Architecture. - # Vision backbone. - vision_backbone: resnet18 - pretrained_backbone_weights: ResNet18_Weights.IMAGENET1K_V1 - replace_final_stride_with_dilation: false - # Transformer layers. - pre_norm: false - dim_model: 512 - n_heads: 8 - dim_feedforward: 3200 - feedforward_activation: relu - n_encoder_layers: 4 - # Note: Although the original ACT implementation has 7 for `n_decoder_layers`, there is a bug in the code - # that means only the first layer is used. Here we match the original implementation by setting this to 1. - # See this issue https://github.com/tonyzhaozh/act/issues/25#issue-2258740521. - n_decoder_layers: 1 - # VAE. - use_vae: true - latent_dim: 32 - n_vae_encoder_layers: 4 - - # Inference. - temporal_ensemble_coeff: null - - # Training and loss computation. - dropout: 0.1 - kl_weight: 10.0 diff --git a/lerobot/configs/_deprecated/policy/act_so100_real.yaml b/lerobot/configs/_deprecated/policy/act_so100_real.yaml deleted file mode 100644 index cf5b1f1470..0000000000 --- a/lerobot/configs/_deprecated/policy/act_so100_real.yaml +++ /dev/null @@ -1,102 +0,0 @@ -# @package _global_ - -# Use `act_koch_real.yaml` to train on real-world datasets collected on Alexander Koch's robots. -# Compared to `act.yaml`, it contains 2 cameras (i.e. laptop, phone) instead of 1 camera (i.e. top). -# Also, `training.eval_freq` is set to -1. This config is used to evaluate checkpoints at a certain frequency of training steps. -# When it is set to -1, it deactivates evaluation. This is because real-world evaluation is done through our `control_robot.py` script. -# Look at the documentation in header of `control_robot.py` for more information on how to collect data , train and evaluate a policy. -# -# Example of usage for training: -# ```bash -# python lerobot/scripts/train.py \ -# policy=act_koch_real \ -# env=koch_real -# ``` - -seed: 1000 -dataset_repo_id: lerobot/so100_pick_place_lego - -override_dataset_stats: - observation.images.laptop: - # stats from imagenet, since we use a pretrained vision model - mean: [[[0.485]], [[0.456]], [[0.406]]] # (c,1,1) - std: [[[0.229]], [[0.224]], [[0.225]]] # (c,1,1) - observation.images.phone: - # stats from imagenet, since we use a pretrained vision model - mean: [[[0.485]], [[0.456]], [[0.406]]] # (c,1,1) - std: [[[0.229]], [[0.224]], [[0.225]]] # (c,1,1) - -training: - offline_steps: 80000 - online_steps: 0 - eval_freq: -1 - save_freq: 10000 - log_freq: 100 - save_checkpoint: true - - batch_size: 8 - lr: 1e-5 - lr_backbone: 1e-5 - weight_decay: 1e-4 - grad_clip_norm: 10 - online_steps_between_rollouts: 1 - - delta_timestamps: - action: "[i / ${fps} for i in range(${policy.chunk_size})]" - -eval: - n_episodes: 50 - batch_size: 50 - -# See `configuration_act.py` for more details. -policy: - name: act - - # Input / output structure. 
- n_obs_steps: 1 - chunk_size: 100 - n_action_steps: 100 - - input_shapes: - # TODO(rcadene, alexander-soare): add variables for height and width from the dataset/env? - observation.images.laptop: [3, 480, 640] - observation.images.phone: [3, 480, 640] - observation.state: ["${env.state_dim}"] - output_shapes: - action: ["${env.action_dim}"] - - # Normalization / Unnormalization - input_normalization_modes: - observation.images.laptop: mean_std - observation.images.phone: mean_std - observation.state: mean_std - output_normalization_modes: - action: mean_std - - # Architecture. - # Vision backbone. - vision_backbone: resnet18 - pretrained_backbone_weights: ResNet18_Weights.IMAGENET1K_V1 - replace_final_stride_with_dilation: false - # Transformer layers. - pre_norm: false - dim_model: 512 - n_heads: 8 - dim_feedforward: 3200 - feedforward_activation: relu - n_encoder_layers: 4 - # Note: Although the original ACT implementation has 7 for `n_decoder_layers`, there is a bug in the code - # that means only the first layer is used. Here we match the original implementation by setting this to 1. - # See this issue https://github.com/tonyzhaozh/act/issues/25#issue-2258740521. - n_decoder_layers: 1 - # VAE. - use_vae: true - latent_dim: 32 - n_vae_encoder_layers: 4 - - # Inference. - temporal_ensemble_coeff: null - - # Training and loss computation. - dropout: 0.1 - kl_weight: 10.0 diff --git a/lerobot/configs/_deprecated/policy/diffusion.yaml b/lerobot/configs/_deprecated/policy/diffusion.yaml deleted file mode 100644 index 880819bb94..0000000000 --- a/lerobot/configs/_deprecated/policy/diffusion.yaml +++ /dev/null @@ -1,104 +0,0 @@ -# @package _global_ - -# Defaults for training for the PushT dataset as per https://github.com/real-stanford/diffusion_policy. -# Note: We do not track EMA model weights as we discovered it does not improve the results. See -# https://github.com/huggingface/lerobot/pull/134 for more details. - -seed: 100000 -dataset_repo_id: lerobot/pusht - -override_dataset_stats: - # TODO(rcadene, alexander-soare): should we remove image stats as well? do we use a pretrained vision model? - observation.image: - mean: [[[0.5]], [[0.5]], [[0.5]]] # (c,1,1) - std: [[[0.5]], [[0.5]], [[0.5]]] # (c,1,1) - # TODO(rcadene, alexander-soare): we override state and action stats to use the same as the pretrained model - # from the original codebase, but we should remove these and train our own pretrained model - observation.state: - min: [13.456424, 32.938293] - max: [496.14618, 510.9579] - action: - min: [12.0, 25.0] - max: [511.0, 511.0] - -training: - offline_steps: 200000 - online_steps: 0 - eval_freq: 25000 - save_freq: 25000 - save_checkpoint: true - - batch_size: 64 - grad_clip_norm: 10 - lr: 1.0e-4 - lr_scheduler: cosine - lr_warmup_steps: 500 - adam_betas: [0.95, 0.999] - adam_eps: 1.0e-8 - adam_weight_decay: 1.0e-6 - online_steps_between_rollouts: 1 - - delta_timestamps: - observation.image: "[i / ${fps} for i in range(1 - ${policy.n_obs_steps}, 1)]" - observation.state: "[i / ${fps} for i in range(1 - ${policy.n_obs_steps}, 1)]" - action: "[i / ${fps} for i in range(1 - ${policy.n_obs_steps}, 1 - ${policy.n_obs_steps} + ${policy.horizon})]" - - # The original implementation doesn't sample frames for the last 7 steps, - # which avoids excessive padding and leads to improved training results. 
- drop_n_last_frames: 7 # ${policy.horizon} - ${policy.n_action_steps} - ${policy.n_obs_steps} + 1 - -eval: - n_episodes: 50 - batch_size: 50 - -policy: - name: diffusion - - # Input / output structure. - n_obs_steps: 2 - horizon: 16 - n_action_steps: 8 - - input_shapes: - # TODO(rcadene, alexander-soare): add variables for height and width from the dataset/env? - observation.image: [3, 96, 96] - observation.state: ["${env.state_dim}"] - output_shapes: - action: ["${env.action_dim}"] - - # Normalization / Unnormalization - input_normalization_modes: - observation.image: mean_std - observation.state: min_max - output_normalization_modes: - action: min_max - - # Architecture / modeling. - # Vision backbone. - vision_backbone: resnet18 - crop_shape: [84, 84] - crop_is_random: True - pretrained_backbone_weights: null - use_group_norm: True - spatial_softmax_num_keypoints: 32 - # Unet. - down_dims: [512, 1024, 2048] - kernel_size: 5 - n_groups: 8 - diffusion_step_embed_dim: 128 - use_film_scale_modulation: True - # Noise scheduler. - noise_scheduler_type: DDPM - num_train_timesteps: 100 - beta_schedule: squaredcos_cap_v2 - beta_start: 0.0001 - beta_end: 0.02 - prediction_type: epsilon # epsilon / sample - clip_sample: True - clip_sample_range: 1.0 - - # Inference - num_inference_steps: null # if not provided, defaults to `num_train_timesteps` - - # Loss computation - do_mask_loss_for_padding: false diff --git a/lerobot/configs/_deprecated/policy/diffusion_pusht_keypoints.yaml b/lerobot/configs/_deprecated/policy/diffusion_pusht_keypoints.yaml deleted file mode 100644 index a5fe6cf936..0000000000 --- a/lerobot/configs/_deprecated/policy/diffusion_pusht_keypoints.yaml +++ /dev/null @@ -1,110 +0,0 @@ -# @package _global_ - -# Defaults for training for the pusht_keypoints dataset. - -# They keypoints are on the vertices of the rectangles that make up the PushT as documented in the PushT -# environment: -# https://github.com/huggingface/gym-pusht/blob/5e2489be9ff99ed9cd47b6c653dda3b7aa844d24/gym_pusht/envs/pusht.py#L522-L534 -# For completeness, the diagram is copied here: -# 0───────────1 -# │ │ -# 3───4───5───2 -# │ │ -# │ │ -# │ │ -# │ │ -# 7───6 - - -# Note: The original work trains keypoints-only with conditioning via inpainting. Here, we encode the -# observation along with the agent position and use the encoding as global conditioning for the denoising -# U-Net. - -# Note: We do not track EMA model weights as we discovered it does not improve the results. See -# https://github.com/huggingface/lerobot/pull/134 for more details. - -seed: 100000 -dataset_repo_id: lerobot/pusht_keypoints - -training: - offline_steps: 200000 - online_steps: 0 - eval_freq: 5000 - save_freq: 5000 - log_freq: 250 - save_checkpoint: true - - batch_size: 64 - grad_clip_norm: 10 - lr: 1.0e-4 - lr_scheduler: cosine - lr_warmup_steps: 500 - adam_betas: [0.95, 0.999] - adam_eps: 1.0e-8 - adam_weight_decay: 1.0e-6 - online_steps_between_rollouts: 1 - - delta_timestamps: - observation.environment_state: "[i / ${fps} for i in range(1 - ${policy.n_obs_steps}, 1)]" - observation.state: "[i / ${fps} for i in range(1 - ${policy.n_obs_steps}, 1)]" - action: "[i / ${fps} for i in range(1 - ${policy.n_obs_steps}, 1 - ${policy.n_obs_steps} + ${policy.horizon})]" - - # The original implementation doesn't sample frames for the last 7 steps, - # which avoids excessive padding and leads to improved training results. 
- drop_n_last_frames: 7 # ${policy.horizon} - ${policy.n_action_steps} - ${policy.n_obs_steps} + 1 - -eval: - n_episodes: 50 - batch_size: 50 - -policy: - name: diffusion - - # Input / output structure. - n_obs_steps: 2 - horizon: 16 - n_action_steps: 8 - - input_shapes: - # TODO(rcadene, alexander-soare): add variables for height and width from the dataset/env? - observation.environment_state: [16] - observation.state: ["${env.state_dim}"] - output_shapes: - action: ["${env.action_dim}"] - - # Normalization / Unnormalization - input_normalization_modes: - observation.environment_state: min_max - observation.state: min_max - output_normalization_modes: - action: min_max - - # Architecture / modeling. - # Vision backbone. - vision_backbone: resnet18 - crop_shape: [84, 84] - crop_is_random: True - pretrained_backbone_weights: null - use_group_norm: True - spatial_softmax_num_keypoints: 32 - # Unet. - down_dims: [256, 512, 1024] - kernel_size: 5 - n_groups: 8 - diffusion_step_embed_dim: 128 - use_film_scale_modulation: True - # Noise scheduler. - noise_scheduler_type: DDIM - num_train_timesteps: 100 - beta_schedule: squaredcos_cap_v2 - beta_start: 0.0001 - beta_end: 0.02 - prediction_type: epsilon # epsilon / sample - clip_sample: True - clip_sample_range: 1.0 - - # Inference - num_inference_steps: 10 # if not provided, defaults to `num_train_timesteps` - - # Loss computation - do_mask_loss_for_padding: false diff --git a/lerobot/configs/_deprecated/policy/tdmpc.yaml b/lerobot/configs/_deprecated/policy/tdmpc.yaml deleted file mode 100644 index 7c85fcf807..0000000000 --- a/lerobot/configs/_deprecated/policy/tdmpc.yaml +++ /dev/null @@ -1,93 +0,0 @@ -# @package _global_ - -seed: 1 -dataset_repo_id: lerobot/xarm_lift_medium - -training: - offline_steps: 50000 - - num_workers: 4 - - batch_size: 256 - grad_clip_norm: 10.0 - lr: 3e-4 - - save_freq: 10000 - eval_freq: 5000 - log_freq: 100 - - online_steps: 50000 - online_rollout_n_episodes: 1 - online_rollout_batch_size: 1 - # Note: in FOWM `online_steps_between_rollouts` is actually dynamically set to match exactly the length of - # the last sampled episode. - online_steps_between_rollouts: 50 - online_sampling_ratio: 0.5 - online_env_seed: 10000 - # FOWM Push uses 10000 for `online_buffer_capacity`. Given that their maximum episode length for this task - # is 25, 10000 is approx 400 of their episodes worth. Since our episodes are about 8 times longer, we'll use - # 80000. - online_buffer_capacity: 80000 - - delta_timestamps: - observation.image: "[i / ${fps} for i in range(${policy.horizon} + 1)]" - observation.state: "[i / ${fps} for i in range(${policy.horizon} + 1)]" - action: "[i / ${fps} for i in range(${policy.horizon})]" - next.reward: "[i / ${fps} for i in range(${policy.horizon})]" - -policy: - name: tdmpc - - pretrained_model_path: - - # Input / output structure. - n_action_repeats: 2 - horizon: 5 - n_action_steps: 1 - - input_shapes: - # TODO(rcadene, alexander-soare): add variables for height and width from the dataset/env? - observation.image: [3, 84, 84] - observation.state: ["${env.state_dim}"] - output_shapes: - action: ["${env.action_dim}"] - - # Normalization / Unnormalization - input_normalization_modes: null - output_normalization_modes: - action: min_max - - # Architecture / modeling. - # Neural networks. - image_encoder_hidden_dim: 32 - state_encoder_hidden_dim: 256 - latent_dim: 50 - q_ensemble_size: 5 - mlp_dim: 512 - # Reinforcement learning. - discount: 0.9 - - # Inference. 
- use_mpc: true - cem_iterations: 6 - max_std: 2.0 - min_std: 0.05 - n_gaussian_samples: 512 - n_pi_samples: 51 - uncertainty_regularizer_coeff: 1.0 - n_elites: 50 - elite_weighting_temperature: 0.5 - gaussian_mean_momentum: 0.1 - - # Training and loss computation. - max_random_shift_ratio: 0.0476 - # Loss coefficients. - reward_coeff: 0.5 - expectile_weight: 0.9 - value_coeff: 0.1 - consistency_coeff: 20.0 - advantage_scaling: 3.0 - pi_coeff: 0.5 - temporal_decay_coeff: 0.5 - # Target model. - target_model_momentum: 0.995 diff --git a/lerobot/configs/_deprecated/policy/tdmpc_pusht_keypoints.yaml b/lerobot/configs/_deprecated/policy/tdmpc_pusht_keypoints.yaml deleted file mode 100644 index 1cfc5b5276..0000000000 --- a/lerobot/configs/_deprecated/policy/tdmpc_pusht_keypoints.yaml +++ /dev/null @@ -1,105 +0,0 @@ -# @package _global_ - -# Train with: -# -# python lerobot/scripts/train.py \ -# env=pusht \ -# env.gym.obs_type=environment_state_agent_pos \ -# policy=tdmpc_pusht_keypoints \ -# eval.batch_size=50 \ -# eval.n_episodes=50 \ -# eval.use_async_envs=true \ -# device=cuda \ -# use_amp=true - -seed: 1 -dataset_repo_id: lerobot/pusht_keypoints - -training: - offline_steps: 0 - - # Offline training dataloader - num_workers: 4 - - batch_size: 256 - grad_clip_norm: 10.0 - lr: 3e-4 - - eval_freq: 10000 - log_freq: 500 - save_freq: 50000 - - online_steps: 1000000 - online_rollout_n_episodes: 10 - online_rollout_batch_size: 10 - online_steps_between_rollouts: 1000 - online_sampling_ratio: 1.0 - online_env_seed: 10000 - online_buffer_capacity: 40000 - online_buffer_seed_size: 0 - do_online_rollout_async: false - - delta_timestamps: - observation.environment_state: "[i / ${fps} for i in range(${policy.horizon} + 1)]" - observation.state: "[i / ${fps} for i in range(${policy.horizon} + 1)]" - action: "[i / ${fps} for i in range(${policy.horizon})]" - next.reward: "[i / ${fps} for i in range(${policy.horizon})]" - -policy: - name: tdmpc - - pretrained_model_path: - - # Input / output structure. - n_action_repeats: 1 - horizon: 5 - n_action_steps: 5 - - input_shapes: - # TODO(rcadene, alexander-soare): add variables for height and width from the dataset/env? - observation.environment_state: [16] - observation.state: ["${env.state_dim}"] - output_shapes: - action: ["${env.action_dim}"] - - # Normalization / Unnormalization - input_normalization_modes: - observation.environment_state: min_max - observation.state: min_max - output_normalization_modes: - action: min_max - - # Architecture / modeling. - # Neural networks. - image_encoder_hidden_dim: 32 - state_encoder_hidden_dim: 256 - latent_dim: 50 - q_ensemble_size: 5 - mlp_dim: 512 - # Reinforcement learning. - discount: 0.98 - - # Inference. - use_mpc: true - cem_iterations: 6 - max_std: 2.0 - min_std: 0.05 - n_gaussian_samples: 512 - n_pi_samples: 51 - uncertainty_regularizer_coeff: 1.0 - n_elites: 50 - elite_weighting_temperature: 0.5 - gaussian_mean_momentum: 0.1 - - # Training and loss computation. - max_random_shift_ratio: 0.0476 - # Loss coefficients. - reward_coeff: 0.5 - expectile_weight: 0.9 - value_coeff: 0.1 - consistency_coeff: 20.0 - advantage_scaling: 3.0 - pi_coeff: 0.5 - temporal_decay_coeff: 0.5 - # Target model. 
- target_model_momentum: 0.995 diff --git a/lerobot/configs/_deprecated/policy/vqbet.yaml b/lerobot/configs/_deprecated/policy/vqbet.yaml deleted file mode 100644 index cc70d61ad0..0000000000 --- a/lerobot/configs/_deprecated/policy/vqbet.yaml +++ /dev/null @@ -1,103 +0,0 @@ -# @package _global_ - -# Defaults for training for the PushT dataset. - -seed: 100000 -dataset_repo_id: lerobot/pusht - -override_dataset_stats: - # TODO(rcadene, alexander-soare): should we remove image stats as well? do we use a pretrained vision model? - observation.image: - mean: [[[0.5]], [[0.5]], [[0.5]]] # (c,1,1) - std: [[[0.5]], [[0.5]], [[0.5]]] # (c,1,1) - # TODO(rcadene, alexander-soare): we override state and action stats to use the same as the pretrained model - # from the original codebase, but we should remove these and train our own pretrained model - observation.state: - min: [13.456424, 32.938293] - max: [496.14618, 510.9579] - action: - min: [12.0, 25.0] - max: [511.0, 511.0] - -training: - offline_steps: 250000 - online_steps: 0 - eval_freq: 25000 - save_freq: 25000 - save_checkpoint: true - - batch_size: 64 - grad_clip_norm: 10 - lr: 1.0e-4 - lr_scheduler: cosine - lr_warmup_steps: 500 - adam_betas: [0.95, 0.999] - adam_eps: 1.0e-8 - adam_weight_decay: 1.0e-6 - online_steps_between_rollouts: 1 - - # VQ-BeT specific - vqvae_lr: 1.0e-3 - n_vqvae_training_steps: 20000 - bet_weight_decay: 2e-4 - bet_learning_rate: 5.5e-5 - bet_betas: [0.9, 0.999] - - delta_timestamps: - observation.image: "[i / ${fps} for i in range(1 - ${policy.n_obs_steps}, 1)]" - observation.state: "[i / ${fps} for i in range(1 - ${policy.n_obs_steps}, 1)]" - action: "[i / ${fps} for i in range(1 - ${policy.n_obs_steps}, ${policy.n_action_pred_token} + ${policy.action_chunk_size} - 1)]" - -eval: - n_episodes: 50 - batch_size: 50 - -policy: - name: vqbet - - # Input / output structure. - n_obs_steps: 5 - n_action_pred_token: 7 - action_chunk_size: 5 - - input_shapes: - # TODO(rcadene, alexander-soare): add variables for height and width from the dataset/env? - observation.image: [3, 96, 96] - observation.state: ["${env.state_dim}"] - output_shapes: - action: ["${env.action_dim}"] - - # Normalization / Unnormalization - input_normalization_modes: - observation.image: mean_std - observation.state: min_max - output_normalization_modes: - action: min_max - - # Architecture / modeling. - # Vision backbone. - vision_backbone: resnet18 - crop_shape: [84, 84] - crop_is_random: True - pretrained_backbone_weights: null - use_group_norm: True - spatial_softmax_num_keypoints: 32 - # VQ-VAE - n_vqvae_training_steps: ${training.n_vqvae_training_steps} - vqvae_n_embed: 16 - vqvae_embedding_dim: 256 - vqvae_enc_hidden_dim: 128 - # VQ-BeT - gpt_block_size: 500 - gpt_input_dim: 512 - gpt_output_dim: 512 - gpt_n_layer: 8 - gpt_n_head: 8 - gpt_hidden_dim: 512 - dropout: 0.1 - mlp_hidden_dim: 1024 - offset_loss_weight: 10000. 
- primary_code_loss_weight: 5.0 - secondary_code_loss_weight: 0.5 - bet_softmax_temperature: 0.1 - sequentially_select: False diff --git a/lerobot/configs/_deprecated/robot/aloha.yaml b/lerobot/configs/_deprecated/robot/aloha.yaml deleted file mode 100644 index d8bca515f0..0000000000 --- a/lerobot/configs/_deprecated/robot/aloha.yaml +++ /dev/null @@ -1,117 +0,0 @@ -# [Aloha: A Low-Cost Hardware for Bimanual Teleoperation](https://www.trossenrobotics.com/aloha-stationary) -# https://aloha-2.github.io - -# Requires installing extras packages -# With pip: `pip install -e ".[dynamixel intelrealsense]"` -# With poetry: `poetry install --sync --extras "dynamixel intelrealsense"` - -# See [tutorial](https://github.com/huggingface/lerobot/blob/main/examples/9_use_aloha.md) - - -_target_: lerobot.common.robot_devices.robots.manipulator.ManipulatorRobot -robot_type: aloha -# Specific to Aloha, LeRobot comes with default calibration files. Assuming the motors have been -# properly assembled, no manual calibration step is expected. If you need to run manual calibration, -# simply update this path to ".cache/calibration/aloha" -calibration_dir: .cache/calibration/aloha_default - -# /!\ FOR SAFETY, READ THIS /!\ -# `max_relative_target` limits the magnitude of the relative positional target vector for safety purposes. -# Set this to a positive scalar to have the same value for all motors, or a list that is the same length as -# the number of motors in your follower arms. -# For Aloha, for every goal position request, motor rotations are capped at 5 degrees by default. -# When you feel more confident with teleoperation or running the policy, you can extend -# this safety limit and even removing it by setting it to `null`. -# Also, everything is expected to work safely out-of-the-box, but we highly advise to -# first try to teleoperate the grippers only (by commenting out the rest of the motors in this yaml), -# then to gradually add more motors (by uncommenting), until you can teleoperate both arms fully -max_relative_target: 5 - -leader_arms: - left: - _target_: lerobot.common.robot_devices.motors.dynamixel.DynamixelMotorsBus - port: /dev/ttyDXL_leader_left - motors: # window_x - # name: (index, model) - waist: [1, xm430-w350] - shoulder: [2, xm430-w350] - shoulder_shadow: [3, xm430-w350] - elbow: [4, xm430-w350] - elbow_shadow: [5, xm430-w350] - forearm_roll: [6, xm430-w350] - wrist_angle: [7, xm430-w350] - wrist_rotate: [8, xl430-w250] - gripper: [9, xc430-w150] - right: - _target_: lerobot.common.robot_devices.motors.dynamixel.DynamixelMotorsBus - port: /dev/ttyDXL_leader_right - motors: # window_x - # name: (index, model) - waist: [1, xm430-w350] - shoulder: [2, xm430-w350] - shoulder_shadow: [3, xm430-w350] - elbow: [4, xm430-w350] - elbow_shadow: [5, xm430-w350] - forearm_roll: [6, xm430-w350] - wrist_angle: [7, xm430-w350] - wrist_rotate: [8, xl430-w250] - gripper: [9, xc430-w150] - -follower_arms: - left: - _target_: lerobot.common.robot_devices.motors.dynamixel.DynamixelMotorsBus - port: /dev/ttyDXL_follower_left - motors: - # name: [index, model] - waist: [1, xm540-w270] - shoulder: [2, xm540-w270] - shoulder_shadow: [3, xm540-w270] - elbow: [4, xm540-w270] - elbow_shadow: [5, xm540-w270] - forearm_roll: [6, xm540-w270] - wrist_angle: [7, xm540-w270] - wrist_rotate: [8, xm430-w350] - gripper: [9, xm430-w350] - right: - _target_: lerobot.common.robot_devices.motors.dynamixel.DynamixelMotorsBus - port: /dev/ttyDXL_follower_right - motors: - # name: [index, model] - waist: [1, xm540-w270] - 
shoulder: [2, xm540-w270] - shoulder_shadow: [3, xm540-w270] - elbow: [4, xm540-w270] - elbow_shadow: [5, xm540-w270] - forearm_roll: [6, xm540-w270] - wrist_angle: [7, xm540-w270] - wrist_rotate: [8, xm430-w350] - gripper: [9, xm430-w350] - -# Troubleshooting: If one of your IntelRealSense cameras freeze during -# data recording due to bandwidth limit, you might need to plug the camera -# on another USB hub or PCIe card. -cameras: - cam_high: - _target_: lerobot.common.robot_devices.cameras.intelrealsense.IntelRealSenseCamera - serial_number: 128422271347 - fps: 30 - width: 640 - height: 480 - cam_low: - _target_: lerobot.common.robot_devices.cameras.intelrealsense.IntelRealSenseCamera - serial_number: 130322270656 - fps: 30 - width: 640 - height: 480 - cam_left_wrist: - _target_: lerobot.common.robot_devices.cameras.intelrealsense.IntelRealSenseCamera - serial_number: 218622272670 - fps: 30 - width: 640 - height: 480 - cam_right_wrist: - _target_: lerobot.common.robot_devices.cameras.intelrealsense.IntelRealSenseCamera - serial_number: 130322272300 - fps: 30 - width: 640 - height: 480 diff --git a/lerobot/configs/_deprecated/robot/koch.yaml b/lerobot/configs/_deprecated/robot/koch.yaml deleted file mode 100644 index 40969dc73d..0000000000 --- a/lerobot/configs/_deprecated/robot/koch.yaml +++ /dev/null @@ -1,53 +0,0 @@ -_target_: lerobot.common.robot_devices.robots.manipulator.ManipulatorRobot -robot_type: koch -calibration_dir: .cache/calibration/koch - -# `max_relative_target` limits the magnitude of the relative positional target vector for safety purposes. -# Set this to a positive scalar to have the same value for all motors, or a list that is the same length as -# the number of motors in your follower arms. -max_relative_target: null - -leader_arms: - main: - _target_: lerobot.common.robot_devices.motors.dynamixel.DynamixelMotorsBus - port: /dev/tty.usbmodem575E0031751 - motors: - # name: (index, model) - shoulder_pan: [1, "xl330-m077"] - shoulder_lift: [2, "xl330-m077"] - elbow_flex: [3, "xl330-m077"] - wrist_flex: [4, "xl330-m077"] - wrist_roll: [5, "xl330-m077"] - gripper: [6, "xl330-m077"] - -follower_arms: - main: - _target_: lerobot.common.robot_devices.motors.dynamixel.DynamixelMotorsBus - port: /dev/tty.usbmodem575E0032081 - motors: - # name: (index, model) - shoulder_pan: [1, "xl430-w250"] - shoulder_lift: [2, "xl430-w250"] - elbow_flex: [3, "xl330-m288"] - wrist_flex: [4, "xl330-m288"] - wrist_roll: [5, "xl330-m288"] - gripper: [6, "xl330-m288"] - -cameras: - laptop: - _target_: lerobot.common.robot_devices.cameras.opencv.OpenCVCamera - camera_index: 0 - fps: 30 - width: 640 - height: 480 - phone: - _target_: lerobot.common.robot_devices.cameras.opencv.OpenCVCamera - camera_index: 1 - fps: 30 - width: 640 - height: 480 - -# ~ Koch specific settings ~ -# Sets the leader arm in torque mode with the gripper motor set to this angle. This makes it possible -# to squeeze the gripper and have it spring back to an open position on its own. 
-gripper_open_degree: 35.156 diff --git a/lerobot/configs/_deprecated/robot/koch_bimanual.yaml b/lerobot/configs/_deprecated/robot/koch_bimanual.yaml deleted file mode 100644 index b551d15de9..0000000000 --- a/lerobot/configs/_deprecated/robot/koch_bimanual.yaml +++ /dev/null @@ -1,75 +0,0 @@ -_target_: lerobot.common.robot_devices.robots.manipulator.ManipulatorRobot -robot_type: koch_bimanual -calibration_dir: .cache/calibration/koch_bimanual - -# `max_relative_target` limits the magnitude of the relative positional target vector for safety purposes. -# Set this to a positive scalar to have the same value for all motors, or a list that is the same length as -# the number of motors in your follower arms. -max_relative_target: null - -leader_arms: - left: - _target_: lerobot.common.robot_devices.motors.dynamixel.DynamixelMotorsBus - port: /dev/tty.usbmodem585A0085511 - motors: - # name: (index, model) - shoulder_pan: [1, "xl330-m077"] - shoulder_lift: [2, "xl330-m077"] - elbow_flex: [3, "xl330-m077"] - wrist_flex: [4, "xl330-m077"] - wrist_roll: [5, "xl330-m077"] - gripper: [6, "xl330-m077"] - right: - _target_: lerobot.common.robot_devices.motors.dynamixel.DynamixelMotorsBus - port: /dev/tty.usbmodem575E0031751 - motors: - # name: (index, model) - shoulder_pan: [1, "xl330-m077"] - shoulder_lift: [2, "xl330-m077"] - elbow_flex: [3, "xl330-m077"] - wrist_flex: [4, "xl330-m077"] - wrist_roll: [5, "xl330-m077"] - gripper: [6, "xl330-m077"] - -follower_arms: - left: - _target_: lerobot.common.robot_devices.motors.dynamixel.DynamixelMotorsBus - port: /dev/tty.usbmodem585A0076891 - motors: - # name: (index, model) - shoulder_pan: [1, "xl430-w250"] - shoulder_lift: [2, "xl430-w250"] - elbow_flex: [3, "xl330-m288"] - wrist_flex: [4, "xl330-m288"] - wrist_roll: [5, "xl330-m288"] - gripper: [6, "xl330-m288"] - right: - _target_: lerobot.common.robot_devices.motors.dynamixel.DynamixelMotorsBus - port: /dev/tty.usbmodem575E0032081 - motors: - # name: (index, model) - shoulder_pan: [1, "xl430-w250"] - shoulder_lift: [2, "xl430-w250"] - elbow_flex: [3, "xl330-m288"] - wrist_flex: [4, "xl330-m288"] - wrist_roll: [5, "xl330-m288"] - gripper: [6, "xl330-m288"] - -cameras: - laptop: - _target_: lerobot.common.robot_devices.cameras.opencv.OpenCVCamera - camera_index: 0 - fps: 30 - width: 640 - height: 480 - phone: - _target_: lerobot.common.robot_devices.cameras.opencv.OpenCVCamera - camera_index: 1 - fps: 30 - width: 640 - height: 480 - -# ~ Koch specific settings ~ -# Sets the leader arm in torque mode with the gripper motor set to this angle. This makes it possible -# to squeeze the gripper and have it spring back to an open position on its own. -gripper_open_degree: 35.156 diff --git a/lerobot/configs/_deprecated/robot/moss.yaml b/lerobot/configs/_deprecated/robot/moss.yaml deleted file mode 100644 index 8a9019851b..0000000000 --- a/lerobot/configs/_deprecated/robot/moss.yaml +++ /dev/null @@ -1,56 +0,0 @@ -# [Moss v1 robot arm](https://github.com/jess-moss/moss-robot-arms) - -# Requires installing extras packages -# With pip: `pip install -e ".[feetech]"` -# With poetry: `poetry install --sync --extras "feetech"` - -# See [tutorial](https://github.com/huggingface/lerobot/blob/main/examples/11_use_moss.md) - -_target_: lerobot.common.robot_devices.robots.manipulator.ManipulatorRobot -robot_type: moss -calibration_dir: .cache/calibration/moss - -# `max_relative_target` limits the magnitude of the relative positional target vector for safety purposes. 
-# Set this to a positive scalar to have the same value for all motors, or a list that is the same length as -# the number of motors in your follower arms. -max_relative_target: null - -leader_arms: - main: - _target_: lerobot.common.robot_devices.motors.feetech.FeetechMotorsBus - port: /dev/tty.usbmodem58760431091 - motors: - # name: (index, model) - shoulder_pan: [1, "sts3215"] - shoulder_lift: [2, "sts3215"] - elbow_flex: [3, "sts3215"] - wrist_flex: [4, "sts3215"] - wrist_roll: [5, "sts3215"] - gripper: [6, "sts3215"] - -follower_arms: - main: - _target_: lerobot.common.robot_devices.motors.feetech.FeetechMotorsBus - port: /dev/tty.usbmodem58760431191 - motors: - # name: (index, model) - shoulder_pan: [1, "sts3215"] - shoulder_lift: [2, "sts3215"] - elbow_flex: [3, "sts3215"] - wrist_flex: [4, "sts3215"] - wrist_roll: [5, "sts3215"] - gripper: [6, "sts3215"] - -cameras: - laptop: - _target_: lerobot.common.robot_devices.cameras.opencv.OpenCVCamera - camera_index: 0 - fps: 30 - width: 640 - height: 480 - phone: - _target_: lerobot.common.robot_devices.cameras.opencv.OpenCVCamera - camera_index: 1 - fps: 30 - width: 640 - height: 480 diff --git a/lerobot/configs/_deprecated/robot/so100.yaml b/lerobot/configs/_deprecated/robot/so100.yaml deleted file mode 100644 index ec6f3e3fe2..0000000000 --- a/lerobot/configs/_deprecated/robot/so100.yaml +++ /dev/null @@ -1,56 +0,0 @@ -# [SO-100 robot arm](https://github.com/TheRobotStudio/SO-ARM100) - -# Requires installing extras packages -# With pip: `pip install -e ".[feetech]"` -# With poetry: `poetry install --sync --extras "feetech"` - -# See [tutorial](https://github.com/huggingface/lerobot/blob/main/examples/10_use_so100.md) - -_target_: lerobot.common.robot_devices.robots.manipulator.ManipulatorRobot -robot_type: so100 -calibration_dir: .cache/calibration/so100 - -# `max_relative_target` limits the magnitude of the relative positional target vector for safety purposes. -# Set this to a positive scalar to have the same value for all motors, or a list that is the same length as -# the number of motors in your follower arms. 
-max_relative_target: null - -leader_arms: - main: - _target_: lerobot.common.robot_devices.motors.feetech.FeetechMotorsBus - port: /dev/tty.usbmodem585A0077581 - motors: - # name: (index, model) - shoulder_pan: [1, "sts3215"] - shoulder_lift: [2, "sts3215"] - elbow_flex: [3, "sts3215"] - wrist_flex: [4, "sts3215"] - wrist_roll: [5, "sts3215"] - gripper: [6, "sts3215"] - -follower_arms: - main: - _target_: lerobot.common.robot_devices.motors.feetech.FeetechMotorsBus - port: /dev/tty.usbmodem585A0080971 - motors: - # name: (index, model) - shoulder_pan: [1, "sts3215"] - shoulder_lift: [2, "sts3215"] - elbow_flex: [3, "sts3215"] - wrist_flex: [4, "sts3215"] - wrist_roll: [5, "sts3215"] - gripper: [6, "sts3215"] - -cameras: - laptop: - _target_: lerobot.common.robot_devices.cameras.opencv.OpenCVCamera - camera_index: 0 - fps: 30 - width: 640 - height: 480 - phone: - _target_: lerobot.common.robot_devices.cameras.opencv.OpenCVCamera - camera_index: 1 - fps: 30 - width: 640 - height: 480 diff --git a/lerobot/configs/_deprecated/robot/stretch.yaml b/lerobot/configs/_deprecated/robot/stretch.yaml deleted file mode 100644 index e29966b6f3..0000000000 --- a/lerobot/configs/_deprecated/robot/stretch.yaml +++ /dev/null @@ -1,33 +0,0 @@ -# [Stretch3 from Hello Robot](https://hello-robot.com/stretch-3-product) - -# Requires installing extras packages -# With pip: `pip install -e ".[stretch]"` -# With poetry: `poetry install --sync --extras "stretch"` - -# See [tutorial](https://github.com/huggingface/lerobot/blob/main/examples/8_use_stretch.md) - - -_target_: lerobot.common.robot_devices.robots.stretch.StretchRobot -robot_type: stretch3 - -cameras: - navigation: - _target_: lerobot.common.robot_devices.cameras.opencv.OpenCVCamera - camera_index: /dev/hello-nav-head-camera - fps: 10 - width: 1280 - height: 720 - rotation: -90 - head: - _target_: lerobot.common.robot_devices.cameras.intelrealsense.IntelRealSenseCamera.init_from_name - name: Intel RealSense D435I - fps: 30 - width: 640 - height: 480 - rotation: 90 - wrist: - _target_: lerobot.common.robot_devices.cameras.intelrealsense.IntelRealSenseCamera.init_from_name - name: Intel RealSense D405 - fps: 30 - width: 640 - height: 480 From 214083f8b9a22e8277b7fb2a56711b74cc7a6c94 Mon Sep 17 00:00:00 2001 From: Remi Date: Tue, 28 Jan 2025 12:42:29 +0100 Subject: [PATCH 79/94] Update robot examples with draccus commands (#654) --- examples/10_use_so100.md | 176 ++++++---- examples/11_use_moss.md | 170 ++++++--- examples/7_get_started_with_real_robot.md | 325 ++++++++---------- examples/8_use_stretch.md | 43 ++- examples/9_use_aloha.md | 113 +++--- .../robot_devices/cameras/intelrealsense.py | 58 ++-- .../common/robot_devices/cameras/opencv.py | 26 +- lerobot/common/robot_devices/cameras/utils.py | 12 +- .../common/robot_devices/control_configs.py | 32 +- .../common/robot_devices/motors/dynamixel.py | 17 +- .../common/robot_devices/motors/feetech.py | 16 +- lerobot/common/robot_devices/motors/utils.py | 12 +- .../common/robot_devices/robots/configs.py | 2 +- .../robot_devices/robots/manipulator.py | 6 +- lerobot/common/utils/utils.py | 20 ++ lerobot/configs/eval.py | 31 +- lerobot/scripts/control_robot.py | 53 +-- lerobot/scripts/train.py | 3 +- 18 files changed, 626 insertions(+), 489 deletions(-) diff --git a/examples/10_use_so100.md b/examples/10_use_so100.md index 155bbe5198..293212f24c 100644 --- a/examples/10_use_so100.md +++ b/examples/10_use_so100.md @@ -16,9 +16,9 @@ On your computer: mkdir -p ~/miniconda3 # Linux: wget 
https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -O ~/miniconda3/miniconda.sh -# Mac M-series: +# Mac M-series: # curl https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-arm64.sh -o ~/miniconda3/miniconda.sh -# Mac Intel: +# Mac Intel: # curl https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh -o ~/miniconda3/miniconda.sh bash ~/miniconda3/miniconda.sh -b -u -p ~/miniconda3 rm ~/miniconda3/miniconda.sh @@ -96,9 +96,53 @@ sudo chmod 666 /dev/ttyACM0 sudo chmod 666 /dev/ttyACM1 ``` -#### d. Update YAML file - -Now that you have the ports, modify the *port* sections in `so100.yaml` +#### d. Update config file + +IMPORTANTLY: Now that you have your ports, update the **port** default values of [`SO100RobotConfig`](../lerobot/common/robot_devices/robots/configs.py). You will find something like: +```python +@RobotConfig.register_subclass("so100") +@dataclass +class So100RobotConfig(ManipulatorRobotConfig): + calibration_dir: str = ".cache/calibration/so100" + # `max_relative_target` limits the magnitude of the relative positional target vector for safety purposes. + # Set this to a positive scalar to have the same value for all motors, or a list that is the same length as + # the number of motors in your follower arms. + max_relative_target: int | None = None + + leader_arms: dict[str, MotorsBusConfig] = field( + default_factory=lambda: { + "main": FeetechMotorsBusConfig( + port="/dev/tty.usbmodem58760431091", <-- UPDATE HERE + motors={ + # name: (index, model) + "shoulder_pan": [1, "sts3215"], + "shoulder_lift": [2, "sts3215"], + "elbow_flex": [3, "sts3215"], + "wrist_flex": [4, "sts3215"], + "wrist_roll": [5, "sts3215"], + "gripper": [6, "sts3215"], + }, + ), + } + ) + + follower_arms: dict[str, MotorsBusConfig] = field( + default_factory=lambda: { + "main": FeetechMotorsBusConfig( + port="/dev/tty.usbmodem585A0076891", <-- UPDATE HERE + motors={ + # name: (index, model) + "shoulder_pan": [1, "sts3215"], + "shoulder_lift": [2, "sts3215"], + "elbow_flex": [3, "sts3215"], + "wrist_flex": [4, "sts3215"], + "wrist_roll": [5, "sts3215"], + "gripper": [6, "sts3215"], + }, + ), + } + ) +``` ### 2. Configure the motors @@ -155,9 +199,11 @@ You will need to move the follower arm to these positions sequentially: Make sure both arms are connected and run this script to launch manual calibration: ```bash -python lerobot/scripts/control_robot.py calibrate \ - --robot-path lerobot/configs/robot/so100.yaml \ - --robot-overrides '~cameras' --arms main_follower +python lerobot/scripts/control_robot.py \ + --robot.type=so100 \ + --robot.cameras='{}' \ + --control.type=calibrate \ + --control.arms='["main_follower"]' ``` #### b. Manual calibration of leader arm @@ -169,9 +215,11 @@ Follow step 6 of the [assembly video](https://youtu.be/FioA2oeFZ5I?t=724) which Run this script to launch manual calibration: ```bash -python lerobot/scripts/control_robot.py calibrate \ - --robot-path lerobot/configs/robot/so100.yaml \ - --robot-overrides '~cameras' --arms main_leader +python lerobot/scripts/control_robot.py \ + --robot.type=so100 \ + --robot.cameras='{}' \ + --control.type=calibrate \ + --control.arms='["main_leader"]' ``` ## F. Teleoperate @@ -179,18 +227,19 @@ python lerobot/scripts/control_robot.py calibrate \ **Simple teleop** Then you are ready to teleoperate your robot! 
Run this simple script (it won't connect and display the cameras): ```bash -python lerobot/scripts/control_robot.py teleoperate \ - --robot-path lerobot/configs/robot/so100.yaml \ - --robot-overrides '~cameras' \ - --display-cameras 0 +python lerobot/scripts/control_robot.py \ + --robot.type=so100 \ + --robot.cameras='{}' \ + --control.type=teleoperate ``` #### a. Teleop with displaying cameras Follow [this guide to setup your cameras](https://github.com/huggingface/lerobot/blob/main/examples/7_get_started_with_real_robot.md#c-add-your-cameras-with-opencvcamera). Then you will be able to display the cameras on your computer while you are teleoperating by running the following code. This is useful to prepare your setup before recording your first dataset. ```bash -python lerobot/scripts/control_robot.py teleoperate \ - --robot-path lerobot/configs/robot/so100.yaml +python lerobot/scripts/control_robot.py \ + --robot.type=so100 \ + --control.type=teleoperate ``` ## G. Record a dataset @@ -210,61 +259,69 @@ echo $HF_USER Record 2 episodes and upload your dataset to the hub: ```bash -python lerobot/scripts/control_robot.py record \ - --robot-path lerobot/configs/robot/so100.yaml \ - --fps 30 \ - --repo-id ${HF_USER}/so100_test \ - --tags so100 tutorial \ - --warmup-time-s 5 \ - --episode-time-s 40 \ - --reset-time-s 10 \ - --num-episodes 2 \ - --push-to-hub 1 +python lerobot/scripts/control_robot.py \ + --robot.type=so100 \ + --control.type=record \ + --control.fps=30 \ + --control.single_task="Grasp a lego block and put it in the bin." \ + --control.repo_id=${HF_USER}/so100_test \ + --control.tags='["so100","tutorial"]' \ + --control.warmup_time_s=5 \ + --control.episode_time_s=30 \ + --control.reset_time_s=30 \ + --control.num_episodes=2 \ + --control.push_to_hub=true ``` +Note: You can resume recording by adding `--control.resume=true`. Also if you didn't push your dataset yet, add `--control.local_files_only=true`. + ## H. Visualize a dataset -If you uploaded your dataset to the hub with `--push-to-hub 1`, you can [visualize your dataset online](https://huggingface.co/spaces/lerobot/visualize_dataset) by copy pasting your repo id given by: +If you uploaded your dataset to the hub with `--control.push_to_hub=true`, you can [visualize your dataset online](https://huggingface.co/spaces/lerobot/visualize_dataset) by copy pasting your repo id given by: ```bash echo ${HF_USER}/so100_test ``` -If you didn't upload with `--push-to-hub 0`, you can also visualize it locally with: +If you didn't upload with `--control.push_to_hub=false`, you can also visualize it locally with: ```bash python lerobot/scripts/visualize_dataset_html.py \ - --repo-id ${HF_USER}/so100_test + --repo-id ${HF_USER}/so100_test \ + --local-files-only 1 ``` ## I. Replay an episode Now try to replay the first episode on your robot: ```bash -python lerobot/scripts/control_robot.py replay \ - --robot-path lerobot/configs/robot/so100.yaml \ - --fps 30 \ - --repo-id ${HF_USER}/so100_test \ - --episode 0 +python lerobot/scripts/control_robot.py \ + --robot.type=so100 \ + --control.type=replay \ + --control.fps=30 \ + --control.repo_id=${HF_USER}/so100_test \ + --control.episode=0 ``` +Note: If you didn't push your dataset yet, add `--control.local_files_only=true`. + ## J. Train a policy To train a policy to control your robot, use the [`python lerobot/scripts/train.py`](../lerobot/scripts/train.py) script. A few arguments are required. 
Here is an example command:
```bash
python lerobot/scripts/train.py \
-  dataset_repo_id=${HF_USER}/so100_test \
-  policy=act_so100_real \
-  env=so100_real \
-  hydra.run.dir=outputs/train/act_so100_test \
-  hydra.job.name=act_so100_test \
-  device=cuda \
-  wandb.enable=true
+  --dataset.repo_id=${HF_USER}/so100_test \
+  --policy.type=act \
+  --output_dir=outputs/train/act_so100_test \
+  --job_name=act_so100_test \
+  --device=cuda \
+  --wandb.enable=true
```
+Note: If you didn't push your dataset yet, add `--dataset.local_files_only=true`.
+
Let's explain it:
-1. We provided the dataset as argument with `dataset_repo_id=${HF_USER}/so100_test`.
-2. We provided the policy with `policy=act_so100_real`. This loads configurations from [`lerobot/configs/policy/act_so100_real.yaml`](../lerobot/configs/policy/act_so100_real.yaml). Importantly, this policy uses 2 cameras as input `laptop`, `phone`.
-3. We provided an environment as argument with `env=so100_real`. This loads configurations from [`lerobot/configs/env/so100_real.yaml`](../lerobot/configs/env/so100_real.yaml).
-4. We provided `device=cuda` since we are training on a Nvidia GPU, but you can also use `device=mps` if you are using a Mac with Apple silicon, or `device=cpu` otherwise.
+1. We provided the dataset as argument with `--dataset.repo_id=${HF_USER}/so100_test`.
+2. We provided the policy with `policy.type=act`. This loads configurations from [`configuration_act.py`](../lerobot/common/policies/act/configuration_act.py). Importantly, this policy will automatically adapt to the number of motor states, motor actions and cameras of your robot (e.g. `laptop` and `phone`) which have been saved in your dataset.
+4. We provided `device=cuda` since we are training on a Nvidia GPU, but you could use `device=mps` to train on Apple silicon.
5. We provided `wandb.enable=true` to use [Weights and Biases](https://docs.wandb.ai/quickstart) for visualizing training plots. This is optional but if you use it, make sure you are logged in by running `wandb login`.

Training should take several hours. You will find checkpoints in `outputs/train/act_so100_test/checkpoints`.

## K. Evaluate your policy

You can use the `record` function from [`lerobot/scripts/control_robot.py`](../lerobot/scripts/control_robot.py) but with a policy checkpoint as input. For instance, run this command to record 10 evaluation episodes:
```bash
-python lerobot/scripts/control_robot.py record \
-  --robot-path lerobot/configs/robot/so100.yaml \
-  --fps 30 \
-  --repo-id ${HF_USER}/eval_act_so100_test \
-  --tags so100 tutorial eval \
-  --warmup-time-s 5 \
-  --episode-time-s 40 \
-  --reset-time-s 10 \
-  --num-episodes 10 \
-  -p outputs/train/act_so100_test/checkpoints/last/pretrained_model
+python lerobot/scripts/control_robot.py \
+  --robot.type=so100 \
+  --control.type=record \
+  --control.fps=30 \
+  --control.single_task="Grasp a lego block and put it in the bin." \
+  --control.repo_id=${HF_USER}/eval_act_so100_test \
+  --control.tags='["tutorial"]' \
+  --control.warmup_time_s=5 \
+  --control.episode_time_s=30 \
+  --control.reset_time_s=30 \
+  --control.num_episodes=10 \
+  --control.push_to_hub=true \
+  --control.policy.path=outputs/train/act_so100_test/checkpoints/last/pretrained_model
```
As you can see, it's almost the same command as previously used to record your training dataset. Two things changed:
-1. There is an additional `-p` argument which indicates the path to your policy checkpoint with (e.g.
`-p outputs/train/eval_so100_test/checkpoints/last/pretrained_model`). You can also use the model repository if you uploaded a model checkpoint to the hub (e.g. `-p ${HF_USER}/act_so100_test`). -2. The name of dataset begins by `eval` to reflect that you are running inference (e.g. `--repo-id ${HF_USER}/eval_act_so100_test`). +1. There is an additional `--control.policy.path` argument which indicates the path to your policy checkpoint with (e.g. `outputs/train/eval_act_so100_test/checkpoints/last/pretrained_model`). You can also use the model repository if you uploaded a model checkpoint to the hub (e.g. `${HF_USER}/act_so100_test`). +2. The name of dataset begins by `eval` to reflect that you are running inference (e.g. `${HF_USER}/eval_act_so100_test`). ## L. More Information diff --git a/examples/11_use_moss.md b/examples/11_use_moss.md index 55d6fcaf94..e35ba9b278 100644 --- a/examples/11_use_moss.md +++ b/examples/11_use_moss.md @@ -83,6 +83,54 @@ sudo chmod 666 /dev/ttyACM0 sudo chmod 666 /dev/ttyACM1 ``` +#### Update config file + +IMPORTANTLY: Now that you have your ports, update the **port** default values of [`MossRobotConfig`](../lerobot/common/robot_devices/robots/configs.py). You will find something like: +```python +@RobotConfig.register_subclass("moss") +@dataclass +class MossRobotConfig(ManipulatorRobotConfig): + calibration_dir: str = ".cache/calibration/moss" + # `max_relative_target` limits the magnitude of the relative positional target vector for safety purposes. + # Set this to a positive scalar to have the same value for all motors, or a list that is the same length as + # the number of motors in your follower arms. + max_relative_target: int | None = None + + leader_arms: dict[str, MotorsBusConfig] = field( + default_factory=lambda: { + "main": FeetechMotorsBusConfig( + port="/dev/tty.usbmodem58760431091", <-- UPDATE HERE + motors={ + # name: (index, model) + "shoulder_pan": [1, "sts3215"], + "shoulder_lift": [2, "sts3215"], + "elbow_flex": [3, "sts3215"], + "wrist_flex": [4, "sts3215"], + "wrist_roll": [5, "sts3215"], + "gripper": [6, "sts3215"], + }, + ), + } + ) + + follower_arms: dict[str, MotorsBusConfig] = field( + default_factory=lambda: { + "main": FeetechMotorsBusConfig( + port="/dev/tty.usbmodem585A0076891", <-- UPDATE HERE + motors={ + # name: (index, model) + "shoulder_pan": [1, "sts3215"], + "shoulder_lift": [2, "sts3215"], + "elbow_flex": [3, "sts3215"], + "wrist_flex": [4, "sts3215"], + "wrist_roll": [5, "sts3215"], + "gripper": [6, "sts3215"], + }, + ), + } + ) +``` + **Configure your motors** Plug your first motor and run this script to set its ID to 1. 
It will also set its present position to 2048, so expect your motor to rotate: ```bash @@ -134,9 +182,11 @@ You will need to move the follower arm to these positions sequentially: Make sure both arms are connected and run this script to launch manual calibration: ```bash -python lerobot/scripts/control_robot.py calibrate \ - --robot-path lerobot/configs/robot/moss.yaml \ - --robot-overrides '~cameras' --arms main_follower +python lerobot/scripts/control_robot.py \ + --robot.type=moss \ + --robot.cameras='{}' \ + --control.type=calibrate \ + --control.arms='["main_follower"]' ``` **Manual calibration of leader arm** @@ -148,9 +198,11 @@ Follow step 6 of the [assembly video](https://www.youtube.com/watch?v=DA91NJOtMi Run this script to launch manual calibration: ```bash -python lerobot/scripts/control_robot.py calibrate \ - --robot-path lerobot/configs/robot/moss.yaml \ - --robot-overrides '~cameras' --arms main_leader +python lerobot/scripts/control_robot.py \ + --robot.type=moss \ + --robot.cameras='{}' \ + --control.type=calibrate \ + --control.arms='["main_leader"]' ``` ## Teleoperate @@ -158,18 +210,19 @@ python lerobot/scripts/control_robot.py calibrate \ **Simple teleop** Then you are ready to teleoperate your robot! Run this simple script (it won't connect and display the cameras): ```bash -python lerobot/scripts/control_robot.py teleoperate \ - --robot-path lerobot/configs/robot/moss.yaml \ - --robot-overrides '~cameras' \ - --display-cameras 0 +python lerobot/scripts/control_robot.py \ + --robot.type=moss \ + --robot.cameras='{}' \ + --control.type=teleoperate ``` **Teleop with displaying cameras** Follow [this guide to setup your cameras](https://github.com/huggingface/lerobot/blob/main/examples/7_get_started_with_real_robot.md#c-add-your-cameras-with-opencvcamera). Then you will be able to display the cameras on your computer while you are teleoperating by running the following code. This is useful to prepare your setup before recording your first dataset. ```bash -python lerobot/scripts/control_robot.py teleoperate \ - --robot-path lerobot/configs/robot/moss.yaml +python lerobot/scripts/control_robot.py \ + --robot.type=moss \ + --control.type=teleoperate ``` ## Record a dataset @@ -189,61 +242,69 @@ echo $HF_USER Record 2 episodes and upload your dataset to the hub: ```bash -python lerobot/scripts/control_robot.py record \ - --robot-path lerobot/configs/robot/moss.yaml \ - --fps 30 \ - --repo-id ${HF_USER}/moss_test \ - --tags moss tutorial \ - --warmup-time-s 5 \ - --episode-time-s 40 \ - --reset-time-s 10 \ - --num-episodes 2 \ - --push-to-hub 1 +python lerobot/scripts/control_robot.py \ + --robot.type=moss \ + --control.type=record \ + --control.fps=30 \ + --control.single_task="Grasp a lego block and put it in the bin." \ + --control.repo_id=${HF_USER}/moss_test \ + --control.tags='["moss","tutorial"]' \ + --control.warmup_time_s=5 \ + --control.episode_time_s=30 \ + --control.reset_time_s=30 \ + --control.num_episodes=2 \ + --control.push_to_hub=true ``` +Note: You can resume recording by adding `--control.resume=true`. Also if you didn't push your dataset yet, add `--control.local_files_only=true`. 
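Before moving on to visualization, you can quickly sanity-check what was recorded. The snippet below is not part of the original tutorial; it is a minimal sketch that assumes the `LeRobotDataset` API used elsewhere in this codebase (notably the `local_files_only` argument and `meta.camera_keys`), with the repo id standing in for whatever you passed to `--control.repo_id`:
```python
# Hedged sketch: load the dataset you just recorded and print a few basic facts about it.
from lerobot.common.datasets.lerobot_dataset import LeRobotDataset

repo_id = "YOUR_HF_USER/moss_test"  # same value as --control.repo_id above (placeholder)

# Use local_files_only=True if you recorded with --control.push_to_hub=false.
dataset = LeRobotDataset(repo_id, local_files_only=True)

print(dataset.meta.camera_keys)  # camera image keys saved in the dataset
print(dataset.num_episodes)      # should match --control.num_episodes
print(dataset[0].keys())         # one frame: observation keys, action, timestamps, ...
```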
+
+## Visualize a dataset
-If you uploaded your dataset to the hub with `--push-to-hub 1`, you can [visualize your dataset online](https://huggingface.co/spaces/lerobot/visualize_dataset) by copy pasting your repo id given by:
+If you uploaded your dataset to the hub with `--control.push_to_hub=true`, you can [visualize your dataset online](https://huggingface.co/spaces/lerobot/visualize_dataset) by copy pasting your repo id given by:
```bash
echo ${HF_USER}/moss_test
```
-If you didn't upload with `--push-to-hub 0`, you can also visualize it locally with:
+If you didn't upload with `--control.push_to_hub=false`, you can also visualize it locally with:
```bash
python lerobot/scripts/visualize_dataset_html.py \
-  --repo-id ${HF_USER}/moss_test
+  --repo-id ${HF_USER}/moss_test \
+  --local-files-only 1
```

## Replay an episode

Now try to replay the first episode on your robot:
```bash
-python lerobot/scripts/control_robot.py replay \
-  --robot-path lerobot/configs/robot/moss.yaml \
-  --fps 30 \
-  --repo-id ${HF_USER}/moss_test \
-  --episode 0
+python lerobot/scripts/control_robot.py \
+  --robot.type=moss \
+  --control.type=replay \
+  --control.fps=30 \
+  --control.repo_id=${HF_USER}/moss_test \
+  --control.episode=0
```
+Note: If you didn't push your dataset yet, add `--control.local_files_only=true`.
+
## Train a policy

To train a policy to control your robot, use the [`python lerobot/scripts/train.py`](../lerobot/scripts/train.py) script. A few arguments are required. Here is an example command:
```bash
python lerobot/scripts/train.py \
-  dataset_repo_id=${HF_USER}/moss_test \
-  policy=act_moss_real \
-  env=moss_real \
-  hydra.run.dir=outputs/train/act_moss_test \
-  hydra.job.name=act_moss_test \
-  device=cuda \
-  wandb.enable=true
+  --dataset.repo_id=${HF_USER}/moss_test \
+  --policy.type=act \
+  --output_dir=outputs/train/act_moss_test \
+  --job_name=act_moss_test \
+  --device=cuda \
+  --wandb.enable=true
```
+Note: If you didn't push your dataset yet, add `--dataset.local_files_only=true`.
+
Let's explain it:
-1. We provided the dataset as argument with `dataset_repo_id=${HF_USER}/moss_test`.
-2. We provided the policy with `policy=act_moss_real`. This loads configurations from [`lerobot/configs/policy/act_moss_real.yaml`](../lerobot/configs/policy/act_moss_real.yaml). Importantly, this policy uses 2 cameras as input `laptop`, `phone`.
-3. We provided an environment as argument with `env=moss_real`. This loads configurations from [`lerobot/configs/env/moss_real.yaml`](../lerobot/configs/env/moss_real.yaml).
-4. We provided `device=cuda` since we are training on a Nvidia GPU, but you can also use `device=mps` if you are using a Mac with Apple silicon, or `device=cpu` otherwise.
+1. We provided the dataset as argument with `--dataset.repo_id=${HF_USER}/moss_test`.
+2. We provided the policy with `policy.type=act`. This loads configurations from [`configuration_act.py`](../lerobot/common/policies/act/configuration_act.py). Importantly, this policy will automatically adapt to the number of motor states, motor actions and cameras of your robot (e.g. `laptop` and `phone`) which have been saved in your dataset.
+4. We provided `device=cuda` since we are training on a Nvidia GPU, but you could use `device=mps` to train on Apple silicon.
5. We provided `wandb.enable=true` to use [Weights and Biases](https://docs.wandb.ai/quickstart) for visualizing training plots. This is optional but if you use it, make sure you are logged in by running `wandb login`.

Training should take several hours.
You will find checkpoints in `outputs/train/act_moss_test/checkpoints`. @@ -252,21 +313,24 @@ Training should take several hours. You will find checkpoints in `outputs/train/ You can use the `record` function from [`lerobot/scripts/control_robot.py`](../lerobot/scripts/control_robot.py) but with a policy checkpoint as input. For instance, run this command to record 10 evaluation episodes: ```bash -python lerobot/scripts/control_robot.py record \ - --robot-path lerobot/configs/robot/moss.yaml \ - --fps 30 \ - --repo-id ${HF_USER}/eval_act_moss_test \ - --tags moss tutorial eval \ - --warmup-time-s 5 \ - --episode-time-s 40 \ - --reset-time-s 10 \ - --num-episodes 10 \ - -p outputs/train/act_moss_test/checkpoints/last/pretrained_model +python lerobot/scripts/control_robot.py \ + --robot.type=moss \ + --control.type=record \ + --control.fps=30 \ + --control.single_task="Grasp a lego block and put it in the bin." \ + --control.repo_id=${HF_USER}/eval_act_moss_test \ + --control.tags='["tutorial"]' \ + --control.warmup_time_s=5 \ + --control.episode_time_s=30 \ + --control.reset_time_s=30 \ + --control.num_episodes=10 \ + --control.push_to_hub=true \ + --control.policy.path=outputs/train/act_moss_test/checkpoints/last/pretrained_model ``` As you can see, it's almost the same command as previously used to record your training dataset. Two things changed: -1. There is an additional `-p` argument which indicates the path to your policy checkpoint with (e.g. `-p outputs/train/eval_moss_test/checkpoints/last/pretrained_model`). You can also use the model repository if you uploaded a model checkpoint to the hub (e.g. `-p ${HF_USER}/act_moss_test`). -2. The name of dataset begins by `eval` to reflect that you are running inference (e.g. `--repo-id ${HF_USER}/eval_act_moss_test`). +1. There is an additional `--control.policy.path` argument which indicates the path to your policy checkpoint with (e.g. `outputs/train/eval_act_moss_test/checkpoints/last/pretrained_model`). You can also use the model repository if you uploaded a model checkpoint to the hub (e.g. `${HF_USER}/act_moss_test`). +2. The name of dataset begins by `eval` to reflect that you are running inference (e.g. `${HF_USER}/eval_act_moss_test`). ## More diff --git a/examples/7_get_started_with_real_robot.md b/examples/7_get_started_with_real_robot.md index c86508e7c7..3d863f5699 100644 --- a/examples/7_get_started_with_real_robot.md +++ b/examples/7_get_started_with_real_robot.md @@ -58,9 +58,9 @@ Finally, connect both arms to your computer via USB. Note that the USB doesn't p In the upcoming sections, you'll learn about our classes and functions by running some python code, in an interactive session, or by copy-pasting it in a python file. If this is your first time using the tutorial., we highly recommend going through these steps to get deeper intuition about how things work. Once you're more familiar, you can streamline the process by directly running the teleoperate script (which is detailed further in the tutorial): ```bash -python lerobot/scripts/control_robot.py teleoperate \ - --robot-path lerobot/configs/robot/koch.yaml \ - --robot-overrides '~cameras' # do not instantiate the cameras +python lerobot/scripts/control_robot.py \ + --robot.type=koch \ + --control.type=teleoperate ``` It will automatically: @@ -105,10 +105,10 @@ The port of this DynamixelMotorsBus is /dev/tty.usbmodem575E0032081 Reconnect the usb cable. 
``` -Troubleshooting: On Linux, you might need to give access to the USB ports by running: +Troubleshooting: On Linux, you might need to give access to the USB ports by running this command with your ports: ```bash -sudo chmod 666 /dev/ttyACM0 -sudo chmod 666 /dev/ttyACM1 +sudo chmod 666 /dev/tty.usbmodem575E0032081 +sudo chmod 666 /dev/tty.usbmodem575E0031751 ``` *Listing and Configuring Motors* @@ -117,13 +117,11 @@ Next, you'll need to list the motors for each arm, including their name, index, To assign indices to the motors, run this code in an interactive Python session. Replace the `port` values with the ones you identified earlier: ```python +from lerobot.common.robot_devices.motors.configs import DynamixelMotorsBusConfig from lerobot.common.robot_devices.motors.dynamixel import DynamixelMotorsBus -leader_port = "/dev/tty.usbmodem575E0031751" -follower_port = "/dev/tty.usbmodem575E0032081" - -leader_arm = DynamixelMotorsBus( - port=leader_port, +leader_config = DynamixelMotorsBusConfig( + port="/dev/tty.usbmodem575E0031751", motors={ # name: (index, model) "shoulder_pan": (1, "xl330-m077"), @@ -135,8 +133,8 @@ leader_arm = DynamixelMotorsBus( }, ) -follower_arm = DynamixelMotorsBus( - port=follower_port, +follower_config = DynamixelMotorsBusConfig( + port="/dev/tty.usbmodem575E0032081", motors={ # name: (index, model) "shoulder_pan": (1, "xl430-w250"), @@ -147,44 +145,56 @@ follower_arm = DynamixelMotorsBus( "gripper": (6, "xl330-m288"), }, ) -``` - -*Updating the YAML Configuration File* -Next, update the port values in the YAML configuration file for the Koch robot at [`lerobot/configs/robot/koch.yaml`](../lerobot/configs/robot/koch.yaml) with the ports you've identified: -```yaml -[...] -robot_type: koch -leader_arms: - main: - _target_: lerobot.common.robot_devices.motors.dynamixel.DynamixelMotorsBus - port: /dev/tty.usbmodem575E0031751 # <- Update - motors: - # name: (index, model) - shoulder_pan: [1, "xl330-m077"] - shoulder_lift: [2, "xl330-m077"] - elbow_flex: [3, "xl330-m077"] - wrist_flex: [4, "xl330-m077"] - wrist_roll: [5, "xl330-m077"] - gripper: [6, "xl330-m077"] -follower_arms: - main: - _target_: lerobot.common.robot_devices.motors.dynamixel.DynamixelMotorsBus - port: /dev/tty.usbmodem575E0032081 # <- Update - motors: - # name: (index, model) - shoulder_pan: [1, "xl430-w250"] - shoulder_lift: [2, "xl430-w250"] - elbow_flex: [3, "xl330-m288"] - wrist_flex: [4, "xl330-m288"] - wrist_roll: [5, "xl330-m288"] - gripper: [6, "xl330-m288"] -[...] +leader_arm = DynamixelMotorsBus(leader_config) +follower_arm = DynamixelMotorsBus(follower_config) ``` -Don't forget to set `robot_type: aloha` if you follow this tutorial with [Aloha bimanual robot](aloha-2.github.io) instead of Koch v1.1 - -This configuration file is used to instantiate your robot across all scripts. We'll cover how this works later on. +IMPORTANTLY: Now that you have your ports, update [`KochRobotConfig`](../lerobot/common/robot_devices/robots/configs.py). You will find something like: +```python +@RobotConfig.register_subclass("koch") +@dataclass +class KochRobotConfig(ManipulatorRobotConfig): + calibration_dir: str = ".cache/calibration/koch" + # `max_relative_target` limits the magnitude of the relative positional target vector for safety purposes. + # Set this to a positive scalar to have the same value for all motors, or a list that is the same length as + # the number of motors in your follower arms. 
+ max_relative_target: int | None = None + + leader_arms: dict[str, MotorsBusConfig] = field( + default_factory=lambda: { + "main": DynamixelMotorsBusConfig( + port="/dev/tty.usbmodem585A0085511", <-- UPDATE HERE + motors={ + # name: (index, model) + "shoulder_pan": [1, "xl330-m077"], + "shoulder_lift": [2, "xl330-m077"], + "elbow_flex": [3, "xl330-m077"], + "wrist_flex": [4, "xl330-m077"], + "wrist_roll": [5, "xl330-m077"], + "gripper": [6, "xl330-m077"], + }, + ), + } + ) + + follower_arms: dict[str, MotorsBusConfig] = field( + default_factory=lambda: { + "main": DynamixelMotorsBusConfig( + port="/dev/tty.usbmodem585A0076891", <-- UPDATE HERE + motors={ + # name: (index, model) + "shoulder_pan": [1, "xl430-w250"], + "shoulder_lift": [2, "xl430-w250"], + "elbow_flex": [3, "xl330-m288"], + "wrist_flex": [4, "xl330-m288"], + "wrist_roll": [5, "xl330-m288"], + "gripper": [6, "xl330-m288"], + }, + ), + } + ) +``` **Connect and Configure your Motors** @@ -312,27 +322,27 @@ Alternatively, you can unplug the power cord, which will automatically disable t **Instantiate the ManipulatorRobot** -Before you can teleoperate your robot, you need to instantiate the [`ManipulatorRobot`](../lerobot/common/robot_devices/robots/manipulator.py) using the previously defined `leader_arm` and `follower_arm`. +Before you can teleoperate your robot, you need to instantiate the [`ManipulatorRobot`](../lerobot/common/robot_devices/robots/manipulator.py) using the previously defined `leader_config` and `follower_config`. -For the Koch v1.1 robot, we only have one leader, so we refer to it as `"main"` and define it as `leader_arms={"main": leader_arm}`. We do the same for the follower arm. For other robots (like the Aloha), which may have two pairs of leader and follower arms, you would define them like this: `leader_arms={"left": left_leader_arm, "right": right_leader_arm},`. Same thing for the follower arms. +For the Koch v1.1 robot, we only have one leader, so we refer to it as `"main"` and define it as `leader_arms={"main": leader_config}`. We do the same for the follower arm. For other robots (like the Aloha), which may have two pairs of leader and follower arms, you would define them like this: `leader_arms={"left": left_leader_config, "right": right_leader_config},`. Same thing for the follower arms. -You also need to provide a path to a calibration directory, such as `calibration_dir=".cache/calibration/koch"`. More on this in the next section. Run the following code to instantiate your manipulator robot: ```python +from lerobot.common.robot_devices.robots.configs import KochRobotConfig from lerobot.common.robot_devices.robots.manipulator import ManipulatorRobot -robot = ManipulatorRobot( - robot_type="koch", - leader_arms={"main": leader_arm}, - follower_arms={"main": follower_arm}, - calibration_dir=".cache/calibration/koch", +robot_config = KochRobotConfig( + leader_arms={"main": leader_config}, + follower_arms={"main": follower_config}, + cameras={}, # We don't use any camera for now ) +robot = ManipulatorRobot(robot_config) ``` -The `robot_type="koch"` is used to set the associated settings and calibration process. For instance, we activate the torque of the gripper of the leader Koch v1.1 arm and position it at a 40 degree angle to use it as a trigger. +The `KochRobotConfig` is used to set the associated settings and calibration process. For instance, we activate the torque of the gripper of the leader Koch v1.1 arm and position it at a 40 degree angle to use it as a trigger. 
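If you would rather not edit the default ports in `configs.py` at all, you can also start from the registered config and only override what differs on your setup. This is a hedged sketch, not a workflow from the original tutorial: it assumes the dataclass fields shown above (`leader_arms`, `follower_arms`, `cameras`, each arm config exposing a `port`) and that these config dataclasses are mutable:
```python
# Sketch (assumption-laden): build the default KochRobotConfig, then swap in your own serial ports.
from lerobot.common.robot_devices.robots.configs import KochRobotConfig
from lerobot.common.robot_devices.robots.manipulator import ManipulatorRobot

robot_config = KochRobotConfig(cameras={})  # no cameras for now

# Override only the ports found earlier, keeping the default motor layout.
robot_config.leader_arms["main"].port = "/dev/tty.usbmodem575E0031751"
robot_config.follower_arms["main"].port = "/dev/tty.usbmodem575E0032081"

robot = ManipulatorRobot(robot_config)
```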
-For the [Aloha bimanual robot](https://aloha-2.github.io), we would use `robot_type="aloha"` to set different settings such as a secondary ID for shadow joints (shoulder, elbow). Specific to Aloha, LeRobot comes with default calibration files stored in in `.cache/calibration/aloha_default`. Assuming the motors have been properly assembled, no manual calibration step is expected. If you need to run manual calibration, simply update `calibration_dir` to `.cache/calibration/aloha`.
+For the [Aloha bimanual robot](https://aloha-2.github.io), we would use `AlohaRobotConfig` to set different settings such as a secondary ID for shadow joints (shoulder, elbow). Specific to Aloha, LeRobot comes with default calibration files stored in `.cache/calibration/aloha_default`. Assuming the motors have been properly assembled, no manual calibration step is expected for Aloha.

**Calibrate and Connect the ManipulatorRobot**

@@ -579,9 +589,11 @@ Note: Some cameras may take a few seconds to warm up, and the first frame might
Finally, run this code to instantiate and connect your camera:
```python
+from lerobot.common.robot_devices.cameras.configs import OpenCVCameraConfig
from lerobot.common.robot_devices.cameras.opencv import OpenCVCamera

-camera = OpenCVCamera(camera_index=0)
+camera_config = OpenCVCameraConfig(camera_index=0)
+camera = OpenCVCamera(camera_config)
camera.connect()
color_image = camera.read()
@@ -603,7 +615,7 @@ uint8
With certain cameras, you can also specify additional parameters like frame rate, resolution, and color mode during instantiation. For instance:
```python
-camera = OpenCVCamera(camera_index=0, fps=30, width=640, height=480)
+config = OpenCVCameraConfig(camera_index=0, fps=30, width=640, height=480)
```
If the provided arguments are not compatible with the camera, an exception will be raised.
@@ -626,8 +638,8 @@ robot = ManipulatorRobot(
    follower_arms={"main": follower_arm},
    calibration_dir=".cache/calibration/koch",
    cameras={
-        "laptop": OpenCVCamera(0, fps=30, width=640, height=480),
-        "phone": OpenCVCamera(1, fps=30, width=640, height=480),
+        "laptop": OpenCVCameraConfig(0, fps=30, width=640, height=480),
+        "phone": OpenCVCameraConfig(1, fps=30, width=640, height=480),
    },
)
robot.connect()
@@ -652,34 +664,15 @@ torch.Size([3, 480, 640])
255
```
-Also, update the following lines of the yaml file for Koch robot [`lerobot/configs/robot/koch.yaml`](../lerobot/configs/robot/koch.yaml) with the names and configurations of your cameras:
-```yaml
-[...]
-cameras:
-  laptop:
-    _target_: lerobot.common.robot_devices.cameras.opencv.OpenCVCamera
-    camera_index: 0
-    fps: 30
-    width: 640
-    height: 480
-  phone:
-    _target_: lerobot.common.robot_devices.cameras.opencv.OpenCVCamera
-    camera_index: 1
-    fps: 30
-    width: 640
-    height: 480
-```
-
-This file is used to instantiate your robot in all our scripts. We will explain how this works in the next section.
+### d. Use `control_robot.py` and our `teleoperate` function
-### d. Use `koch.yaml` and our `teleoperate` function
-Instead of manually running the python code in a terminal window, you can use [`lerobot/scripts/control_robot.py`](../lerobot/scripts/control_robot.py) to instantiate your robot by providing the path to the robot yaml file (e.g. [`lerobot/configs/robot/koch.yaml`](../lerobot/configs/robot/koch.yaml)) and control your robot with various modes as explained next.
+Instead of manually running the python code in a terminal window, you can use [`lerobot/scripts/control_robot.py`](../lerobot/scripts/control_robot.py) to instantiate your robot by providing the robot configurations via command line and control your robot with various modes as explained next. Try running this code to teleoperate your robot (if you dont have a camera, keep reading): ```bash -python lerobot/scripts/control_robot.py teleoperate \ - --robot-path lerobot/configs/robot/koch.yaml +python lerobot/scripts/control_robot.py \ + --robot.type=koch \ + --control.type=teleoperate ``` You will see a lot of lines appearing like this one: @@ -694,21 +687,12 @@ It contains - `dtRlead: 4.93 (203.0hz)` which is the number of milliseconds it took to read the position of the leader arm using `leader_arm.read("Present_Position")`. - `dtWfoll: 0.22 (4446.9hz)` which is the number of milliseconds it took to set a new goal position for the follower arm using `follower_arm.write("Goal_position", leader_pos)` ; note that writing is done asynchronously so it takes less time than reading. -Note: you can override any entry in the yaml file using `--robot-overrides` and the [hydra.cc](https://hydra.cc/docs/advanced/override_grammar/basic) syntax. If needed, you can override the ports like this: -```bash -python lerobot/scripts/control_robot.py teleoperate \ - --robot-path lerobot/configs/robot/koch.yaml \ - --robot-overrides \ - leader_arms.main.port=/dev/tty.usbmodem575E0031751 \ - follower_arms.main.port=/dev/tty.usbmodem575E0032081 -``` - -Importantly: If you don't have any camera, you can remove them dynamically with this [hydra.cc](https://hydra.cc/docs/advanced/override_grammar/basic) syntax `'~cameras'`: +Importantly: If you don't have any camera, you can remove them dynamically with this [draccus](https://github.com/dlwh/draccus) syntax `--robot.cameras='{}'`: ```bash -python lerobot/scripts/control_robot.py teleoperate \ - --robot-path lerobot/configs/robot/koch.yaml \ - --robot-overrides \ - '~cameras' +python lerobot/scripts/control_robot.py \ + --robot.type=koch \ + --robot.cameras='{}' \ + --control.type=teleoperate ``` We advise to create a new yaml file when the command becomes too long. @@ -744,23 +728,23 @@ for _ in range(record_time_s * fps): Importantly, many utilities are still missing. For instance, if you have cameras, you will need to save the images on disk to not go out of RAM, and to do so in threads to not slow down communication with your robot. Also, you will need to store your data in a format optimized for training and web sharing like [`LeRobotDataset`](../lerobot/common/datasets/lerobot_dataset.py). More on this in the next section. -### a. Use `koch.yaml` and the `record` function +### a. Use the `record` function You can use the `record` function from [`lerobot/scripts/control_robot.py`](../lerobot/scripts/control_robot.py) to achieve efficient data recording. It encompasses many recording utilities: -1. Frames from cameras are saved on disk in threads, and encoded into videos at the end of recording. +1. Frames from cameras are saved on disk in threads, and encoded into videos at the end of each episode recording. 2. Video streams from cameras are displayed in window so that you can verify them. -3. Data is stored with [`LeRobotDataset`](../lerobot/common/datasets/lerobot_dataset.py) format which is pushed to your Hugging Face page (unless `--push-to-hub 0` is provided). -4. 
Checkpoints are done during recording, so if any issue occurs, you can resume recording by re-running the same command again. You can also use `--force-override 1` to start recording from scratch. +3. Data is stored with [`LeRobotDataset`](../lerobot/common/datasets/lerobot_dataset.py) format which is pushed to your Hugging Face page (unless `--control.push_to_hub=false` is provided). +4. Checkpoints are done during recording, so if any issue occurs, you can resume recording by re-running the same command again with `--control.resume=true`. You might need to add `--control.local_files_only=true` if your dataset was not uploaded to hugging face hub. Also you will need to manually delete the dataset directory to start recording from scratch. 5. Set the flow of data recording using command line arguments: - - `--warmup-time-s` defines the number of seconds before starting data collection. It allows the robot devices to warmup and synchronize (10 seconds by default). - - `--episode-time-s` defines the number of seconds for data recording for each episode (60 seconds by default). - - `--reset-time-s` defines the number of seconds for resetting the environment after each episode (60 seconds by default). - - `--num-episodes` defines the number of episodes to record (50 by default). + - `--control.warmup_time_s=10` defines the number of seconds before starting data collection. It allows the robot devices to warmup and synchronize (10 seconds by default). + - `--control.episode_time_s=60` defines the number of seconds for data recording for each episode (60 seconds by default). + - `--control.reset_time_s=60` defines the number of seconds for resetting the environment after each episode (60 seconds by default). + - `--control.num_episodes=50` defines the number of episodes to record (50 by default). 6. Control the flow during data recording using keyboard keys: - Press right arrow `->` at any time during episode recording to early stop and go to resetting. Same during resetting, to early stop and to go to the next episode recording. - Press left arrow `<-` at any time during episode recording or resetting to early stop, cancel the current episode, and re-record it. - Press escape `ESC` at any time during episode recording to end the session early and go straight to video encoding and dataset uploading. -7. Similarly to `teleoperate`, you can also use `--robot-path` and `--robot-overrides` to specify your robots. +7. Similarly to `teleoperate`, you can also use the command line to override anything. Before trying `record`, if you want to push your dataset to the hub, make sure you've logged in using a write-access token, which can be generated from the [Hugging Face settings](https://huggingface.co/settings/tokens): ```bash @@ -771,27 +755,29 @@ Also, store your Hugging Face repository name in a variable (e.g. `cadene` or `l HF_USER=$(huggingface-cli whoami | head -n 1) echo $HF_USER ``` -If you don't want to push to hub, use `--push-to-hub 0`. +If you don't want to push to hub, use `--control.push_to_hub=false`. Now run this to record 2 episodes: ```bash -python lerobot/scripts/control_robot.py record \ - --robot-path lerobot/configs/robot/koch.yaml \ - --fps 30 \ - --repo-id ${HF_USER}/koch_test \ - --tags tutorial \ - --warmup-time-s 5 \ - --episode-time-s 30 \ - --reset-time-s 30 \ - --num-episodes 2 +python lerobot/scripts/control_robot.py \ + --robot.type=koch \ + --control.type=record \ + --control.single_task="Grasp a lego block and put it in the bin." 
\ + --control.fps=30 \ + --control.repo_id=${HF_USER}/koch_test \ + --control.tags='["tutorial"]' \ + --control.warmup_time_s=5 \ + --control.episode_time_s=30 \ + --control.reset_time_s=30 \ + --control.num_episodes=2 \ + --control.push_to_hub=true ``` + This will write your dataset locally to `~/.cache/huggingface/lerobot/{repo-id}` (e.g. `data/cadene/koch_test`) and push it on the hub at `https://huggingface.co/datasets/{HF_USER}/{repo-id}`. Your dataset will be automatically tagged with `LeRobot` for the community to find it easily, and you can also add custom tags (in this case `tutorial` for example). You can look for other LeRobot datasets on the hub by searching for `LeRobot` tags: https://huggingface.co/datasets?other=LeRobot -Remember to add `--robot-overrides '~cameras'` if you don't have any cameras and you still use the default `koch.yaml` configuration. - You will see a lot of lines appearing like this one: ``` INFO 2024-08-10 15:02:58 ol_robot.py:219 dt:33.34 (30.0hz) dtRlead: 5.06 (197.5hz) dtWfoll: 0.25 (3963.7hz) dtRfoll: 6.22 (160.7hz) dtRlaptop: 32.57 (30.7hz) dtRphone: 33.84 (29.5hz) @@ -842,6 +828,8 @@ python lerobot/scripts/visualize_dataset_html.py \ --repo-id ${HF_USER}/koch_test ``` +Note: You might need to add `--local-files-only 1` if your dataset was not uploaded to hugging face hub. + This will launch a local web server that looks like this:
Koch v1.1 leader and follower arms @@ -853,13 +841,16 @@ A useful feature of [`lerobot/scripts/control_robot.py`](../lerobot/scripts/cont To replay the first episode of the dataset you just recorded, run the following command: ```bash -python lerobot/scripts/control_robot.py replay \ - --robot-path lerobot/configs/robot/koch.yaml \ - --fps 30 \ - --repo-id ${HF_USER}/koch_test \ - --episode 0 +python lerobot/scripts/control_robot.py \ + --robot.type=koch \ + --control.type=replay \ + --control.fps=30 \ + --control.repo_id=${HF_USER}/koch_test \ + --control.episode=0 ``` +Note: You might need to add `--control.local_files_only=true` if your dataset was not uploaded to hugging face hub. + Your robot should replicate movements similar to those you recorded. For example, check out [this video](https://x.com/RemiCadene/status/1793654950905680090) where we use `replay` on a Aloha robot from [Trossen Robotics](https://www.trossenrobotics.com). ## 4. Train a policy on your data @@ -869,50 +860,19 @@ Your robot should replicate movements similar to those you recorded. For example To train a policy to control your robot, use the [`python lerobot/scripts/train.py`](../lerobot/scripts/train.py) script. A few arguments are required. Here is an example command: ```bash python lerobot/scripts/train.py \ - dataset_repo_id=${HF_USER}/koch_test \ - policy=act_koch_real \ - env=koch_real \ - hydra.run.dir=outputs/train/act_koch_test \ - hydra.job.name=act_koch_test \ - device=cuda \ - wandb.enable=true + --dataset.repo_id=${HF_USER}/koch_test \ + --policy.type=act \ + --output_dir=outputs/train/act_koch_test \ + --job_name=act_koch_test \ + --device=cuda \ + --wandb.enable=true ``` +Note: You might need to add `--dataset.local_files_only=true` if your dataset was not uploaded to hugging face hub. + Let's explain it: -1. We provided the dataset as argument with `dataset_repo_id=${HF_USER}/koch_test`. -2. We provided the policy with `policy=act_koch_real`. This loads configurations from [`lerobot/configs/policy/act_koch_real.yaml`](../lerobot/configs/policy/act_koch_real.yaml). Importantly, this policy uses 2 cameras as input `laptop` and `phone`. If your dataset has different cameras, update the yaml file to account for it in the following parts: -```yaml -... -override_dataset_stats: - observation.images.laptop: - # stats from imagenet, since we use a pretrained vision model - mean: [[[0.485]], [[0.456]], [[0.406]]] # (c,1,1) - std: [[[0.229]], [[0.224]], [[0.225]]] # (c,1,1) - observation.images.phone: - # stats from imagenet, since we use a pretrained vision model - mean: [[[0.485]], [[0.456]], [[0.406]]] # (c,1,1) - std: [[[0.229]], [[0.224]], [[0.225]]] # (c,1,1) -... - input_shapes: - observation.images.laptop: [3, 480, 640] - observation.images.phone: [3, 480, 640] -... - input_normalization_modes: - observation.images.laptop: mean_std - observation.images.phone: mean_std -... -``` -3. We provided an environment as argument with `env=koch_real`. This loads configurations from [`lerobot/configs/env/koch_real.yaml`](../lerobot/configs/env/koch_real.yaml). It looks like -```yaml -fps: 30 -env: - name: real_world - task: null - state_dim: 6 - action_dim: 6 - fps: ${fps} -``` -It should match your dataset (e.g. `fps: 30`) and your robot (e.g. `state_dim: 6` and `action_dim: 6`). We are still working on simplifying this in future versions of `lerobot`. +1. We provided the dataset as argument with `--dataset.repo_id=${HF_USER}/koch_test`. +2. We provided the policy with `policy.type=act`. 
This loads configurations from [`configuration_act.py`](../lerobot/common/policies/act/configuration_act.py). Importantly, this policy will automatically adapt to the number of motor states, motor actions and cameras of your robot (e.g. `laptop` and `phone`) which have been saved in your dataset. 4. We provided `device=cuda` since we are training on a Nvidia GPU, but you could use `device=mps` to train on Apple silicon. 5. We provided `wandb.enable=true` to use [Weights and Biases](https://docs.wandb.ai/quickstart) for visualizing training plots. This is optional but if you use it, make sure you are logged in by running `wandb login`. @@ -946,7 +906,6 @@ fps = 30 device = "cuda" # TODO: On Mac, use "mps" or "cpu" ckpt_path = "outputs/train/act_koch_test/checkpoints/last/pretrained_model" -# TODO(alibert, rcadene): fix this file policy = ACTPolicy.from_pretrained(ckpt_path) policy.to(device) @@ -979,34 +938,36 @@ for _ in range(inference_time_s * fps): busy_wait(1 / fps - dt_s) ``` -### a. Use `koch.yaml` and our `record` function +### a. Use our `record` function Ideally, when controlling your robot with your neural network, you would want to record evaluation episodes and to be able to visualize them later on, or even train on them like in Reinforcement Learning. This pretty much corresponds to recording a new dataset but with a neural network providing the actions instead of teleoperation. To this end, you can use the `record` function from [`lerobot/scripts/control_robot.py`](../lerobot/scripts/control_robot.py) but with a policy checkpoint as input. For instance, run this command to record 10 evaluation episodes: ```bash -python lerobot/scripts/control_robot.py record \ - --robot-path lerobot/configs/robot/koch.yaml \ - --fps 30 \ - --repo-id ${HF_USER}/eval_koch_test \ - --tags tutorial eval \ - --warmup-time-s 5 \ - --episode-time-s 30 \ - --reset-time-s 30 \ - --num-episodes 10 \ - -p outputs/train/act_koch_test/checkpoints/last/pretrained_model +python lerobot/scripts/control_robot.py \ + --robot.type=koch \ + --control.type=record \ + --control.fps=30 \ + --control.repo_id=${HF_USER}/eval_act_koch_test \ + --control.tags='["tutorial"]' \ + --control.warmup_time_s=5 \ + --control.episode_time_s=30 \ + --control.reset_time_s=30 \ + --control.num_episodes=10 \ + --control.push_to_hub=true \ + --control.policy.path=outputs/train/act_koch_test/checkpoints/last/pretrained_model ``` As you can see, it's almost the same command as previously used to record your training dataset. Two things changed: -1. There is an additional `-p` argument which indicates the path to your policy checkpoint with (e.g. `-p outputs/train/eval_koch_test/checkpoints/last/pretrained_model`). You can also use the model repository if you uploaded a model checkpoint to the hub (e.g. `-p ${HF_USER}/act_koch_test`). -2. The name of dataset begins by `eval` to reflect that you are running inference (e.g. `--repo-id ${HF_USER}/eval_koch_test`). +1. There is an additional `--control.policy.path` argument which indicates the path to your policy checkpoint (e.g. `outputs/train/eval_koch_test/checkpoints/last/pretrained_model`). You can also use the model repository if you uploaded a model checkpoint to the hub (e.g. `${HF_USER}/act_koch_test`). +2. The name of the dataset begins with `eval` to reflect that you are running inference (e.g. `${HF_USER}/eval_act_koch_test`). ### b.
Visualize evaluation afterwards You can then visualize your evaluation dataset by running the same command as before but with the new inference dataset as argument: ```bash python lerobot/scripts/visualize_dataset.py \ - --repo-id ${HF_USER}/eval_koch_test + --repo-id ${HF_USER}/eval_act_koch_test ``` ## 6. Next step diff --git a/examples/8_use_stretch.md b/examples/8_use_stretch.md index c2c306f071..2f8c0ffbb7 100644 --- a/examples/8_use_stretch.md +++ b/examples/8_use_stretch.md @@ -92,8 +92,9 @@ Serial Number = stretch-se3-3054 **Calibrate (Optional)** Before operating Stretch, you need to [home](https://docs.hello-robot.com/0.3/getting_started/stretch_hardware_overview/#homing) it first. Be mindful about giving Stretch some space as this procedure will move the robot's arm and gripper. Now run this command: ```bash -python lerobot/scripts/control_robot.py calibrate \ - --robot-path lerobot/configs/robot/stretch.yaml +python lerobot/scripts/control_robot.py \ + --robot.type=stretch \ + --control.type=calibrate ``` This is equivalent to running `stretch_robot_home.py` @@ -104,8 +105,9 @@ Before trying teleoperation, you need activate the gamepad controller by pressin Now try out teleoperation (see above documentation to learn about the gamepad controls): ```bash -python lerobot/scripts/control_robot.py teleoperate \ - --robot-path lerobot/configs/robot/stretch.yaml +python lerobot/scripts/control_robot.py \ + --robot.type=stretch \ + --control.type=teleoperate ``` This is essentially the same as running `stretch_gamepad_teleop.py` @@ -125,16 +127,18 @@ echo $HF_USER Record one episode: ```bash -python lerobot/scripts/control_robot.py record \ - --robot-path lerobot/configs/robot/stretch.yaml \ - --fps 20 \ - --repo-id ${HF_USER}/stretch_test \ - --tags stretch tutorial \ - --warmup-time-s 3 \ - --episode-time-s 40 \ - --reset-time-s 10 \ - --num-episodes 1 \ - --push-to-hub 0 +python lerobot/scripts/control_robot.py \ + --robot.type=stretch \ + --control.type=record \ + --control.fps=30 \ + --control.single_task="Grasp a lego block and put it in the bin." \ + --control.repo_id=${HF_USER}/stretch_test \ + --control.tags='["tutorial"]' \ + --control.warmup_time_s=5 \ + --control.episode_time_s=30 \ + --control.reset_time_s=30 \ + --control.num_episodes=2 \ + --control.push_to_hub=true ``` > **Note:** If you're using ssh to connect to Stretch and run this script, you won't be able to visualize its cameras feed (though they will still be recording). To see the cameras stream, use [tethered](https://docs.hello-robot.com/0.3/getting_started/connecting_to_stretch/#tethered-setup) or [untethered setup](https://docs.hello-robot.com/0.3/getting_started/connecting_to_stretch/#untethered-setup). @@ -142,11 +146,12 @@ python lerobot/scripts/control_robot.py record \ **Replay an episode** Now try to replay this episode (make sure the robot's initial position is the same): ```bash -python lerobot/scripts/control_robot.py replay \ - --robot-path lerobot/configs/robot/stretch.yaml \ - --fps 20 \ - --repo-id ${HF_USER}/stretch_test \ - --episode 0 +python lerobot/scripts/control_robot.py \ + --robot.type=stretch \ + --control.type=replay \ + --control.fps=30 \ + --control.repo_id=${HF_USER}/stretch_test \ + --control.episode=0 ``` Follow [previous tutorial](https://github.com/huggingface/lerobot/blob/main/examples/7_get_started_with_real_robot.md#4-train-a-policy-on-your-data) to train a policy on your data and run inference on your robot. You will need to adapt the code for Stretch. 
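If you want to double-check what was recorded before replaying it, a quick way is to load the episode with `LeRobotDataset`. The snippet below is only a minimal sketch: the repo id is a placeholder for your own `${HF_USER}/stretch_test`, and it assumes the dataset is already available in your local cache (hence `local_files_only=True`).

```python
from lerobot.common.datasets.lerobot_dataset import LeRobotDataset

# Placeholder: replace with your own ${HF_USER}/stretch_test repo id
repo_id = "YOUR_HF_USER/stretch_test"

# Load only the first episode from the local cache (no hub download)
dataset = LeRobotDataset(repo_id, episodes=[0], local_files_only=True)

print(dataset.meta.fps)      # should match the --control.fps used while recording
print(dataset.num_frames)    # number of frames in the selected episode
print(dataset[0]["action"])  # first recorded action (goal positions for the motors)
```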
diff --git a/examples/9_use_aloha.md b/examples/9_use_aloha.md index f531a2c1d5..d74c8b7aca 100644 --- a/examples/9_use_aloha.md +++ b/examples/9_use_aloha.md @@ -51,16 +51,18 @@ Teleoperation consists in manually operating the leader arms to move the followe By running the following code, you can start your first **SAFE** teleoperation: ```bash -python lerobot/scripts/control_robot.py teleoperate \ - --robot-path lerobot/configs/robot/aloha.yaml \ - --robot-overrides max_relative_target=5 +python lerobot/scripts/control_robot.py \ + --robot.type=aloha \ + --robot.max_relative_target=5 \ + --control.type=teleoperate ``` -By adding `--robot-overrides max_relative_target=5`, we override the default value for `max_relative_target` defined in `lerobot/configs/robot/aloha.yaml`. It is expected to be `5` to limit the magnitude of the movement for more safety, but the teleoperation won't be smooth. When you feel confident, you can disable this limit by adding `--robot-overrides max_relative_target=null` to the command line: +By adding `--robot.max_relative_target=5`, we override the default value for `max_relative_target` defined in [`AlohaRobotConfig`](lerobot/common/robot_devices/robots/configs.py). It is expected to be `5` to limit the magnitude of the movement for more safety, but the teleoperation won't be smooth. When you feel confident, you can disable this limit by adding `--robot.max_relative_target=null` to the command line: ```bash -python lerobot/scripts/control_robot.py teleoperate \ - --robot-path lerobot/configs/robot/aloha.yaml \ - --robot-overrides max_relative_target=null +python lerobot/scripts/control_robot.py \ + --robot.type=aloha \ + --robot.max_relative_target=null \ + --control.type=teleoperate ``` ## Record a dataset @@ -80,27 +82,29 @@ echo $HF_USER Record 2 episodes and upload your dataset to the hub: ```bash -python lerobot/scripts/control_robot.py record \ - --robot-path lerobot/configs/robot/aloha.yaml \ - --robot-overrides max_relative_target=null \ - --fps 30 \ - --repo-id ${HF_USER}/aloha_test \ - --tags aloha tutorial \ - --warmup-time-s 5 \ - --episode-time-s 40 \ - --reset-time-s 10 \ - --num-episodes 2 \ - --push-to-hub 1 +python lerobot/scripts/control_robot.py \ + --robot.type=aloha \ + --robot.max_relative_target=null \ + --control.type=record \ + --control.fps=30 \ + --control.single_task="Grasp a lego block and put it in the bin." 
\ + --control.repo_id=${HF_USER}/aloha_test \ + --control.tags='["tutorial"]' \ + --control.warmup_time_s=5 \ + --control.episode_time_s=30 \ + --control.reset_time_s=30 \ + --control.num_episodes=2 \ + --control.push_to_hub=true ``` ## Visualize a dataset -If you uploaded your dataset to the hub with `--push-to-hub 1`, you can [visualize your dataset online](https://huggingface.co/spaces/lerobot/visualize_dataset) by copy pasting your repo id given by: +If you uploaded your dataset to the hub with `--control.push_to_hub=true`, you can [visualize your dataset online](https://huggingface.co/spaces/lerobot/visualize_dataset) by copy pasting your repo id given by: ```bash echo ${HF_USER}/aloha_test ``` -If you didn't upload with `--push-to-hub 0`, you can also visualize it locally with: +If you didn't upload with `--control.push_to_hub=false`, you can also visualize it locally with: ```bash python lerobot/scripts/visualize_dataset_html.py \ --repo-id ${HF_USER}/aloha_test @@ -109,16 +113,17 @@ python lerobot/scripts/visualize_dataset_html.py \ ## Replay an episode **/!\ FOR SAFETY, READ THIS /!\** -Replay consists in automatically replaying the sequence of actions (i.e. goal positions for your motors) recorded in a given dataset episode. Make sure the current initial position of your robot is similar to the one in your episode, so that your follower arms don't move too fast to go to the first goal positions. For safety, you might want to add `--robot-overrides max_relative_target=5` to your command line as explained above. +Replay consists in automatically replaying the sequence of actions (i.e. goal positions for your motors) recorded in a given dataset episode. Make sure the current initial position of your robot is similar to the one in your episode, so that your follower arms don't move too fast to go to the first goal positions. For safety, you might want to add `--robot.max_relative_target=5` to your command line as explained above. Now try to replay the first episode on your robot: ```bash -python lerobot/scripts/control_robot.py replay \ - --robot-path lerobot/configs/robot/aloha.yaml \ - --robot-overrides max_relative_target=null \ - --fps 30 \ - --repo-id ${HF_USER}/aloha_test \ - --episode 0 +python lerobot/scripts/control_robot.py \ + --robot.type=aloha \ + --robot.max_relative_target=null \ + --control.type=replay \ + --control.fps=30 \ + --control.repo_id=${HF_USER}/aloha_test \ + --control.episode=0 ``` ## Train a policy @@ -126,46 +131,48 @@ python lerobot/scripts/control_robot.py replay \ To train a policy to control your robot, use the [`python lerobot/scripts/train.py`](../lerobot/scripts/train.py) script. A few arguments are required. Here is an example command: ```bash python lerobot/scripts/train.py \ - dataset_repo_id=${HF_USER}/aloha_test \ - policy=act_aloha_real \ - env=aloha_real \ - hydra.run.dir=outputs/train/act_aloha_test \ - hydra.job.name=act_aloha_test \ - device=cuda \ - wandb.enable=true + --dataset.repo_id=${HF_USER}/aloha_test \ + --policy.type=act \ + --output_dir=outputs/train/act_aloha_test \ + --job_name=act_aloha_test \ + --device=cuda \ + --wandb.enable=true ``` Let's explain it: -1. We provided the dataset as argument with `dataset_repo_id=${HF_USER}/aloha_test`. -2. We provided the policy with `policy=act_aloha_real`. This loads configurations from [`lerobot/configs/policy/act_aloha_real.yaml`](../lerobot/configs/policy/act_aloha_real.yaml). 
Importantly, this policy uses 4 cameras as input `cam_right_wrist`, `cam_left_wrist`, `cam_high`, and `cam_low`. -3. We provided an environment as argument with `env=aloha_real`. This loads configurations from [`lerobot/configs/env/aloha_real.yaml`](../lerobot/configs/env/aloha_real.yaml). Note: this yaml defines 18 dimensions for the `state_dim` and `action_dim`, corresponding to 18 motors, not 14 motors as used in previous Aloha work. This is because, we include the `shoulder_shadow` and `elbow_shadow` motors for simplicity. -4. We provided `device=cuda` since we are training on a Nvidia GPU. +1. We provided the dataset as an argument with `--dataset.repo_id=${HF_USER}/aloha_test`. +2. We provided the policy with `policy.type=act`. This loads configurations from [`configuration_act.py`](../lerobot/common/policies/act/configuration_act.py). Importantly, this policy will automatically adapt to the number of motor states, motor actions and cameras of your robot (e.g. `cam_right_wrist` and `cam_left_wrist`) which have been saved in your dataset. +4. We provided `device=cuda` since we are training on an Nvidia GPU, but you could use `device=mps` to train on Apple silicon. 5. We provided `wandb.enable=true` to use [Weights and Biases](https://docs.wandb.ai/quickstart) for visualizing training plots. This is optional but if you use it, make sure you are logged in by running `wandb login`. +For more information on the `train` script, see the previous tutorial: [`examples/4_train_policy_with_script.md`](../examples/4_train_policy_with_script.md) + Training should take several hours. You will find checkpoints in `outputs/train/act_aloha_test/checkpoints`. ## Evaluate your policy You can use the `record` function from [`lerobot/scripts/control_robot.py`](../lerobot/scripts/control_robot.py) but with a policy checkpoint as input. For instance, run this command to record 10 evaluation episodes: ```bash -python lerobot/scripts/control_robot.py record \ - --robot-path lerobot/configs/robot/aloha.yaml \ - --robot-overrides max_relative_target=null \ - --fps 30 \ - --repo-id ${HF_USER}/eval_act_aloha_test \ - --tags aloha tutorial eval \ - --warmup-time-s 5 \ - --episode-time-s 40 \ - --reset-time-s 10 \ - --num-episodes 10 \ - --num-image-writer-processes 1 \ - -p outputs/train/act_aloha_test/checkpoints/last/pretrained_model +python lerobot/scripts/control_robot.py \ + --robot.type=aloha \ + --control.type=record \ + --control.fps=30 \ + --control.single_task="Grasp a lego block and put it in the bin." \ + --control.repo_id=${HF_USER}/eval_act_aloha_test \ + --control.tags='["tutorial"]' \ + --control.warmup_time_s=5 \ + --control.episode_time_s=30 \ + --control.reset_time_s=30 \ + --control.num_episodes=10 \ + --control.push_to_hub=true \ + --control.policy.path=outputs/train/act_aloha_test/checkpoints/last/pretrained_model \ + --control.num_image_writer_processes=1 ``` As you can see, it's almost the same command as previously used to record your training dataset. Two things changed: -1. There is an additional `-p` argument which indicates the path to your policy checkpoint with (e.g. `-p outputs/train/eval_aloha_test/checkpoints/last/pretrained_model`). You can also use the model repository if you uploaded a model checkpoint to the hub (e.g. `-p ${HF_USER}/act_aloha_test`). -2. The name of dataset begins by `eval` to reflect that you are running inference (e.g. `--repo-id ${HF_USER}/eval_act_aloha_test`). -3. We use `--num-image-writer-processes 1` instead of the default value (`0`).
On our computer, using a dedicated process to write images from the 4 cameras on disk allows to reach constent 30 fps during inference. Feel free to explore different values for `--num-image-writer-processes`. +1. There is an additional `--control.policy.path` argument which indicates the path to your policy checkpoint (e.g. `outputs/train/eval_act_aloha_test/checkpoints/last/pretrained_model`). You can also use the model repository if you uploaded a model checkpoint to the hub (e.g. `${HF_USER}/act_aloha_test`). +2. The name of the dataset begins with `eval` to reflect that you are running inference (e.g. `${HF_USER}/eval_act_aloha_test`). +3. We use `--control.num_image_writer_processes=1` instead of the default value (`0`). On our computer, using a dedicated process to write images from the 4 cameras on disk allows us to reach a consistent 30 fps during inference. Feel free to explore different values for `--control.num_image_writer_processes`. ## More diff --git a/lerobot/common/robot_devices/cameras/intelrealsense.py b/lerobot/common/robot_devices/cameras/intelrealsense.py index affb5e6249..7e65dba9d0 100644 --- a/lerobot/common/robot_devices/cameras/intelrealsense.py +++ b/lerobot/common/robot_devices/cameras/intelrealsense.py @@ -11,7 +11,6 @@ import time import traceback from collections import Counter -from dataclasses import replace from pathlib import Path from threading import Thread @@ -95,7 +94,10 @@ def save_images_from_cameras( cameras = [] for cam_sn in serial_numbers: print(f"{cam_sn=}") - camera = IntelRealSenseCamera(serial_number=cam_sn, fps=fps, width=width, height=height, mock=mock) + config = IntelRealSenseCameraConfig( + serial_number=cam_sn, fps=fps, width=width, height=height, mock=mock + ) + camera = IntelRealSenseCamera(config) camera.connect() print( f"IntelRealSenseCamera({camera.serial_number}, fps={camera.fps}, width={camera.width}, height={camera.height}, color_mode={camera.color_mode})" @@ -165,33 +167,35 @@ class IntelRealSenseCamera: When an IntelRealSenseCamera is instantiated, if no specific config is provided, the default fps, width, height and color_mode of the given camera will be used.
- Example of usage: + Example of instantiating with a serial number: ```python - # Instantiate with its serial number - camera = IntelRealSenseCamera(serial_number=128422271347) - # Or by its name if it's unique - camera = IntelRealSenseCamera.init_from_name("Intel RealSense D405") + from lerobot.common.robot_devices.cameras.configs import IntelRealSenseCameraConfig + + config = IntelRealSenseCameraConfig(serial_number=128422271347) + camera = IntelRealSenseCamera(config) camera.connect() color_image = camera.read() # when done using the camera, consider disconnecting camera.disconnect() ``` + Example of instantiating with a name if it's unique: + ``` + config = IntelRealSenseCameraConfig(name="Intel RealSense D405") + ``` + Example of changing default fps, width, height and color_mode: ```python - camera = IntelRealSenseCamera(serial_number=128422271347, fps=30, width=1280, height=720) - camera = connect() # applies the settings, might error out if these settings are not compatible with the camera - - camera = IntelRealSenseCamera(serial_number=128422271347, fps=90, width=640, height=480) - camera = connect() - - camera = IntelRealSenseCamera(serial_number=128422271347, fps=90, width=640, height=480, color_mode="bgr") - camera = connect() + config = IntelRealSenseCameraConfig(serial_number=128422271347, fps=30, width=1280, height=720) + config = IntelRealSenseCameraConfig(serial_number=128422271347, fps=90, width=640, height=480) + config = IntelRealSenseCameraConfig(serial_number=128422271347, fps=90, width=640, height=480, color_mode="bgr") + # Note: might error out upon `camera.connect()` if these settings are not compatible with the camera ``` Example of returning depth: ```python - camera = IntelRealSenseCamera(serial_number=128422271347, use_depth=True) + config = IntelRealSenseCameraConfig(serial_number=128422271347, use_depth=True) + camera = IntelRealSenseCamera(config) camera.connect() color_image, depth_map = camera.read() ``` @@ -199,12 +203,13 @@ class IntelRealSenseCamera: def __init__( self, - config: IntelRealSenseCameraConfig | None = None, - **kwargs, + config: IntelRealSenseCameraConfig, ): - config = IntelRealSenseCameraConfig(**kwargs) if config is None else replace(config, **kwargs) - - self.serial_number = config.serial_number + self.config = config + if config.name is not None: + self.serial_number = self.find_serial_number_from_name(config.name) + else: + self.serial_number = config.serial_number self.fps = config.fps self.width = config.width self.height = config.height @@ -236,8 +241,7 @@ def __init__( elif config.rotation == 180: self.rotation = cv2.ROTATE_180 - @classmethod - def init_from_name(cls, name: str, config: IntelRealSenseCameraConfig | None = None, **kwargs): + def find_serial_number_from_name(self, name): camera_infos = find_cameras() camera_names = [cam["name"] for cam in camera_infos] this_name_count = Counter(camera_names)[name] @@ -250,13 +254,7 @@ def init_from_name(cls, name: str, config: IntelRealSenseCameraConfig | None = N name_to_serial_dict = {cam["name"]: cam["serial_number"] for cam in camera_infos} cam_sn = name_to_serial_dict[name] - if config is None: - config = IntelRealSenseCameraConfig() - - # Overwrite the config arguments using kwargs - config = replace(config, **kwargs) - - return cls(serial_number=cam_sn, config=config, **kwargs) + return cam_sn def connect(self): if self.is_connected: diff --git a/lerobot/common/robot_devices/cameras/opencv.py b/lerobot/common/robot_devices/cameras/opencv.py index 99f1ec7821..93c791fa28 
100644 --- a/lerobot/common/robot_devices/cameras/opencv.py +++ b/lerobot/common/robot_devices/cameras/opencv.py @@ -9,7 +9,6 @@ import shutil import threading import time -from dataclasses import replace from pathlib import Path from threading import Thread @@ -127,7 +126,8 @@ def save_images_from_cameras( print("Connecting cameras") cameras = [] for cam_idx in camera_ids: - camera = OpenCVCamera(camera_index=cam_idx, fps=fps, width=width, height=height, mock=mock) + config = OpenCVCameraConfig(camera_index=cam_idx, fps=fps, width=width, height=height, mock=mock) + camera = OpenCVCamera(config) camera.connect() print( f"OpenCVCamera({camera.camera_index}, fps={camera.fps}, width={camera.width}, " @@ -195,7 +195,10 @@ class OpenCVCamera: Example of usage: ```python - camera = OpenCVCamera(camera_index=0) + from lerobot.common.robot_devices.cameras.configs import OpenCVCameraConfig + + config = OpenCVCameraConfig(camera_index=0) + camera = OpenCVCamera(config) camera.connect() color_image = camera.read() # when done using the camera, consider disconnecting camera.disconnect() ``` Example of changing default fps, width, height and color_mode: ```python - camera = OpenCVCamera(camera_index=0, fps=30, width=1280, height=720) - camera = connect() # applies the settings, might error out if these settings are not compatible with the camera - - camera = OpenCVCamera(camera_index=0, fps=90, width=640, height=480) - camera = connect() - - camera = OpenCVCamera(camera_index=0, fps=90, width=640, height=480, color_mode="bgr") - camera = connect() + config = OpenCVCameraConfig(camera_index=0, fps=30, width=1280, height=720) + config = OpenCVCameraConfig(camera_index=0, fps=90, width=640, height=480) + config = OpenCVCameraConfig(camera_index=0, fps=90, width=640, height=480, color_mode="bgr") + # Note: might error out upon `camera.connect()` if these settings are not compatible with the camera ``` """ - def __init__(self, config: OpenCVCameraConfig | None = None, **kwargs): - config = OpenCVCameraConfig(**kwargs) if config is None else replace(config, **kwargs) - + def __init__(self, config: OpenCVCameraConfig): + self.config = config self.camera_index = config.camera_index self.port = None diff --git a/lerobot/common/robot_devices/cameras/utils.py b/lerobot/common/robot_devices/cameras/utils.py index 73675fc250..88288ea3f3 100644 --- a/lerobot/common/robot_devices/cameras/utils.py +++ b/lerobot/common/robot_devices/cameras/utils.py @@ -2,7 +2,11 @@ import numpy as np -from lerobot.common.robot_devices.cameras.configs import CameraConfig +from lerobot.common.robot_devices.cameras.configs import ( + CameraConfig, + IntelRealSenseCameraConfig, + OpenCVCameraConfig, +) # Defines a camera type @@ -36,12 +40,14 @@ def make_camera(camera_type, **kwargs) -> Camera: if camera_type == "opencv": from lerobot.common.robot_devices.cameras.opencv import OpenCVCamera - return OpenCVCamera(**kwargs) + config = OpenCVCameraConfig(**kwargs) + return OpenCVCamera(config) elif camera_type == "intelrealsense": from lerobot.common.robot_devices.cameras.intelrealsense import IntelRealSenseCamera - return IntelRealSenseCamera(**kwargs) + config = IntelRealSenseCameraConfig(**kwargs) + return IntelRealSenseCamera(config) else: raise ValueError(f"The camera type '{camera_type}' is not valid.") diff --git a/lerobot/common/robot_devices/control_configs.py b/lerobot/common/robot_devices/control_configs.py index 24d4546161..418265d94a 100644 --- a/lerobot/common/robot_devices/control_configs.py +++
b/lerobot/common/robot_devices/control_configs.py @@ -1,11 +1,14 @@ +import logging from dataclasses import dataclass from pathlib import Path import draccus from lerobot.common.robot_devices.robots.configs import RobotConfig +from lerobot.common.utils.utils import auto_select_torch_device, is_amp_available, is_torch_device_available from lerobot.configs import parser from lerobot.configs.policies import PreTrainedConfig +from lerobot.configs.train import TrainPipelineConfig @dataclass @@ -16,7 +19,7 @@ class ControlConfig(draccus.ChoiceRegistry): @ControlConfig.register_subclass("calibrate") @dataclass class CalibrateControlConfig(ControlConfig): - # List of arms to calibrate (e.g. `--arms left_follower right_follower left_leader`) + # List of arms to calibrate (e.g. `--arms='["left_follower","right_follower"]' left_leader`) arms: list[str] | None = None @@ -27,7 +30,7 @@ class TeleoperateControlConfig(ControlConfig): fps: int | None = None teleop_time_s: float | None = None # Display all cameras on screen - display_cameras: bool = False + display_cameras: bool = True @ControlConfig.register_subclass("record") @@ -92,11 +95,26 @@ def __post_init__(self): self.policy = PreTrainedConfig.from_pretrained(policy_path, cli_overrides=cli_overrides) self.policy.pretrained_path = policy_path - if self.policy is not None: - if self.device is None: - raise ValueError("Set one of the following device: cuda, cpu or mps") - elif self.device == "cuda" and self.use_amp is None: - raise ValueError("Set 'use_amp' to True or False.") + # When no device or use_amp are given, use the one from training config. + if self.device is None or self.use_amp is None: + train_cfg = TrainPipelineConfig.from_pretrained(policy_path) + if self.device is None: + self.device = train_cfg.device + if self.use_amp is None: + self.use_amp = train_cfg.use_amp + + # Automatically switch to available device if necessary + if not is_torch_device_available(self.device): + auto_device = auto_select_torch_device() + logging.warning(f"Device '{self.device}' is not available. Switching to '{auto_device}'.") + self.device = auto_device + + # Automatically deactivate AMP if necessary + if self.use_amp and not is_amp_available(self.device): + logging.warning( + f"Automatic Mixed Precision (amp) is not available on device '{self.device}'. Deactivating AMP." + ) + self.use_amp = False @ControlConfig.register_subclass("replay") diff --git a/lerobot/common/robot_devices/motors/dynamixel.py b/lerobot/common/robot_devices/motors/dynamixel.py index bbadf2ba70..54836d8ecb 100644 --- a/lerobot/common/robot_devices/motors/dynamixel.py +++ b/lerobot/common/robot_devices/motors/dynamixel.py @@ -4,7 +4,6 @@ import time import traceback from copy import deepcopy -from dataclasses import replace import numpy as np import tqdm @@ -254,7 +253,6 @@ def __init__(self, message="Joint is out of range"): class DynamixelMotorsBus: - # TODO(rcadene): Add a script to find the motor indices without DynamixelWizzard2 """ The DynamixelMotorsBus class allows to efficiently read and write to the attached motors. It relies on the python dynamixel sdk to communicate with the motors. For more info, see the [Dynamixel SDK Documentation](https://emanual.robotis.com/docs/en/software/dynamixel/dynamixel_sdk/sample_code/python_read_write_protocol_2_0/#python-read-write-protocol-20). 
@@ -276,10 +274,11 @@ class DynamixelMotorsBus: motor_index = 6 motor_model = "xl330-m288" - motors_bus = DynamixelMotorsBus( + config = DynamixelMotorsBusConfig( port="/dev/tty.usbmodem575E0031751", motors={motor_name: (motor_index, motor_model)}, ) + motors_bus = DynamixelMotorsBus(config) motors_bus.connect() position = motors_bus.read("Present_Position") @@ -295,24 +294,14 @@ class DynamixelMotorsBus: def __init__( self, - config: DynamixelMotorsBusConfig | None = None, - extra_model_control_table: dict[str, list[tuple]] | None = None, - extra_model_resolution: dict[str, int] | None = None, - **kwargs, + config: DynamixelMotorsBusConfig, ): - config = DynamixelMotorsBusConfig(**kwargs) if config is None else replace(config, **kwargs) - self.port = config.port self.motors = config.motors self.mock = config.mock self.model_ctrl_table = deepcopy(MODEL_CONTROL_TABLE) - if extra_model_control_table: - self.model_ctrl_table.update(extra_model_control_table) - self.model_resolution = deepcopy(MODEL_RESOLUTION) - if extra_model_resolution: - self.model_resolution.update(extra_model_resolution) self.port_handler = None self.packet_handler = None diff --git a/lerobot/common/robot_devices/motors/feetech.py b/lerobot/common/robot_devices/motors/feetech.py index b915f47e26..fd5e877bf6 100644 --- a/lerobot/common/robot_devices/motors/feetech.py +++ b/lerobot/common/robot_devices/motors/feetech.py @@ -4,7 +4,6 @@ import time import traceback from copy import deepcopy -from dataclasses import replace import numpy as np import tqdm @@ -254,10 +253,11 @@ class FeetechMotorsBus: motor_index = 6 motor_model = "sts3215" - motors_bus = FeetechMotorsBus( + config = FeetechMotorsBusConfig( port="/dev/tty.usbmodem575E0031751", motors={motor_name: (motor_index, motor_model)}, ) + motors_bus = FeetechMotorsBus(config) motors_bus.connect() position = motors_bus.read("Present_Position") @@ -273,24 +273,14 @@ class FeetechMotorsBus: def __init__( self, - config: FeetechMotorsBusConfig | None = None, - extra_model_control_table: dict[str, list[tuple]] | None = None, - extra_model_resolution: dict[str, int] | None = None, - **kwargs, + config: FeetechMotorsBusConfig, ): - config = FeetechMotorsBusConfig(**kwargs) if config is None else replace(config, **kwargs) - self.port = config.port self.motors = config.motors self.mock = config.mock self.model_ctrl_table = deepcopy(MODEL_CONTROL_TABLE) - if extra_model_control_table: - self.model_ctrl_table.update(extra_model_control_table) - self.model_resolution = deepcopy(MODEL_RESOLUTION) - if extra_model_resolution: - self.model_resolution.update(extra_model_resolution) self.port_handler = None self.packet_handler = None diff --git a/lerobot/common/robot_devices/motors/utils.py b/lerobot/common/robot_devices/motors/utils.py index fb625339fd..fc64f050f0 100644 --- a/lerobot/common/robot_devices/motors/utils.py +++ b/lerobot/common/robot_devices/motors/utils.py @@ -1,6 +1,10 @@ from typing import Protocol -from lerobot.common.robot_devices.motors.configs import MotorsBusConfig +from lerobot.common.robot_devices.motors.configs import ( + DynamixelMotorsBusConfig, + FeetechMotorsBusConfig, + MotorsBusConfig, +) class MotorsBus(Protocol): @@ -36,12 +40,14 @@ def make_motors_bus(motor_type: str, **kwargs) -> MotorsBus: if motor_type == "dynamixel": from lerobot.common.robot_devices.motors.dynamixel import DynamixelMotorsBus - return DynamixelMotorsBus(**kwargs) + config = DynamixelMotorsBusConfig(**kwargs) + return DynamixelMotorsBus(config) elif motor_type == "feetech": from 
lerobot.common.robot_devices.motors.feetech import FeetechMotorsBus - return FeetechMotorsBus(**kwargs) + config = FeetechMotorsBusConfig(**kwargs) + return FeetechMotorsBus(config) else: raise ValueError(f"The motor type '{motor_type}' is not valid.") diff --git a/lerobot/common/robot_devices/robots/configs.py b/lerobot/common/robot_devices/robots/configs.py index 6d615b2879..a976f601c2 100644 --- a/lerobot/common/robot_devices/robots/configs.py +++ b/lerobot/common/robot_devices/robots/configs.py @@ -85,7 +85,7 @@ class AlohaRobotConfig(ManipulatorRobotConfig): # Also, everything is expected to work safely out-of-the-box, but we highly advise to # first try to teleoperate the grippers only (by commenting out the rest of the motors in this yaml), # then to gradually add more motors (by uncommenting), until you can teleoperate both arms fully - max_relative_target: int = 5 + max_relative_target: int | None = 5 leader_arms: dict[str, MotorsBusConfig] = field( default_factory=lambda: { diff --git a/lerobot/common/robot_devices/robots/manipulator.py b/lerobot/common/robot_devices/robots/manipulator.py index a46d7dc058..ceeaa9a88e 100644 --- a/lerobot/common/robot_devices/robots/manipulator.py +++ b/lerobot/common/robot_devices/robots/manipulator.py @@ -8,7 +8,6 @@ import logging import time import warnings -from dataclasses import replace from pathlib import Path import numpy as np @@ -144,11 +143,8 @@ class ManipulatorRobot: def __init__( self, config: ManipulatorRobotConfig, - **kwargs, ): - # Overwrite config arguments using kwargs - self.config = replace(config, **kwargs) - + self.config = config self.robot_type = self.config.type self.calibration_dir = Path(self.config.calibration_dir) self.leader_arms = make_motors_buses_from_configs(self.config.leader_arms) diff --git a/lerobot/common/utils/utils.py b/lerobot/common/utils/utils.py index 329b8b8a21..d766089b5c 100644 --- a/lerobot/common/utils/utils.py +++ b/lerobot/common/utils/utils.py @@ -74,6 +74,26 @@ def get_safe_torch_device(try_device: str, log: bool = False) -> torch.device: return device +def is_torch_device_available(try_device: str) -> bool: + if try_device == "cuda": + return torch.cuda.is_available() + elif try_device == "mps": + return torch.backends.mps.is_available() + elif try_device == "cpu": + return True + else: + raise ValueError(f"Unknown device '{try_device}.") + + +def is_amp_available(device: str): + if device in ["cuda", "cpu"]: + return True + elif device == "mps": + return False + else: + raise ValueError(f"Unknown device '{device}.") + + def get_global_random_state() -> dict[str, Any]: """Get the random state for `random`, `numpy`, and `torch`.""" random_state_dict = { diff --git a/lerobot/configs/eval.py b/lerobot/configs/eval.py index c95929e136..10467fda2c 100644 --- a/lerobot/configs/eval.py +++ b/lerobot/configs/eval.py @@ -4,7 +4,7 @@ from pathlib import Path from lerobot.common import envs, policies # noqa: F401 -from lerobot.common.utils.utils import auto_select_torch_device +from lerobot.common.utils.utils import auto_select_torch_device, is_amp_available, is_torch_device_available from lerobot.configs import parser from lerobot.configs.default import EvalConfig from lerobot.configs.policies import PreTrainedConfig @@ -46,17 +46,28 @@ def __post_init__(self): cli_overrides = parser.get_cli_overrides("policy") self.policy = PreTrainedConfig.from_pretrained(policy_path, cli_overrides=cli_overrides) self.policy.pretrained_path = policy_path - train_cfg = 
TrainPipelineConfig.from_pretrained(policy_path) - if self.use_amp != train_cfg.use_amp: - raise ValueError( - f"The policy you are trying to load has been trained with use_amp={train_cfg.use_amp} " - f"but you're trying to evaluate it with use_amp={self.use_amp}" - ) - if self.device == "mps" and train_cfg.device == "cuda": + + # When no device or use_amp are given, use the one from training config. + if self.device is None or self.use_amp is None: + train_cfg = TrainPipelineConfig.from_pretrained(policy_path) + if self.device is None: + self.device = train_cfg.device + if self.use_amp is None: + self.use_amp = train_cfg.use_amp + + # Automatically switch to available device if necessary + if not is_torch_device_available(self.device): + auto_device = auto_select_torch_device() + logging.warning(f"Device '{self.device}' is not available. Switching to '{auto_device}'.") + self.device = auto_device + + # Automatically deactivate AMP if necessary + if self.use_amp and not is_amp_available(self.device): logging.warning( - "You are loading a policy that has been trained with a Cuda kernel on a Metal backend." - "This is lilely to produced unexpected results due to differences between these two kernels." + f"Automatic Mixed Precision (amp) is not available on device '{self.device}'. Deactivating AMP." ) + self.use_amp = False + else: logging.warning( "No pretrained path was provided, evaluated policy will be built from scratch (random weights)." diff --git a/lerobot/scripts/control_robot.py b/lerobot/scripts/control_robot.py index c7401ba8e6..3fdb0acc38 100644 --- a/lerobot/scripts/control_robot.py +++ b/lerobot/scripts/control_robot.py @@ -17,12 +17,12 @@ ```bash python lerobot/scripts/control_robot.py \ --robot.type=so100 \ + --robot.cameras='{}' \ --control.type=teleoperate -# Remove the cameras from the robot definition. They are not used in 'teleoperate' anyway. +# Add the cameras from the robot definition to visualize them: python lerobot/scripts/control_robot.py \ --robot.type=so100 \ - --robot.cameras='{}' \ --control.type=teleoperate ``` @@ -40,9 +40,10 @@ --robot.type=so100 \ --control.type=record \ --control.fps=30 \ + --control.single_task="Grasp a lego block and put it in the bin." \ --control.repo_id=$USER/koch_test \ --control.num_episodes=1 \ - --control.run_compute_stats=False + --control.push_to_hub=True ``` - Visualize dataset: @@ -57,9 +58,9 @@ python lerobot/scripts/control_robot.py replay \ --robot.type=so100 \ --control.type=replay \ - --control.fps 30 \ - --control.repo-id $USER/koch_test \ - --control.episode 0 + --control.fps=30 \ + --control.repo_id=$USER/koch_test \ + --control.episode=0 ``` - Record a full dataset in order to train a policy, with 2 seconds of warmup, @@ -83,17 +84,18 @@ - Tap escape key 'esc' to stop the data recording. This might require a sudo permission to allow your terminal to monitor keyboard events. -**NOTE**: You can resume/continue data recording by running the same data recording command and adding `--resume 1`. -If the dataset you want to extend is not on the hub, you also need to add `--local-files-only 1`. +**NOTE**: You can resume/continue data recording by running the same data recording command and adding `--control.resume=true`. +If the dataset you want to extend is not on the hub, you also need to add `--control.local_files_only=true`. 
- Train on this dataset with the ACT policy: ```bash -# TODO(rcadene): fix python lerobot/scripts/train.py \ - policy=act_koch_real \ - env=koch_real \ - dataset_repo_id=$USER/koch_pick_place_lego \ - hydra.run.dir=outputs/train/act_koch_real + --dataset.repo_id=${HF_USER}/koch_pick_place_lego \ + --policy.type=act \ + --output_dir=outputs/train/act_koch_pick_place_lego \ + --job_name=act_koch_pick_place_lego \ + --device=cuda \ + --wandb.enable=true ``` - Run the pretrained policy on the robot: @@ -102,19 +104,21 @@ --robot.type=so100 \ --control.type=record \ --control.fps=30 \ - --control.repo_id=$USER/eval_act_koch_real \ + --control.single_task="Grasp a lego block and put it in the bin." \ + --control.repo_id=$USER/eval_act_koch_pick_place_lego \ --control.num_episodes=10 \ --control.warmup_time_s=2 \ --control.episode_time_s=30 \ - --control.reset_time_s=10 - --control.pretrained_policy_path=outputs/train/act_koch_real/checkpoints/080000/pretrained_model + --control.reset_time_s=10 \ + --control.push_to_hub=true \ + --control.policy.path=outputs/train/act_koch_pick_place_lego/checkpoints/080000/pretrained_model ``` """ import logging import time - -import draccus +from dataclasses import asdict +from pprint import pformat # from safetensors.torch import load_file, save_file from lerobot.common.datasets.lerobot_dataset import LeRobotDataset @@ -140,6 +144,7 @@ from lerobot.common.robot_devices.robots.utils import Robot, make_robot_from_config from lerobot.common.robot_devices.utils import busy_wait, safe_disconnect from lerobot.common.utils.utils import has_method, init_logging, log_say +from lerobot.configs import parser ######################################################################################## # Control modes @@ -213,10 +218,11 @@ def record( root=cfg.root, local_files_only=cfg.local_files_only, ) - dataset.start_image_writer( - num_processes=cfg.num_image_writer_processes, - num_threads=cfg.num_image_writer_threads_per_camera * len(robot.cameras), - ) + if len(robot.cameras) > 0: + dataset.start_image_writer( + num_processes=cfg.num_image_writer_processes, + num_threads=cfg.num_image_writer_threads_per_camera * len(robot.cameras), + ) sanity_check_dataset_robot_compatibility(dataset, robot, cfg.fps, cfg.video) else: # Create empty dataset or load existing saved episodes @@ -336,9 +342,10 @@ def replay( log_control_info(robot, dt_s, fps=cfg.fps) -@draccus.wrap() +@parser.wrap() def control_robot(cfg: ControlPipelineConfig): init_logging() + logging.info(pformat(asdict(cfg))) robot = make_robot_from_config(cfg.robot) diff --git a/lerobot/scripts/train.py b/lerobot/scripts/train.py index 733e525ae1..8d46bb6e39 100644 --- a/lerobot/scripts/train.py +++ b/lerobot/scripts/train.py @@ -229,7 +229,8 @@ def train(cfg: TrainPipelineConfig): num_total_params = sum(p.numel() for p in policy.parameters()) log_output_dir(cfg.output_dir) - logging.info(f"{cfg.env.task=}") + if cfg.env is not None: + logging.info(f"{cfg.env.task=}") logging.info(f"{cfg.offline.steps=} ({format_big_number(cfg.offline.steps)})") logging.info(f"{cfg.online.steps=}") logging.info(f"{offline_dataset.num_frames=} ({format_big_number(offline_dataset.num_frames)})") From aa65bb7619e5bcbe838541bbf0af65a738a13a91 Mon Sep 17 00:00:00 2001 From: Simon Alibert Date: Tue, 28 Jan 2025 16:35:07 +0100 Subject: [PATCH 80/94] Add pusht hack --- lerobot/common/datasets/factory.py | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/lerobot/common/datasets/factory.py b/lerobot/common/datasets/factory.py 
index 58ff400e46..f86d242e9b 100644 --- a/lerobot/common/datasets/factory.py +++ b/lerobot/common/datasets/factory.py @@ -32,6 +32,18 @@ "std": [[[0.229]], [[0.224]], [[0.225]]], # (c,1,1) } +# Stats used in the original Diffusion repo +PUSHT_STATS = { + "observation.state": { + "min": [13.456424, 32.938293], + "max": [496.14618, 510.9579], + }, + "action": { + "min": [12.0, 25.0], + "max": [511.0, 511.0], + }, +} + def resolve_delta_timestamps( cfg: PreTrainedConfig, ds_meta: LeRobotDatasetMetadata @@ -112,4 +124,11 @@ def make_dataset(cfg: TrainPipelineConfig) -> LeRobotDataset | MultiLeRobotDatas for stats_type, stats in IMAGENET_STATS.items(): dataset.meta.stats[key][stats_type] = torch.tensor(stats, dtype=torch.float32) + # HACK for pusht + if cfg.dataset.repo_id.startswith("lerobot/pusht"): + for key in PUSHT_STATS: + if key in dataset.meta.features: + for stats_type, stats in PUSHT_STATS[key].items(): + dataset.meta.stats[key][stats_type] = torch.tensor(stats, dtype=torch.float32) + return dataset From 584691c7b255ab26ee0f575b1393f071046b349c Mon Sep 17 00:00:00 2001 From: Simon Alibert Date: Tue, 28 Jan 2025 16:42:58 +0100 Subject: [PATCH 81/94] Fix --- lerobot/common/utils/utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lerobot/common/utils/utils.py b/lerobot/common/utils/utils.py index d766089b5c..b605e7096e 100644 --- a/lerobot/common/utils/utils.py +++ b/lerobot/common/utils/utils.py @@ -43,10 +43,10 @@ def inside_slurm(): def auto_select_torch_device() -> torch.device: """Tries to select automatically a torch device.""" if torch.cuda.is_available(): - logging.log("Cuda backend detected, using cuda.") + print("Cuda backend detected, using cuda.") return torch.device("cuda") elif torch.backends.mps.is_available(): - logging.log("Metal backend detected, using cuda.") + print("Metal backend detected, using cuda.") return torch.device("mps") else: logging.warning("No accelerated backend detected. Using default cpu, this will be slow.") From bac217c4a944781dff306cdd10322333874ae7ae Mon Sep 17 00:00:00 2001 From: Simon Alibert Date: Tue, 28 Jan 2025 17:10:10 +0100 Subject: [PATCH 82/94] Fix logging --- lerobot/common/utils/utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lerobot/common/utils/utils.py b/lerobot/common/utils/utils.py index b605e7096e..fda3edd9f3 100644 --- a/lerobot/common/utils/utils.py +++ b/lerobot/common/utils/utils.py @@ -43,10 +43,10 @@ def inside_slurm(): def auto_select_torch_device() -> torch.device: """Tries to select automatically a torch device.""" if torch.cuda.is_available(): - print("Cuda backend detected, using cuda.") + logging.info("Cuda backend detected, using cuda.") return torch.device("cuda") elif torch.backends.mps.is_available(): - print("Metal backend detected, using cuda.") + logging.info("Metal backend detected, using cuda.") return torch.device("mps") else: logging.warning("No accelerated backend detected. 
Using default cpu, this will be slow.") From 742848b6c9b6adc6e244b294b528cd0499112b86 Mon Sep 17 00:00:00 2001 From: Simon Alibert Date: Tue, 28 Jan 2025 17:32:49 +0100 Subject: [PATCH 83/94] Fix logging --- lerobot/scripts/eval.py | 2 +- lerobot/scripts/train.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lerobot/scripts/eval.py b/lerobot/scripts/eval.py index 30b8003a9a..0c765cb964 100644 --- a/lerobot/scripts/eval.py +++ b/lerobot/scripts/eval.py @@ -446,7 +446,6 @@ def _compile_episode_data( @parser.wrap() def eval(cfg: EvalPipelineConfig): - init_logging() logging.info(pformat(asdict(cfg))) # Check device is available @@ -490,4 +489,5 @@ def eval(cfg: EvalPipelineConfig): if __name__ == "__main__": + init_logging() eval() diff --git a/lerobot/scripts/train.py b/lerobot/scripts/train.py index 8d46bb6e39..d8136681e4 100644 --- a/lerobot/scripts/train.py +++ b/lerobot/scripts/train.py @@ -184,7 +184,6 @@ def log_eval_info(logger, info, step, cfg, dataset, is_online): def train(cfg: TrainPipelineConfig): cfg.validate() - init_logging() logging.info(pformat(asdict(cfg))) # log metrics to terminal and wandb @@ -547,4 +546,5 @@ def sample_trajectory_and_update_buffer(): if __name__ == "__main__": + init_logging() train() From cb184171e18d1ba315f6909ce42bb0ab375c7f0a Mon Sep 17 00:00:00 2001 From: Simon Alibert Date: Tue, 28 Jan 2025 17:33:30 +0100 Subject: [PATCH 84/94] Fix policy factory --- lerobot/common/policies/factory.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/lerobot/common/policies/factory.py b/lerobot/common/policies/factory.py index 455565a196..fb5b1159dd 100644 --- a/lerobot/common/policies/factory.py +++ b/lerobot/common/policies/factory.py @@ -16,6 +16,7 @@ import logging +import torch from torch import nn from lerobot.common.datasets.lerobot_dataset import LeRobotDatasetMetadata @@ -68,7 +69,7 @@ def make_policy_config(policy_type: str, **kwargs) -> PreTrainedConfig: def make_policy( cfg: PreTrainedConfig, - device: str, + device: str | torch.device, ds_meta: LeRobotDatasetMetadata | None = None, env_cfg: EnvConfig | None = None, ) -> PreTrainedPolicy: @@ -93,7 +94,7 @@ def make_policy( Returns: PreTrainedPolicy: _description_ """ - if not (ds_meta is None) ^ (env_cfg is None): + if bool(ds_meta) == bool(env_cfg): raise ValueError("Either one of a dataset metadata or a sim env must be provided.") # NOTE: Currently, if you try to run vqbet with mps backend, you'll get this error. @@ -103,7 +104,7 @@ def make_policy( # https://github.com/pytorch/pytorch/issues/77764. As a temporary fix, you can set the environment # variable `PYTORCH_ENABLE_MPS_FALLBACK=1` to use the CPU as a fallback for this op. WARNING: this will be # slower than running natively on MPS. - if cfg.type == "vqbet" and device == "mps": + if cfg.type == "vqbet" and str(device) == "mps": raise NotImplementedError( "Current implementation of VQBeT does not support `mps` backend. " "Please use `cpu` or `cuda` backend." 
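The updated check in the policy factory means `make_policy` must receive exactly one of `ds_meta` or `env_cfg`. As a rough illustration only (not part of this patch; the policy type, repo id and device below are placeholder choices), building a policy from a dataset's metadata would look something like this:

```python
from lerobot.common.datasets.lerobot_dataset import LeRobotDatasetMetadata
from lerobot.common.policies.factory import make_policy, make_policy_config

# Placeholder dataset and policy type, used purely for illustration
ds_meta = LeRobotDatasetMetadata("lerobot/pusht")
policy_cfg = make_policy_config("act")

# OK: the dataset metadata provides the input/output features of the robot
policy = make_policy(policy_cfg, device="cuda", ds_meta=ds_meta)

# Would raise ValueError: neither ds_meta nor env_cfg was provided
# policy = make_policy(policy_cfg, device="cuda")
```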
From 9f85df2fccc64ce1a88f0c1a0340288cef44f369 Mon Sep 17 00:00:00 2001 From: Simon Alibert Date: Tue, 28 Jan 2025 18:06:55 +0100 Subject: [PATCH 85/94] Simplify config validation logic --- lerobot/configs/eval.py | 11 ----------- lerobot/configs/train.py | 26 +++++++++++++------------- 2 files changed, 13 insertions(+), 24 deletions(-) diff --git a/lerobot/configs/eval.py b/lerobot/configs/eval.py index 10467fda2c..1187335243 100644 --- a/lerobot/configs/eval.py +++ b/lerobot/configs/eval.py @@ -29,17 +29,6 @@ class EvalPipelineConfig: seed: int | None = 1000 def __post_init__(self): - if not self.device: - logging.warning("No device specified, trying to infer device automatically") - device = auto_select_torch_device() - self.device = device.type - - if self.use_amp and self.device not in ["cuda", "cpu"]: - raise NotImplementedError( - "Automatic Mixed Precision (amp) is only available for 'cuda' and 'cpu' devices. " - f"Selected device: {self.device}" - ) - # HACK: We parse again the cli args here to get the pretrained path if there was one. policy_path = parser.get_path_arg("policy") if policy_path: diff --git a/lerobot/configs/train.py b/lerobot/configs/train.py index 1021a3fcdd..3d976e81e5 100644 --- a/lerobot/configs/train.py +++ b/lerobot/configs/train.py @@ -13,7 +13,7 @@ from lerobot.common.optim import OptimizerConfig from lerobot.common.optim.schedulers import LRSchedulerConfig from lerobot.common.utils.hub import HubMixin -from lerobot.common.utils.utils import auto_select_torch_device +from lerobot.common.utils.utils import auto_select_torch_device, is_amp_available from lerobot.configs import parser from lerobot.configs.default import DatasetConfig, EvalConfig, WandBConfig from lerobot.configs.policies import PreTrainedConfig @@ -129,14 +129,21 @@ def validate(self): device = auto_select_torch_device() self.device = device.type - if self.use_amp and self.device not in ["cuda", "cpu"]: - raise NotImplementedError( - "Automatic Mixed Precision (amp) is only available for 'cuda' and 'cpu' devices. " - f"Selected device: {self.device}" + # Automatically deactivate AMP if necessary + if self.use_amp and not is_amp_available(self.device): + logging.warning( + f"Automatic Mixed Precision (amp) is not available on device '{self.device}'. Deactivating AMP." ) + self.use_amp = False # HACK: We parse again the cli args here to get the pretrained paths if there was some. 
- if self.resume: + policy_path = parser.get_path_arg("policy") + if policy_path: + # Only load the policy config + cli_overrides = parser.get_cli_overrides("policy") + self.policy = PreTrainedConfig.from_pretrained(policy_path, cli_overrides=cli_overrides) + self.policy.pretrained_path = policy_path + elif self.resume: # The entire train config is already loaded, we just need to get the checkpoint dir config_path = parser.parse_arg("config_path") if not config_path: @@ -144,13 +151,6 @@ def validate(self): policy_path = Path(config_path).parent self.policy.pretrained_path = policy_path self.checkpoint_path = policy_path.parent - else: - policy_path = parser.get_path_arg("policy") - if policy_path: - # Only load the policy config - cli_overrides = parser.get_cli_overrides("policy") - self.policy = PreTrainedConfig.from_pretrained(policy_path, cli_overrides=cli_overrides) - self.policy.pretrained_path = policy_path if not self.job_name: if self.env is None: From 693810f0a3b3f4d21cb41faa5f9aa7097c0fd376 Mon Sep 17 00:00:00 2001 From: Simon Alibert Date: Tue, 28 Jan 2025 18:28:53 +0100 Subject: [PATCH 86/94] Update example 4 --- examples/4_train_policy_with_script.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/4_train_policy_with_script.md b/examples/4_train_policy_with_script.md index 45136355e4..5d53296f74 100644 --- a/examples/4_train_policy_with_script.md +++ b/examples/4_train_policy_with_script.md @@ -181,8 +181,8 @@ outputs/train/run_resumption/checkpoints │   │   ├── train_config.json # train config │ │ └── README.md # model card │   └── training_state.pth # optimizer/scheduler/rng state and training step -. -└── last # symlink to the last available checkpoint +├── 000200 +└── last -> 000200 # symlink to the last available checkpoint ``` ## Fine-tuning a pre-trained policy From 03da0a84c31484d8482f8b471f6eb9202e7c986f Mon Sep 17 00:00:00 2001 From: Simon Alibert Date: Wed, 29 Jan 2025 11:04:59 +0100 Subject: [PATCH 87/94] Revert "Add pusht hack" This reverts commit aa65bb7619e5bcbe838541bbf0af65a738a13a91. 
--- lerobot/common/datasets/factory.py | 19 ------------------- 1 file changed, 19 deletions(-) diff --git a/lerobot/common/datasets/factory.py b/lerobot/common/datasets/factory.py index f86d242e9b..58ff400e46 100644 --- a/lerobot/common/datasets/factory.py +++ b/lerobot/common/datasets/factory.py @@ -32,18 +32,6 @@ "std": [[[0.229]], [[0.224]], [[0.225]]], # (c,1,1) } -# Stats used in the original Diffusion repo -PUSHT_STATS = { - "observation.state": { - "min": [13.456424, 32.938293], - "max": [496.14618, 510.9579], - }, - "action": { - "min": [12.0, 25.0], - "max": [511.0, 511.0], - }, -} - def resolve_delta_timestamps( cfg: PreTrainedConfig, ds_meta: LeRobotDatasetMetadata @@ -124,11 +112,4 @@ def make_dataset(cfg: TrainPipelineConfig) -> LeRobotDataset | MultiLeRobotDatas for stats_type, stats in IMAGENET_STATS.items(): dataset.meta.stats[key][stats_type] = torch.tensor(stats, dtype=torch.float32) - # HACK for pusht - if cfg.dataset.repo_id.startswith("lerobot/pusht"): - for key in PUSHT_STATS: - if key in dataset.meta.features: - for stats_type, stats in PUSHT_STATS[key].items(): - dataset.meta.stats[key][stats_type] = torch.tensor(stats, dtype=torch.float32) - return dataset From 840b98002b0388fd59175d119343368b2274a2dd Mon Sep 17 00:00:00 2001 From: Remi Date: Wed, 29 Jan 2025 20:47:24 +0100 Subject: [PATCH 88/94] Fix --control.policy.path (#662) --- lerobot/common/robot_devices/control_configs.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/lerobot/common/robot_devices/control_configs.py b/lerobot/common/robot_devices/control_configs.py index 418265d94a..a2f3889c3a 100644 --- a/lerobot/common/robot_devices/control_configs.py +++ b/lerobot/common/robot_devices/control_configs.py @@ -89,9 +89,9 @@ class RecordControlConfig(ControlConfig): def __post_init__(self): # HACK: We parse again the cli args here to get the pretrained path if there was one. 
- policy_path = parser.get_path_arg("policy") + policy_path = parser.get_path_arg("control.policy") if policy_path: - cli_overrides = parser.get_cli_overrides("policy") + cli_overrides = parser.get_cli_overrides("control.policy") self.policy = PreTrainedConfig.from_pretrained(policy_path, cli_overrides=cli_overrides) self.policy.pretrained_path = policy_path @@ -139,3 +139,8 @@ class ReplayControlConfig(ControlConfig): class ControlPipelineConfig: robot: RobotConfig control: ControlConfig + + @classmethod + def __get_path_fields__(cls) -> list[str]: + """This enables the parser to load config from the policy using `--policy.path=local/dir`""" + return ["control.policy"] From 26660413646f0cbce16411f262540eb8b94431de Mon Sep 17 00:00:00 2001 From: Remi Date: Thu, 30 Jan 2025 09:28:02 +0100 Subject: [PATCH 89/94] Fix train real world (#664) --- lerobot/scripts/train.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/lerobot/scripts/train.py b/lerobot/scripts/train.py index d8136681e4..954c796991 100644 --- a/lerobot/scripts/train.py +++ b/lerobot/scripts/train.py @@ -242,10 +242,9 @@ def evaluate_and_checkpoint_if_needed(step: int, is_online: bool): _num_digits = max(6, len(str(cfg.offline.steps + cfg.online.steps))) step_identifier = f"{step:0{_num_digits}d}" - if cfg.eval_freq > 0 and step % cfg.eval_freq == 0: + if cfg.env is not None and cfg.eval_freq > 0 and step % cfg.eval_freq == 0: logging.info(f"Eval policy at step {step}") with torch.no_grad(), torch.autocast(device_type=device.type) if cfg.use_amp else nullcontext(): - assert eval_env is not None eval_info = eval_policy( eval_env, policy, From ca1bee7fd2ded1f34574b7d99d8e4ba00c91922e Mon Sep 17 00:00:00 2001 From: Simon Alibert Date: Fri, 31 Jan 2025 09:37:31 +0100 Subject: [PATCH 90/94] Fix torch 2.6 load() --- lerobot/common/optim/factory.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lerobot/common/optim/factory.py b/lerobot/common/optim/factory.py index 9078284f6f..010cd4613d 100644 --- a/lerobot/common/optim/factory.py +++ b/lerobot/common/optim/factory.py @@ -49,7 +49,8 @@ def load_training_state(checkpoint_dir: Path, optimizer: Optimizer, scheduler: L Given the checkpoint directory, load the optimizer state, scheduler state, and random state, and return the global training step. """ - training_state = torch.load(checkpoint_dir / TRAINING_STATE) + # TODO(aliberts): use safetensors instead as weights_only=False is unsafe + training_state = torch.load(checkpoint_dir / TRAINING_STATE, weights_only=False) optimizer.load_state_dict(training_state["optimizer"]) if scheduler is not None: scheduler.load_state_dict(training_state["scheduler"]) From 58adcbec7598773936803d3642ccd4606e648ce3 Mon Sep 17 00:00:00 2001 From: Simon Alibert <75076266+aliberts@users.noreply.github.com> Date: Fri, 31 Jan 2025 09:50:22 +0100 Subject: [PATCH 91/94] Apply suggestions from code review Co-authored-by: HUANG TZU-CHUN <137322177+tc-huang@users.noreply.github.com> --- examples/4_train_policy_with_script.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/examples/4_train_policy_with_script.md b/examples/4_train_policy_with_script.md index 5d53296f74..9d57d424a1 100644 --- a/examples/4_train_policy_with_script.md +++ b/examples/4_train_policy_with_script.md @@ -1,5 +1,5 @@ This tutorial will explain the training script, how to use it, and particularly how to configure everything needed for the training run. 
-> **Note:** The following assume you're running these commands on a machine equiped with a cuda GPU. If you don't have one (or if you're using a Mac), you can add `--device=cpu` (`--device=mps` respectively). However, be advised that the code executes much slower on cpu. +> **Note:** The following assume you're running these commands on a machine equipped with a cuda GPU. If you don't have one (or if you're using a Mac), you can add `--device=cpu` (`--device=mps` respectively). However, be advised that the code executes much slower on cpu. ## The training script @@ -23,7 +23,7 @@ def train(cfg: TrainPipelineConfig): You can inspect the `TrainPipelineConfig` defined in [`lerobot/configs/train.py`](../../lerobot/configs/train.py) (which is heavily commented and meant to be a reference to understand any option) -When running the script, inputs for the command line are parsed thanks to the `@parser.wrap()` decorator and an instance of this class is automatically generated. Under the hood, this is done with [Draccus](https://github.com/dlwh/draccus) which is a tool dedicated for this purpose. If you're familiar with Hydra, Draccus can similarly load configurations from config files (.json, .yaml) and also override their values through command line inputs. Unlike Hydra, these configurations are pre-defined in the code through dataclasses rather than being defined entirely in config files. This allows for more rigorous serialization/deserialization, typing, and to manipulate configuration as objects directly in the code and not as dictionnaries or namespaces (which enables nice features in an IDE such as autocomplete, jump-to-def, etc.) +When running the script, inputs for the command line are parsed thanks to the `@parser.wrap()` decorator and an instance of this class is automatically generated. Under the hood, this is done with [Draccus](https://github.com/dlwh/draccus) which is a tool dedicated for this purpose. If you're familiar with Hydra, Draccus can similarly load configurations from config files (.json, .yaml) and also override their values through command line inputs. Unlike Hydra, these configurations are pre-defined in the code through dataclasses rather than being defined entirely in config files. This allows for more rigorous serialization/deserialization, typing, and to manipulate configuration as objects directly in the code and not as dictionaries or namespaces (which enables nice features in an IDE such as autocomplete, jump-to-def, etc.) Let's have a look at a simplified example. Amongst other attributes, the training config has the following attributes: ```python @@ -42,7 +42,7 @@ class DatasetConfig: video_backend: str = "pyav" ``` -This creates a hierarchical relationship where, for exemple assuming we have a `cfg` instance of `TrainPipelineConfig`, we can access the `repo_id` value with `cfg.dataset.repo_id`. +This creates a hierarchical relationship where, for example assuming we have a `cfg` instance of `TrainPipelineConfig`, we can access the `repo_id` value with `cfg.dataset.repo_id`. From the command line, we can specify this value with using a very similar syntax `--dataset.repo_id=repo/id`. By default, every field takes its default value specified in the dataclass. If a field doesn't have a default value, it needs to be specified either from the command line or from a config file – which path is also given in the command line (more in this below). In the example above, the `dataset` field doesn't have a default value which means it must be specified. 
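To make the dataclass/CLI pattern described in the tutorial diff above concrete, here is a minimal, self-contained sketch. The `ExampleDatasetConfig` and `ExampleTrainConfig` names are invented for illustration and are not the actual LeRobot classes, and the direct use of `draccus.wrap()` is an assumption standing in for LeRobot's own `@parser.wrap()` decorator.

```python
from dataclasses import dataclass

import draccus  # assumption: calling draccus directly instead of lerobot.configs.parser


@dataclass
class ExampleDatasetConfig:
    repo_id: str  # no default, so it must come from the CLI or a config file
    episodes: list[int] | None = None
    video_backend: str = "pyav"


@dataclass
class ExampleTrainConfig:
    dataset: ExampleDatasetConfig
    batch_size: int = 8


@draccus.wrap()
def main(cfg: ExampleTrainConfig):
    # Nested fields map to dotted CLI flags, mirroring the cfg.dataset.repo_id access in code:
    #   python example.py --dataset.repo_id=lerobot/pusht --batch_size=16
    print(cfg.dataset.repo_id, cfg.batch_size)


if __name__ == "__main__":
    main()
```

Running `python example.py` without `--dataset.repo_id` would then fail with a missing-argument error, which is the behaviour the tutorial describes for fields that have no default value.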
From 61b5fe2c4e04640e0cde184710d698b7a493f831 Mon Sep 17 00:00:00 2001 From: Simon Alibert Date: Fri, 31 Jan 2025 12:05:50 +0100 Subject: [PATCH 92/94] Fix examples --- examples/2_evaluate_pretrained_policy.py | 3 +- examples/3_train_policy.py | 6 - .../advanced/2_calculate_validation_loss.py | 141 +++++++++--------- 3 files changed, 74 insertions(+), 76 deletions(-) diff --git a/examples/2_evaluate_pretrained_policy.py b/examples/2_evaluate_pretrained_policy.py index 45dcf8e717..0a7b8deb66 100644 --- a/examples/2_evaluate_pretrained_policy.py +++ b/examples/2_evaluate_pretrained_policy.py @@ -30,8 +30,7 @@ # OR a path to a local outputs/train folder. # pretrained_policy_path = Path("outputs/train/example_pusht_diffusion") -# TODO: remove revision before merge -policy = DiffusionPolicy.from_pretrained(pretrained_policy_path, revision="remove_hydra", map_location=device) +policy = DiffusionPolicy.from_pretrained(pretrained_policy_path, map_location=device) # Initialize evaluation environment to render two observation types: # an image of the scene and state/position of the agent. The environment diff --git a/examples/3_train_policy.py b/examples/3_train_policy.py index daf5abb33a..635c72935b 100644 --- a/examples/3_train_policy.py +++ b/examples/3_train_policy.py @@ -69,12 +69,6 @@ def main(): # We can then instantiate the dataset with these delta_timestamps configuration. dataset = LeRobotDataset("lerobot/pusht", delta_timestamps=delta_timestamps) - # TODO: keep? - # from lerobot.common.optim.factory import make_optimizer_and_scheduler - # from lerobot.configs.train import TrainPipelineConfig - # train_cfg = TrainPipelineConfig.from_pretrained("lerobot/diffusion_pusht", revision="remove_hydra") - # optimizer, _ = make_optimizer_and_scheduler(train_cfg, policy) - # Then we create our optimizer and dataloader for offline training. optimizer = torch.optim.Adam(policy.parameters(), lr=1e-4) dataloader = torch.utils.data.DataLoader( diff --git a/examples/advanced/2_calculate_validation_loss.py b/examples/advanced/2_calculate_validation_loss.py index 25587b2927..71e760726d 100644 --- a/examples/advanced/2_calculate_validation_loss.py +++ b/examples/advanced/2_calculate_validation_loss.py @@ -9,77 +9,82 @@ """ import math -from pathlib import Path import torch -from huggingface_hub import snapshot_download from lerobot.common.datasets.lerobot_dataset import LeRobotDataset, LeRobotDatasetMetadata from lerobot.common.policies.diffusion.modeling_diffusion import DiffusionPolicy -device = torch.device("cuda") - -# Download the diffusion policy for pusht environment -pretrained_policy_path = Path(snapshot_download("lerobot/diffusion_pusht")) -# OR uncomment the following to evaluate a policy from the local outputs/train folder. -# pretrained_policy_path = Path("outputs/train/example_pusht_diffusion") - -# TODO(alibert, rcadene): fix this file -policy = DiffusionPolicy.from_pretrained(pretrained_policy_path) -policy.eval() -policy.to(device) - -# Set up the dataset. -delta_timestamps = { - # Load the previous image and state at -0.1 seconds before current frame, - # then load current image and state corresponding to 0.0 second. - "observation.image": [-0.1, 0.0], - "observation.state": [-0.1, 0.0], - # Load the previous action (-0.1), the next action to be executed (0.0), - # and 14 future actions with a 0.1 seconds spacing. All these actions will be - # used to calculate the loss. 
- "action": [-0.1, 0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4], -} - -# Load the last 10% of episodes of the dataset as a validation set. -# - Load dataset metadata -dataset_metadata = LeRobotDatasetMetadata("lerobot/pusht") -# - Calculate train and val episodes -total_episodes = dataset_metadata.total_episodes -episodes = list(range(dataset_metadata.total_episodes)) -num_train_episodes = math.floor(total_episodes * 90 / 100) -train_episodes = episodes[:num_train_episodes] -val_episodes = episodes[num_train_episodes:] -print(f"Number of episodes in full dataset: {total_episodes}") -print(f"Number of episodes in training dataset (90% subset): {len(train_episodes)}") -print(f"Number of episodes in validation dataset (10% subset): {len(val_episodes)}") -# - Load train an val datasets -train_dataset = LeRobotDataset("lerobot/pusht", episodes=train_episodes, delta_timestamps=delta_timestamps) -val_dataset = LeRobotDataset("lerobot/pusht", episodes=val_episodes, delta_timestamps=delta_timestamps) -print(f"Number of frames in training dataset (90% subset): {len(train_dataset)}") -print(f"Number of frames in validation dataset (10% subset): {len(val_dataset)}") - -# Create dataloader for evaluation. -val_dataloader = torch.utils.data.DataLoader( - val_dataset, - num_workers=4, - batch_size=64, - shuffle=False, - pin_memory=device != torch.device("cpu"), - drop_last=False, -) - -# Run validation loop. -loss_cumsum = 0 -n_examples_evaluated = 0 -for batch in val_dataloader: - batch = {k: v.to(device, non_blocking=True) for k, v in batch.items()} - output_dict = policy.forward(batch) - - loss_cumsum += output_dict["loss"].item() - n_examples_evaluated += batch["index"].shape[0] - -# Calculate the average loss over the validation set. -average_loss = loss_cumsum / n_examples_evaluated - -print(f"Average loss on validation set: {average_loss:.4f}") + +def main(): + device = torch.device("cuda") + + # Download the diffusion policy for pusht environment + pretrained_policy_path = "lerobot/diffusion_pusht" + # OR uncomment the following to evaluate a policy from the local outputs/train folder. + # pretrained_policy_path = Path("outputs/train/example_pusht_diffusion") + + policy = DiffusionPolicy.from_pretrained(pretrained_policy_path) + policy.eval() + policy.to(device) + + # Set up the dataset. + delta_timestamps = { + # Load the previous image and state at -0.1 seconds before current frame, + # then load current image and state corresponding to 0.0 second. + "observation.image": [-0.1, 0.0], + "observation.state": [-0.1, 0.0], + # Load the previous action (-0.1), the next action to be executed (0.0), + # and 14 future actions with a 0.1 seconds spacing. All these actions will be + # used to calculate the loss. + "action": [-0.1, 0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4], + } + + # Load the last 10% of episodes of the dataset as a validation set. 
+ # - Load dataset metadata + dataset_metadata = LeRobotDatasetMetadata("lerobot/pusht") + # - Calculate train and val episodes + total_episodes = dataset_metadata.total_episodes + episodes = list(range(dataset_metadata.total_episodes)) + num_train_episodes = math.floor(total_episodes * 90 / 100) + train_episodes = episodes[:num_train_episodes] + val_episodes = episodes[num_train_episodes:] + print(f"Number of episodes in full dataset: {total_episodes}") + print(f"Number of episodes in training dataset (90% subset): {len(train_episodes)}") + print(f"Number of episodes in validation dataset (10% subset): {len(val_episodes)}") + # - Load train an val datasets + train_dataset = LeRobotDataset( + "lerobot/pusht", episodes=train_episodes, delta_timestamps=delta_timestamps + ) + val_dataset = LeRobotDataset("lerobot/pusht", episodes=val_episodes, delta_timestamps=delta_timestamps) + print(f"Number of frames in training dataset (90% subset): {len(train_dataset)}") + print(f"Number of frames in validation dataset (10% subset): {len(val_dataset)}") + + # Create dataloader for evaluation. + val_dataloader = torch.utils.data.DataLoader( + val_dataset, + num_workers=4, + batch_size=64, + shuffle=False, + pin_memory=device != torch.device("cpu"), + drop_last=False, + ) + + # Run validation loop. + loss_cumsum = 0 + n_examples_evaluated = 0 + for batch in val_dataloader: + batch = {k: v.to(device, non_blocking=True) for k, v in batch.items()} + output_dict = policy.forward(batch) + + loss_cumsum += output_dict["loss"].item() + n_examples_evaluated += batch["index"].shape[0] + + # Calculate the average loss over the validation set. + average_loss = loss_cumsum / n_examples_evaluated + + print(f"Average loss on validation set: {average_loss:.4f}") + + +if __name__ == "__main__": + main() From 02cea7521276db8145fb9c3e0241025bf90de28e Mon Sep 17 00:00:00 2001 From: Simon Alibert <75076266+aliberts@users.noreply.github.com> Date: Fri, 31 Jan 2025 12:06:53 +0100 Subject: [PATCH 93/94] Apply suggestions from code review Co-authored-by: Remi --- Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index 53305474a0..530eca2ec2 100644 --- a/Makefile +++ b/Makefile @@ -105,7 +105,7 @@ test-tdmpc-ete-train: --env.episode_length=5 \ --dataset.repo_id=lerobot/xarm_lift_medium \ --dataset.image_transforms.enable=true \ - --dataset.episodes='[0]' \ + --dataset.episodes="[0]" \ --batch_size=2 \ --offline.steps=2 \ --online.steps=0 \ @@ -136,7 +136,7 @@ test-tdmpc-ete-train-with-online: --env.episode_length=5 \ --dataset.repo_id=lerobot/pusht_keypoints \ --dataset.image_transforms.enable=true \ - --dataset.episodes='[0]' \ + --dataset.episodes="[0]" \ --batch_size=2 \ --offline.steps=2 \ --online.steps=20 \ From 71aee4e5cead29173034fc07435ef38c9ade2c83 Mon Sep 17 00:00:00 2001 From: Simon Alibert Date: Fri, 31 Jan 2025 12:12:24 +0100 Subject: [PATCH 94/94] Add code review suggestions --- lerobot/common/robot_devices/robots/manipulator.py | 2 +- lerobot/scripts/control_sim_robot.py | 2 ++ lerobot/scripts/eval.py | 4 +++- 3 files changed, 6 insertions(+), 2 deletions(-) diff --git a/lerobot/common/robot_devices/robots/manipulator.py b/lerobot/common/robot_devices/robots/manipulator.py index ceeaa9a88e..e7f7cbb151 100644 --- a/lerobot/common/robot_devices/robots/manipulator.py +++ b/lerobot/common/robot_devices/robots/manipulator.py @@ -109,7 +109,7 @@ class ManipulatorRobot: ``` Example of highest frequency teleoperation, which doesn't require cameras: - ``` + ```python while 
True: robot.teleop_step() ``` diff --git a/lerobot/scripts/control_sim_robot.py b/lerobot/scripts/control_sim_robot.py index 9955ea52f0..8d69bf3185 100644 --- a/lerobot/scripts/control_sim_robot.py +++ b/lerobot/scripts/control_sim_robot.py @@ -94,6 +94,8 @@ from lerobot.common.robot_devices.utils import busy_wait from lerobot.common.utils.utils import init_hydra_config, init_logging, log_say +raise NotImplementedError("This script is currently deactivated") + DEFAULT_FEATURES = { "next.reward": { "dtype": "float32", diff --git a/lerobot/scripts/eval.py b/lerobot/scripts/eval.py index 0c765cb964..acc3d9f354 100644 --- a/lerobot/scripts/eval.py +++ b/lerobot/scripts/eval.py @@ -234,7 +234,9 @@ def eval_policy( if max_episodes_rendered > 0 and not videos_dir: raise ValueError("If max_episodes_rendered > 0, videos_dir must be provided.") - assert isinstance(policy, PreTrainedPolicy) + if not isinstance(policy, PreTrainedPolicy): + raise ValueError(policy) + start = time.time() policy.eval()
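As a closing aside on the last hunk above, which swaps an `assert isinstance(...)` for an explicit `raise`: `assert` statements are stripped when Python runs with the `-O` flag, so user-facing validation in library code is typically written as an explicit check and raise. The `validate_policy` helper below is a hypothetical sketch of that pattern, not code from the patch.

```python
def validate_policy(policy: object, expected_type: type) -> None:
    # Unlike `assert isinstance(policy, expected_type)`, this check still runs
    # under `python -O`, and the error tells the caller what was actually passed.
    if not isinstance(policy, expected_type):
        raise ValueError(
            f"Expected an instance of {expected_type.__name__}, "
            f"got {type(policy).__name__}: {policy!r}"
        )
```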