python_code (string column; row lengths range from 0 to 456k characters)
from typing import Dict, List, Tuple

from torch.fx.node import Node

from .trace_indice import TraceIndice
from .utils import (
    NodeMgr,
    find_chunk_all_input_nodes,
    find_chunk_compute_input_and_output_nodes,
    find_tensor_shape_node,
    flat_list,
    get_node_name,
    get_node_shape,
    is_non_comput...
from .estimate_memory import EstimateMemory
from .reorder_graph import ReorderGraph
from .trace_indice import TraceIndice
from .utils import NodeMgr, is_non_compute_node


class SelectChunk(object):

    def __init__(
        self,
        trace_indice: TraceIndice,
        estimate_memory: EstimateMemory,
        reor...
from typing import Any, Callable, Dict, Iterable, List, Tuple, Union

from torch.fx.node import Node

from colossalai.logging import get_dist_logger

NON_COMPUTE_OP = ["placeholder", "get_attr", "output"]
NON_COMPUTE_NAME = ["getattr", "eq", "_assert_is_none", "_assert", "finfo", "size"]
logger = get_dist_logger()


cl...
import copy
from typing import Dict, List, Tuple

from torch.fx.node import Node

from .utils import NodeMgr, find_first_tensor_arg, flat_list, get_module_node_name, get_node_name, get_node_shape


class TraceIndice(object):
    """
    Trace all indice information for every node.

    Indice is a logical concept. Equal...
from typing import Any, Dict, Iterable, List, Tuple

import torch

import colossalai
from colossalai.fx._compatibility import is_compatible_with_meta
from colossalai.fx.codegen.activation_checkpoint_codegen import CODEGEN_AVAILABLE

AUTOCHUNK_AVAILABLE = CODEGEN_AVAILABLE and is_compatible_with_meta()

if AUTOCHUNK_AVA...
from .cli import cli

__all__ = ['cli']
import click

from .benchmark import benchmark
from .check import check
from .launcher import run


class Arguments():

    def __init__(self, arg_dict):
        for k, v in arg_dict.items():
            self.__dict__[k] = v


@click.group()
def cli():
    pass


cli.add_command(run)
cli.add_command(check)
cli.add_comm...
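The row above uses the standard click group pattern: subcommands are defined separately and attached with cli.add_command. A minimal, self-contained sketch of that pattern; the hello command and its message are placeholders for illustration only, not part of the Colossal-AI CLI:

import click


@click.group()
def cli():
    # top-level command group; subcommands are attached below
    pass


@click.command()
def hello():
    # placeholder subcommand, only to show how add_command wires things up
    click.echo("hello from a subcommand")


cli.add_command(hello)

if __name__ == '__main__':
    cli()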
from functools import partial
from typing import Dict, List

import click
import torch.multiprocessing as mp

import colossalai
from colossalai.cli.benchmark.utils import find_all_configs, get_batch_data, profile_model
from colossalai.context import Config
from colossalai.context.random import reset_seeds
from colossal...
import torch

import colossalai.nn as col_nn


class MLP(torch.nn.Module):

    def __init__(self, dim: int, layers: int):
        super().__init__()
        self.layers = torch.nn.ModuleList()
        for _ in range(layers):
            self.layers.append(col_nn.Linear(dim, dim))

    def forward(self, x):
        for...
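The row above builds the benchmark MLP from col_nn.Linear layers held in a ModuleList and applies them in order in forward. A minimal runnable sketch of the same pattern, substituting torch.nn.Linear so it works without a Colossal-AI parallel context (an assumption made only for illustration):

import torch


class SimpleMLP(torch.nn.Module):
    # same structure as the MLP above, but with plain torch.nn.Linear layers

    def __init__(self, dim: int, layers: int):
        super().__init__()
        self.layers = torch.nn.ModuleList(torch.nn.Linear(dim, dim) for _ in range(layers))

    def forward(self, x):
        # apply each layer sequentially
        for layer in self.layers:
            x = layer(x)
        return x


model = SimpleMLP(dim=256, layers=4)
out = model(torch.randn(8, 256))
print(out.shape)    # torch.Size([8, 256])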
import click

from colossalai.context import Config

from .benchmark import run_benchmark
from .utils import *

__all__ = ['benchmark']


@click.command()
@click.option("-g", "--gpus", type=int, default=None, help="Total number of devices to use.")
@click.option("-b", "--batch_size", type=int, default=8, help="Batch si...
import math
import time

import torch

from colossalai.utils import MultiTimer
from colossalai.context import ParallelMode, Config
from typing import List, Dict, Tuple, Callable


def get_time_stamp() -> int:
    """
    Return the time stamp for profiling.

    Returns:
        time_stamp (int): the time given by time....
import os
import sys
from typing import List

import click
import torch
from packaging import version

from colossalai.context import Config

from .hostinfo import HostInfo, HostInfoList
from .multinode_runner import MultiNodeRunner

# Constants that define our syntax
NODE_SEP = ','


def fetch_hostfile(hostfile_path: ...
from multiprocessing import Pipe, Process
from multiprocessing import connection as mp_connection

import click
import fabric

from .hostinfo import HostInfo, HostInfoList


def run_on_host(hostinfo: HostInfo, workdir: str, recv_conn: mp_connection.Connection, send_conn: mp_connection.Connection, env: d...
import click

from colossalai.context import Config

from .run import launch_multi_processes


@click.command(help="Launch distributed training on a single node or multiple nodes",
               context_settings=dict(ignore_unknown_options=True))
@click.option("-H", "-host", "--host", ...
import socket
from typing import List


class HostInfo:
    """
    A data class to store host connection-related data.

    Args:
        hostname (str): name or IP address of the host
        port (str): the port for ssh connection
    """

    def __init__(
        self,
        hostname: str,
        port: str = No...
import click

from .check_installation import check_installation

__all__ = ['check']


@click.command(help="Check if Colossal-AI is correct based on the given option")
@click.option('-i', '--installation', is_flag=True, help="Check if Colossal-AI is built correctly")
def check(installation):
    if installation:
        ...
import subprocess

import click
import torch
from torch.utils.cpp_extension import CUDA_HOME

import colossalai


def to_click_output(val):
    # installation check output to understandable symbols for readability
    VAL_TO_SYMBOL = {True: u'\u2713', False: 'x', None: 'N/A'}
    if val in VAL_TO_SYMBOL:
        retur...
from abc import ABC, abstractmethod
from copy import deepcopy
from typing import Any, List

import torch
from torch.fx import Graph, Node

from colossalai.auto_parallel.passes.runtime_apply_pass import (
    runtime_apply,
    runtime_apply_for_iterable_object,
    runtime_comm_spec_apply,
)
from colossalai.fx.codegen....
from .ckpt_solver_base import CheckpointSolverBase
from .ckpt_solver_chen import CheckpointSolverChen
from .ckpt_solver_rotor import CheckpointSolverRotor
import math
from abc import ABC
from typing import Any, Iterable, List

from torch.utils._pytree import tree_map


class Chain:

    def __init__(self, ftime: List[float], btime: List[float], x: List[int], xbar: List[int], ftmp: List[i...
from copy import deepcopy
from typing import Any, Dict, List, Tuple

from torch import Tensor
from torch.fx import Graph, Node

from colossalai.auto_parallel.passes.runtime_apply_pass import runtime_apply, runtime_comm_spec_apply
from colossalai.fx.codegen.activation_checkpoint_codegen import _find_nested_ckpt_regions
...
import math
from copy import deepcopy
from typing import List, Set, Tuple

from torch.fx import Graph, Node

from colossalai.fx.profiler import calculate_fwd_in, calculate_fwd_tmp

from .ckpt_solver_base import CheckpointSolverBase

__all__ = ['CheckpointSolverChen']


class CheckpointSolverChen(CheckpointSolverBase):
    ...
import os

from setuptools import Extension, setup

this_dir = os.path.dirname(os.path.abspath(__file__))
ext_modules = [Extension(
    'rotorc',
    sources=[os.path.join(this_dir, 'ckpt_solver_rotor.c')],
)]
setup(
    name='rotor c extension',
    version='0.1',
    description='rotor c extension for faster dp comp...
from typing import Callable, List

import torch

from colossalai.auto_parallel.tensor_shard.sharding_strategy import (
    MemoryCost,
    OperationData,
    OperationDataType,
    ShardingStrategy,
    StrategiesVector,
    TrainCycleItem,
)
from colossalai.tensor.sharding_spec import ShardingSpec

from .constants imp...
__all__ = ['Registry']


class Registry:

    def __init__(self, name):
        self.name = name
        self.store = {}

    def register(self, source):

        def wrapper(func):
            if isinstance(source, (list, tuple)):
                # support register a list of items for this func
                for ele...
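The Registry above maps a source key, or a list of keys, to the decorated function. A minimal sketch of how such a decorator-based registry is typically completed and used; the get method, the registry name, and the keys below are assumptions for illustration, not the actual Colossal-AI registry contents:

class Registry:

    def __init__(self, name):
        self.name = name
        self.store = {}

    def register(self, source):

        def wrapper(func):
            # a list/tuple registers every element to the same function
            if isinstance(source, (list, tuple)):
                for element in source:
                    self.store[element] = func
            else:
                self.store[source] = func
            return func

        return wrapper

    def get(self, source):
        # hypothetical lookup helper, added here only to complete the sketch
        return self.store[source]


demo_register = Registry('demo')


@demo_register.register(['relu', 'gelu'])
def activation_handler():
    return 'handled'


print(demo_register.get('relu') is demo_register.get('gelu'))    # True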
import operator

import torch
import torch.nn as nn

from ..tensor_shard.constants import *

# list of inplace module
INPLACE_MODULE = [nn.ReLU]

# list of inplace operations
INPLACE_OPS = [torch.flatten]

# list of operations that do not save forward activations
NO_SAVE_ACTIVATION = [torch.add, torch.sub, operator.add...
from .meta_registry import *
from .metainfo import *
from .registry import meta_register
from functools import reduce
from typing import Callable, Dict, List, Tuple, Union

import torch

from colossalai.auto_parallel.tensor_shard.sharding_strategy import (
    MemoryCost,
    OperationData,
    OperationDataType,
    ShardingStrategy,
    StrategiesVector,
    TrainCycleItem,
)
from colossalai.fx.profiler....
from typing import Callable, Dict, List, Tuple, Union

import torch

from colossalai.auto_parallel.tensor_shard.sharding_strategy import (
    MemoryCost,
    OperationData,
    OperationDataType,
    ShardingStrategy,
    StrategiesVector,
    TrainCycleItem,
)
from colossalai.fx.profiler.memory_utils import activatio...
from typing import List, Tuple

import torch

from colossalai.auto_parallel.tensor_shard.sharding_strategy import MemoryCost, OperationDataType, TrainCycleItem
from colossalai.fx.profiler.memory_utils import activation_size
from colossalai.fx.profiler.opcount import flop_mapping

from ..constants import BCAST_FUNC_OP, ...
from typing import List, Tuple

import torch

from colossalai.auto_parallel.tensor_shard.sharding_strategy import MemoryCost, OperationDataType, TrainCycleItem
from colossalai.fx.profiler.memory_utils import activation_size
from colossalai.fx.profiler.opcount import flop_mapping

from ..registry import meta_register

_...
from .activation import *
from .binary_elementwise_ops import *
from .conv import *
from .linear import *
from .norm import *
from .pooling import *
from typing import List, Tuple

import torch

from colossalai.auto_parallel.tensor_shard.sharding_strategy import MemoryCost, OperationDataType, TrainCycleItem
from colossalai.fx.profiler.memory_utils import activation_size
from colossalai.fx.profiler.opcount import flop_mapping

from ..registry import meta_register

_...
from typing import Callable, Dict, List, Tuple, Union

import torch

from colossalai.auto_parallel.tensor_shard.sharding_strategy import (
    MemoryCost,
    OperationData,
    OperationDataType,
    ShardingStrategy,
    StrategiesVector,
    TrainCycleItem,
)
from colossalai.fx.profiler.memory_utils import activatio...
from copy import deepcopy
from typing import Dict, List

import torch
from torch.fx.node import Node

from colossalai.auto_parallel.meta_profiler import MetaInfo
from colossalai.auto_parallel.tensor_shard.sharding_strategy import (
    CommAction,
    CommType,
    OperationData,
    OperationDataType,
    TrainCycleIt...
import uuid
from dataclasses import asdict
from typing import List

import torch
import torch.fx
from torch.fx import GraphModule
from torch.fx.node import Node

from colossalai.auto_parallel.meta_profiler import MetaInfo
from colossalai.auto_parallel.passes.constants import OUTPUT_SAVED_MOD, OUTPUT_SAVED_OPS
from colo...
import torch

OUTPUT_SAVED_OPS = [torch.nn.functional.relu, torch.nn.functional.softmax, torch.flatten]

OUTPUT_SAVED_MOD = [
    torch.nn.ReLU,
    torch.nn.Softmax,
]

# SHAPE_ARGUMENT_OPS contains node with (input, *shape) style args.
# This list could be extended if any other method has the same
# argument style as...
from typing import Dict

import torch
from torch.fx import GraphModule
from torch.fx.node import Node

from colossalai.auto_parallel.meta_profiler import MetaInfo
from colossalai.auto_parallel.passes.runtime_apply_pass import runtime_apply, runtime_comm_spec_apply
from colossalai.auto_parallel.tensor_shard.sharding_str...
import operator
from copy import deepcopy
from typing import Dict, List, Union

import torch
from torch.fx import symbolic_trace
from torch.fx.node import Node

from colossalai.auto_parallel.tensor_shard.constants import RESHAPE_FUNC_OP
from colossalai.auto_parallel.tensor_shard.sharding_strategy import (
    CommActio...
from dataclasses import dataclass
from enum import Enum

__all__ = ['SolverOptions', 'SolverPerference', 'DataloaderOption', 'ShardOption']


class SolverPerference(Enum):
    """
    This enum class is to define the solver preference.
    """
    STANDARD = 0
    DP = 1
    TP = 2


class ShardOption(Enum):
    """
    ...
from copy import deepcopy
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, List, Tuple, Union

import torch
from torch.fx.node import Node

from colossalai.tensor.comm_spec import CommSpec
from colossalai.tensor.sharding_spec import ShardingSpec

from .constants import (
    BCAST_F...
from typing import Dict, List, Tuple

import torch
import torch.distributed as dist
import torch.nn as nn
from torch.fx import GraphModule
from torch.fx.graph import Graph

from colossalai.auto_parallel.passes.runtime_apply_pass import runtime_apply_pass
from colossalai.auto_parallel.passes.runtime_preparation_pass imp...
import operator

import torch

__all__ = [
    'ELEMENTWISE_MODULE_OP', 'ELEMENTWISE_FUNC_OP', 'RESHAPE_FUNC_OP', 'CONV_MODULE_OP', 'CONV_FUNC_OP', 'LINEAR_MODULE_OP', 'LINEAR_FUNC_OP', 'BATCHNORM_MODULE_OP', 'POOL_MODULE_OP', 'NON_PARAM_FUNC_OP', 'BCAST_FUNC_OP', 'EMBEDDING_MODULE_OP', 'LAYERNORM_MODULE_OP', '...
import operator
from copy import deepcopy
from functools import reduce
from typing import Dict

import torch

from colossalai.tensor.sharding_spec import ShardingSpec

__all__ = [
    'transpose_partition_dim', 'update_partition_dim', 'enumerate_all_possible_1d_sharding', 'enumerate_all_possible_2d_sharding', 'gene...
import functools
from typing import Any, Callable, Dict, List, Tuple, Type, Union

import torch

from colossalai.logging import get_dist_logger
from colossalai.tensor.sharding_spec import ShardingSpec, ShardingSpecException

__all__ = ['ignore_sharding_exception', 'pytree_map']


def ignore_sharding_exception(func):
    ...
from enum import Enum
from typing import Dict, List, Tuple

import torch


class PreviousStatus(Enum):
    """
    This class shows the status of previous comparison.
    """
    RESET = 0
    # ORIGIN means the dimension size of original tensor is larger in the previous comparison.
    ORIGIN = 1
    # TGT means the...
from .broadcast import (
    BroadcastType,
    comm_actions_for_oprands,
    get_broadcast_shape,
    is_broadcastable,
    recover_sharding_spec_for_broadcast_shape,
)
from .factory import generate_resharding_costs, generate_sharding_spec
from .misc import check_sharding_spec_validity, ignore_sharding_exception, pytr...
import operator
import warnings
from functools import reduce
from typing import Dict, List, Optional, Union

import torch

from colossalai.device.device_mesh import DeviceMesh
from colossalai.tensor.shape_consistency import ShapeConsistencyManager
from colossalai.tensor.sharding_spec import ShardingSpec
from torch.fx.no...
from enum import Enum, auto
from typing import List

import torch
from torch.fx.node import Node

from colossalai.auto_parallel.tensor_shard.sharding_strategy import (
    CommAction,
    CommType,
    OperationData,
    OperationDataType,
)
from colossalai.tensor.comm_spec import CollectiveCommPattern, CommSpec
from c...
import builtins
import math
import operator
from copy import deepcopy
from typing import Dict, List

import torch
from torch.fx import Graph, Node

from colossalai.auto_parallel.tensor_shard.node_handler import (
    GetattrHandler,
    OutputHandler,
    PlaceholderHandler,
    operator_registry,
)
from colossalai.aut...
import torch

from colossalai.auto_parallel.tensor_shard.constants import INFINITY_COST


class CostGraph:
    '''
    A graph data structure to simplify the edge cost graph. It has two main functions:
    1. To feed the quadratic resharding costs into solver, we need to linearize it. We build edge_cost in CostGrap...
import multiprocessing
import time
import warnings
from typing import Dict

import numpy as np
from torch.fx.graph import Graph
from torch.fx.node import Node

from colossalai.auto_parallel.tensor_shard.constants import INFINITY_COST

from .cost_graph import CostGraph
from .graph_analysis import GraphAnalyser
from .str...
from .cost_graph import CostGraph
from .graph_analysis import GraphAnalyser
from .solver import Solver
from .strategies_constructor import StrategiesConstructor

__all__ = ['GraphAnalyser', 'Solver', 'StrategiesConstructor', 'CostGraph']
from dataclasses import dataclass
from typing import List

from torch.fx.graph import Graph
from torch.fx.graph_module import GraphModule
from torch.fx.node import Node

from colossalai.fx.passes.utils import get_node_module

__all__ = ['LiveVariable', 'LiveVariableVector', 'LiveStage', 'GraphAnalyser']


@dataclass
cl...
from abc import ABC, abstractmethod
from typing import Dict, List, Tuple, Union

import torch
from torch.fx.node import Node

from colossalai.auto_parallel.meta_profiler.metainfo import MetaInfo, meta_register
from colossalai.auto_parallel.tensor_shard.options import ShardOption, SolverPerference
from colossalai.auto_p...
from typing import Dict, List

import torch

from ..sharding_strategy import OperationData, OperationDataType
from .node_handler import MetaInfoNodeHandler, NodeHandler
from .registry import operator_registry
from .strategy import StrategyGenerator, UnaryElementwiseGenerator

__all__ = ['UnaryElementwiseHandler']


@op...
from typing import Dict, List

import torch

from colossalai.auto_parallel.meta_profiler.metainfo import MetaInfo

from ..sharding_strategy import OperationData, OperationDataType, StrategiesVector
from .node_handler import MetaInfoModuleHandler, ModuleHandler
from .registry import operator_registry
from .strategy impo...
from typing import Dict, List, Union

import torch
from torch.fx.node import Node

from colossalai.auto_parallel.tensor_shard.sharding_strategy import OperationData, OperationDataType, ShardingStrategy
from colossalai.tensor.shape_consistency import CollectiveCommPattern, CommSpec, ShapeConsistencyManager

from ..const...
from typing import Dict, List

import torch

from ..sharding_strategy import OperationData, OperationDataType
from .node_handler import MetaInfoModuleHandler, ModuleHandler
from .registry import operator_registry
from .strategy import LayerNormGenerator, StrategyGenerator

__all__ = ['LayerNormModuleHandler']


@operat...
from typing import Dict, List

import torch

from ..sharding_strategy import OperationData, OperationDataType
from .node_handler import NodeHandler
from .registry import operator_registry
from .strategy import SplitGenerator, StrategyGenerator

__all__ = ['SplitHandler']


@operator_registry.register(torch.Tensor.split...
from typing import Dict, List

import torch
import torch.nn.functional as F

from ..sharding_strategy import OperationData, OperationDataType, ShardingStrategy, StrategiesVector
from ..utils import transpose_partition_dim
from .node_handler import MetaInfoModuleHandler, MetaInfoNodeHandler, ModuleHandler, NodeHandler
f...
from typing import Dict, List

import torch

from ..sharding_strategy import OperationData, OperationDataType
from .node_handler import MetaInfoModuleHandler, ModuleHandler
from .registry import operator_registry
from .strategy import NormalPoolStrategyGenerator, StrategyGenerator

__all__ = ['NormPoolingHandler']


@o...
import operator
from typing import Dict, List

import torch

from ..sharding_strategy import OperationData, OperationDataType
from .node_handler import NodeHandler
from .registry import operator_registry
from .strategy import StrategyGenerator, TensorStrategyGenerator, TensorTupleStrategyGenerator

__all__ = ['GetItemH...
class Registry:
    # TODO: refactor the registry classes used in colossalai.registry, colossalai.fx and here

    def __init__(self, name):
        self.name = name
        self.store = {}

    def register(self, source):

        def wrapper(func):
            if isinstance(source, (list, tuple)):
                # s...
import copy
import operator
from typing import Dict, List

import torch

from ..sharding_strategy import OperationData, OperationDataType, ShardingStrategy, StrategiesVector
from ..utils import recover_sharding_spec_for_broadcast_shape
from .node_handler import NodeHandler
from .registry import operator_registry
from ....
from typing import Dict, List, Union

import torch
import torch.nn.functional as F

from colossalai.auto_parallel.tensor_shard.utils import update_partition_dim
from colossalai.logging import get_dist_logger
from colossalai.tensor.sharding_spec import ShardingNotDivisibleError

from ..sharding_strategy import Operation...
from typing import Dict, List

import torch

from ..sharding_strategy import OperationData, OperationDataType
from .node_handler import NodeHandler
from .registry import operator_registry
from .strategy import PermuteGenerator, StrategyGenerator

__all__ = ['PermuteHandler']


@operator_registry.register(torch.Tensor.p...
from .addmm_handler import ADDMMFunctionHandler
from .batch_norm_handler import BatchNormModuleHandler
from .binary_elementwise_handler import BinaryElementwiseHandler
from .bmm_handler import AddBMMFunctionHandler, BMMFunctionHandler
from .conv_handler import ConvFunctionHandler, ConvModuleHandler
from .default_reshap...
from typing import Dict, List

import torch

from ..sharding_strategy import OperationData, OperationDataType
from .node_handler import NodeHandler
from .registry import operator_registry
from .strategy import StrategyGenerator, ViewGenerator

__all__ = ['ViewHandler']


@operator_registry.register(torch.Tensor.reshape...
from typing import Dict, List, Union

import torch
import torch.nn.functional as F

from colossalai.auto_parallel.tensor_shard.utils import (
    check_sharding_spec_validity,
    transpose_partition_dim,
    update_partition_dim,
)
from colossalai.logging import get_dist_logger
from colossalai.tensor.sharding_spec imp...
from typing import Dict, List, Union

import torch

from colossalai.tensor.shape_consistency import CollectiveCommPattern, CommSpec, ShapeConsistencyManager

from ..sharding_strategy import CommAction, CommType, OperationData, OperationDataType, ShardingStrategy
from ..utils import comm_actions_for_oprands, recover_sha...
from typing import Dict, List

import torch

from ..sharding_strategy import OperationData, OperationDataType
from .node_handler import MetaInfoNodeHandler, NodeHandler
from .registry import operator_registry
from .strategy import DefaultReshapeGenerator, StrategyGenerator

__all__ = ['DefaultReshapeHandler']


@operat...
from typing import Dict, List

from torch.fx.node import Node

from colossalai.device.device_mesh import DeviceMesh

from ..sharding_strategy import OperationData, OperationDataType, StrategiesVector
from .node_handler import NodeHandler
from .strategy import PlaceholderGenerator, StrategyGenerator

__all__ = ['Placeho...
from typing import Dict, List

from ..sharding_strategy import OperationData, OperationDataType
from .node_handler import NodeHandler
from .strategy import GetattrGenerator, StrategyGenerator

__all__ = ['GetattrHandler']


class GetattrHandler(NodeHandler):
    """
    A GetattrHandler which deals with the sharding st...
from typing import Dict, List, Union

import torch

from colossalai.tensor.shape_consistency import CollectiveCommPattern, CommSpec, ShapeConsistencyManager

from ..sharding_strategy import CommAction, CommType, OperationData, OperationDataType, ShardingStrategy
from ..utils import comm_actions_for_oprands, recover_sha...
from typing import Dict, List

import torch

from ..sharding_strategy import OperationData, OperationDataType
from .node_handler import NodeHandler
from .registry import operator_registry
from .strategy import StrategyGenerator, TransposeGenerator

__all__ = ['TransposeHandler']


@operator_registry.register(torch.Tens...
from typing import Dict, List

import torch

from ..sharding_strategy import OperationData, OperationDataType
from .node_handler import NodeHandler
from .registry import operator_registry
from .strategy import SoftmaxGenerator, StrategyGenerator

__all__ = ['SoftmaxHandler']


@operator_registry.register(torch.nn.Softm...
from typing import Dict, List

import torch

from colossalai.device.device_mesh import DeviceMesh

from ..sharding_strategy import OperationData, OperationDataType, StrategiesVector
from .node_handler import NodeHandler
from .strategy import OutputGenerator, StrategyGenerator

__all__ = ['OutputHandler']


class Output...
import operator
from abc import ABC, abstractmethod
from copy import deepcopy
from enum import Enum
from functools import reduce
from typing import Dict, List, Union

import torch

from colossalai.auto_parallel.tensor_shard.utils.broadcast import (
    BroadcastType,
    get_broadcast_dim_info,
    get_broadcast_shape,...
from typing import Dict, List

import torch

from ..sharding_strategy import OperationData, OperationDataType
from .node_handler import NodeHandler
from .registry import operator_registry
from .strategy import StrategyGenerator
from .strategy.tensor_constructor_generator import TensorConstructorGenerator

__all__ = ['T...
from typing import Dict, List

import torch

from ..sharding_strategy import OperationData, OperationDataType
from .node_handler import NodeHandler
from .registry import operator_registry
from .strategy import StrategyGenerator, SumGenerator

__all__ = ['SumHandler']


@operator_registry.register(torch.Tensor.sum)
@ope...
import copy
from typing import List

from colossalai.auto_parallel.tensor_shard.sharding_strategy import (MemoryCost, ShardingStrategy, TrainCycleItem)

from .strategy_generator import FollowingStrategyGenerator

__all__ = ['UnaryElementwiseGenerator']


class UnaryElementwiseGenerator(FollowingStrategyGenerator):
    ...
import copy
import operator
import warnings
from functools import reduce
from typing import List

from colossalai.auto_parallel.tensor_shard.sharding_strategy import (
    CommAction,
    CommType,
    MemoryCost,
    ShardingStrategy,
    TrainCycleItem,
)
from colossalai.auto_parallel.tensor_shard.utils import ignore...
import copy
from typing import List

from colossalai.auto_parallel.tensor_shard.sharding_strategy import MemoryCost, ShardingStrategy, TrainCycleItem
from colossalai.auto_parallel.tensor_shard.utils import (
    enumerate_all_possible_1d_sharding,
    enumerate_all_possible_2d_sharding,
    ignore_sharding_exception,
)
...
from typing import Dict, List

from colossalai.auto_parallel.tensor_shard.sharding_strategy import (
    MemoryCost,
    OperationData,
    ShardingStrategy,
    TrainCycleItem,
)
from colossalai.device.device_mesh import DeviceMesh

from .strategy_generator import StrategyGenerator

__all__ = ['PlaceholderGenerator']
...
import copy
import operator
from functools import reduce
from typing import List

from colossalai.auto_parallel.tensor_shard.node_handler.strategy.strategy_generator import FollowingStrategyGenerator
from colossalai.auto_parallel.tensor_shard.sharding_strategy import (
    CommAction,
    CommType,
    MemoryCost,
    ...
from .batch_norm_generator import BatchNormStrategyGenerator
from .binary_elementwise_generator import BinaryElementwiseStrategyGenerator
from .conv_strategy_generator import ConvStrategyGenerator
from .embedding_generator import EmbeddingStrategyGenerator
from .getattr_generator import GetattrGenerator
from .getitem_g...
import operator
from abc import ABC, abstractmethod
from functools import reduce
from typing import Any, Dict, List, Union

import torch
from torch.fx import Node

from colossalai.auto_parallel.tensor_shard.sharding_strategy import (
    CommAction,
    CommType,
    OperationData,
    OperationDataType,
    ShardingSt...
import copy
import operator
from functools import reduce
from typing import List

from colossalai.auto_parallel.tensor_shard.node_handler.strategy.strategy_generator import FollowingStrategyGenerator
from colossalai.auto_parallel.tensor_shard.sharding_strategy import (
    CommAction,
    CommType,
    MemoryCost,
    ...
from typing import List

from colossalai.auto_parallel.tensor_shard.sharding_strategy import MemoryCost, ShardingStrategy, TrainCycleItem
from colossalai.auto_parallel.tensor_shard.utils import (
    enumerate_all_possible_1d_sharding,
    enumerate_all_possible_2d_sharding,
    ignore_sharding_exception,
)
from coloss...
import copy
import operator
from functools import reduce
from typing import List

from colossalai.auto_parallel.tensor_shard.sharding_strategy import (
    CommType,
    MemoryCost,
    ShardingStrategy,
    TrainCycleItem,
)
from colossalai.auto_parallel.tensor_shard.utils import ignore_sharding_exception
from colossa...
import copy
from typing import List

from colossalai.auto_parallel.tensor_shard.sharding_strategy import (
    CommType,
    MemoryCost,
    ShardingStrategy,
    TrainCycleItem,
)
from colossalai.logging import get_dist_logger
from colossalai.tensor.shape_consistency import CollectiveCommPattern
from colossalai.tensor...
import copy
from typing import List

from colossalai.auto_parallel.tensor_shard.node_handler.strategy.strategy_generator import FollowingStrategyGenerator
from colossalai.auto_parallel.tensor_shard.sharding_strategy import (
    CommAction,
    CommType,
    MemoryCost,
    ShardingStrategy,
    TrainCycleItem,
)
from ...
import operator
from ast import arg
from functools import reduce
from typing import List

from colossalai.auto_parallel.tensor_shard.options import SolverPerference
from colossalai.auto_parallel.tensor_shard.sharding_strategy import (
    CommType,
    MemoryCost,
    ShardingStrategy,
    TrainCycleItem,
)
from coloss...
import copy
import operator
from functools import reduce
from typing import List

from colossalai.auto_parallel.tensor_shard.sharding_strategy import (
    CommType,
    MemoryCost,
    ShardingStrategy,
    TrainCycleItem,
)
from colossalai.auto_parallel.tensor_shard.utils import (
    enumerate_all_possible_1d_shardi...
import copy
import operator
import warnings
from functools import reduce
from typing import List

from colossalai.auto_parallel.tensor_shard.sharding_strategy import (
    CommAction,
    CommType,
    MemoryCost,
    ShardingStrategy,
    TrainCycleItem,
)
from colossalai.auto_parallel.tensor_shard.utils import ignore...