Column: python_code (string; lengths range from 0 to 456k characters). Each row below is one Python source file from the dataset; previews are truncated at the viewer limit, marked by a trailing "...".
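Since the previews are cut off, anyone who wants the complete files has to load the dataset itself. Below is a minimal sketch using the Hugging Face datasets library; the dataset path "user/python-code-dataset" is a hypothetical placeholder, since this page does not name the dataset.

    # Minimal sketch: load a dataset with a "python_code" string column and
    # reproduce the length range shown above (0 to ~456k characters).
    # NOTE: "user/python-code-dataset" is a hypothetical placeholder path.
    from datasets import load_dataset

    ds = load_dataset("user/python-code-dataset", split="train")

    lengths = [len(s) for s in ds["python_code"]]
    print(f"rows: {len(lengths)}")
    print(f"shortest: {min(lengths)} chars, longest: {max(lengths)} chars")

    # Print the first row in full, untruncated.
    print(ds[0]["python_code"])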
import numpy as np
import cv2 as cv

# aruco
adict = cv.aruco.Dictionary_get(cv.aruco.DICT_4X4_50)
cv.imshow("marker", cv.aruco.drawMarker(adict, 0, 400))

# random calibration data. your mileage may vary.
imsize = (800, 600)
K = cv.getDefaultNewCameraMatrix(np.diag([800, 800, 1]), imsize, True)

# AR scene
cv.ovis.add...
import numpy as np
import cv2 as cv

# add some external resources
cv.ovis.addResourceLocation("packs/Sinbad.zip")

# camera intrinsics
imsize = (800, 600)
K = np.diag([800, 800, 1])
K[:2, 2] = (400, 500)  # offset pp

# observer scene
owin = cv.ovis.createWindow("VR", imsize)
cv.ovis.createGridMesh("ground", (10, 10), ...
#!/usr/bin/env python
import os
import cv2 as cv
import numpy as np
from tests_common import NewOpenCVTests, unittest

class cudaarithm_test(NewOpenCVTests):

    def setUp(self):
        super(cudaarithm_test, self).setUp()
        if not cv.cuda.getCudaEnabledDeviceCount():
            self.skipTest("No CUDA-capable ...
#!/usr/bin/env python
import subprocess
import os
import sys

basedir = os.path.dirname(sys.argv[0])
readme = os.path.join(basedir, "README.md")
with open(readme) as f:
    inp = f.read()
out = ""
it = iter(inp.splitlines(True))
for line in it:
    out += line
    if line.startswith("```cmdoutput"):
        # Get command.
        ...
import os
import re
from datetime import datetime
from setuptools import find_packages, setup
from op_builder.utils import get_cuda_bare_metal_version

try:
    import torch
    from torch.utils.cpp_extension import CUDA_HOME, BuildExtension, CUDAExtension
    print("\n\ntorch.__version__ = {}\n\n".format(torch.__ve...
import os

from .builder import Builder
from .utils import append_nvcc_threads

class ScaledMaskedSoftmaxBuilder(Builder):
    NAME = "scaled_masked_softmax"
    PREBUILT_IMPORT_PATH = "colossalai._C.scaled_masked_softmax"

    def __init__(self):
        super().__init__(name=ScaledMaskedSoftmaxBuilder.NAME, prebuilt...
import os

from .builder import Builder
from .utils import append_nvcc_threads, get_cuda_cc_flag

class LayerNormBuilder(Builder):
    NAME = "layernorm"
    PREBUILT_IMPORT_PATH = "colossalai._C.layernorm"

    def __init__(self):
        super().__init__(name=LayerNormBuilder.NAME, prebuilt_import_path=LayerNormBuil...
import os

from .builder import Builder
from .utils import get_cuda_cc_flag

class FusedOptimBuilder(Builder):
    NAME = "fused_optim"
    PREBUILT_IMPORT_PATH = "colossalai._C.fused_optim"

    def __init__(self):
        super().__init__(name=FusedOptimBuilder.NAME, prebuilt_import_path=FusedOptimBuilder.PREBUILT_I...
import os

from .builder import Builder
from .utils import append_nvcc_threads, get_cuda_cc_flag

class MultiHeadAttnBuilder(Builder):
    NAME = "multihead_attention"
    PREBUILT_IMPORT_PATH = "colossalai._C.multihead_attention"

    def __init__(self):
        super().__init__(name=MultiHeadAttnBuilder.NAME, ...
from .cpu_adam import CPUAdamBuilder
from .fused_optim import FusedOptimBuilder
from .layernorm import LayerNormBuilder
from .moe import MOEBuilder
from .multi_head_attn import MultiHeadAttnBuilder
from .scaled_masked_softmax import ScaledMaskedSoftmaxBuilder
from .scaled_upper_triangle_masked_softmax import ScaledUppe...
import importlib
import os
import time
from abc import ABC, abstractmethod
from pathlib import Path
from typing import List

def print_rank_0(message):
    """
    Print on only one process to avoid spamming.
    """
    try:
        import torch.distributed as dist
        if not dist.is_initialized():
            is...
import os

from .builder import Builder
from .utils import append_nvcc_threads

class CPUAdamBuilder(Builder):
    NAME = "cpu_adam"
    PREBUILT_IMPORT_PATH = "colossalai._C.cpu_adam"

    def __init__(self):
        super().__init__(name=CPUAdamBuilder.NAME, prebuilt_import_path=CPUAdamBuilder.PREBUILT_IMPORT_PATH)
        ...
import re
import subprocess
from typing import List

def get_cuda_bare_metal_version(cuda_dir):
    raw_output = subprocess.check_output([cuda_dir + "/bin/nvcc", "-V"], universal_newlines=True)
    output = raw_output.split()
    release_idx = output.index("release") + 1
    release = output[release_idx].split(".")
    ...
import os

from .builder import Builder
from .utils import append_nvcc_threads, get_cuda_cc_flag

class ScaledUpperTrainglemaskedSoftmaxBuilder(Builder):
    NAME = "scaled_upper_triangle_masked_softmax"
    PREBUILT_IMPORT_PATH = "colossalai._C.scaled_upper_triangle_masked_softmax"

    def __init__(self):
        su...
import os

from .builder import Builder
from .utils import append_nvcc_threads, get_cuda_cc_flag

class MOEBuilder(Builder):
    NAME = "moe"
    PREBUILT_IMPORT_PATH = "colossalai._C.moe"

    def __init__(self):
        super().__init__(name=MOEBuilder.NAME, prebuilt_import_path=MOEBuilder.PREBUILT_IMPORT_PATH)
        ...
#!/usr/bin/env python
# -*- encoding: utf-8 -*-

from functools import partial
from pathlib import Path

import pytest
import torch
import torch.multiprocessing as mp

from colossalai import launch
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
from colossalai...
#!/usr/bin/env python
# -*- encoding: utf-8 -*-

parallel = dict(
    pipeline=dict(size=2),
    tensor=dict(
        size=4,
        mode='2d'
    )
)
#!/usr/bin/env python
# -*- encoding: utf-8 -*-

parallel = dict(
    pipeline=dict(size=2),
    tensor=dict(
        size=8,
        mode='3d'
    )
)
#!/usr/bin/env python
# -*- encoding: utf-8 -*-

parallel = dict(
    pipeline=dict(size=2),
    tensor=dict(
        size=8,
        depth=2,
        mode='2.5d'
    )
)
#!/usr/bin/env python
# -*- encoding: utf-8 -*-

from functools import partial

import colossalai
import pytest
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.tes...
from functools import partial

import colossalai
from colossalai.utils.cuda import get_current_device
import pytest
import torch
import torch.multiprocessing as mp
from colossalai.nn.optimizer import HybridAdam
from colossalai.testing import parameterize, rerun_if_address_is_in_use
from colossalai.utils import free_por...
import pytest

import colossalai
from colossalai.utils.cuda import get_current_device
from colossalai.gemini.tensor_utils import (colo_tensor_mem_usage, colo_model_data_tensor_move,
                                            colo_model_data_tensor_move_inline, colo_model_data_move_to_cpu,
                                            ...
from copy import deepcopy
from functools import partial

import colossalai
import pytest
import torch
import torch.multiprocessing as mp
from colossalai.testing import parameterize, rerun_if_address_is_in_use
from colossalai.utils import free_port
from colossalai.zero.shard_utils import (BucketTensorShardStrategy, Tens...
import pytest

import colossalai
import torch
import torch.multiprocessing as mp
from colossalai.testing import rerun_if_address_is_in_use
from colossalai.utils.cuda import get_current_device
from colossalai.utils import free_port
from functools import partial
from tests.test_tensor.common_utils import set_seed
from tes...
from functools import partial

import pytest
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from common import CONFIG, check_sharded_model_params
from torch.nn.parallel import DistributedDataParallel as DDP

import colossalai
from colossalai.amp import convert_to_apex_amp
from colossal...
#!/usr/bin/env python
# -*- encoding: utf-8 -*-

from functools import partial

import colossalai
import pytest
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from colossalai.core import global_context as gpc
from colossalai.testing import rerun_if_address_is_in_use
from colossalai.uti...
from functools import partial

import torch
import torch.distributed as dist

from colossalai.logging import get_dist_logger
from colossalai.utils import checkpoint
from colossalai.zero.shard_utils import TensorShardStrategy
from colossalai.zero.sharded_model import ShardedModelV2

LOGGER = get_dist_logger('zero_test')
...
#!/usr/bin/env python
# -*- encoding: utf-8 -*-

from functools import partial

import pytest
import torch
import torch.multiprocessing as mp
from common import CONFIG

import colossalai
from colossalai.gemini.memory_tracer.utils import colo_model_mem_usage
from colossalai.logging import get_dist_logger
from colossalai...
#!/usr/bin/env python
# -*- encoding: utf-8 -*-

from functools import partial

import pytest
import torch
import torch.multiprocessing as mp
from common import CONFIG, check_grads_padding, run_fwd_bwd
from torch.nn.parallel import DistributedDataParallel as DDP

import colossalai
from colossalai.testing import paramet...
#!/usr/bin/env python
# -*- encoding: utf-8 -*-

from copy import deepcopy
from functools import partial

import colossalai
import pytest
import torch
import torch.multiprocessing as mp
from colossalai.testing import parameterize, rerun_if_address_is_in_use
from colossalai.utils import free_port
from colossalai.zero.in...
from functools import partial

import pytest
import torch
import torch.multiprocessing as mp
import torch.nn as nn
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.testing import assert_close

import colossalai
from colossalai.tensor import ProcessGroup
from colossalai.testing import parameterize...
from functools import partial

import pytest
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
import torch.nn as nn

import colossalai
from colossalai.tensor import ProcessGroup
from colossalai.utils import free_port, get_current_device
from colossalai.utils.model.colo_init_context impor...
import copy
from functools import partial

import pytest
import torch
import torch.multiprocessing as mp
import torch.nn as nn
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.testing import assert_close

import colossalai
from colossalai.testing.random import seed_all
from colossalai.utils impor...

import copy
from functools import partial

import pytest
import torch
import torch.multiprocessing as mp
import torch.nn as nn
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.testing import assert_close

import colossalai
from colossalai.testing.random import seed_all
from colossalai.utils impor...
import pytest
import torch
from einops import rearrange

from colossalai.kernel.cuda_native.flash_attention import HAS_FLASH_ATTN, HAS_MEM_EFF_ATTN, HAS_TRITON

if HAS_FLASH_ATTN:
    from colossalai.kernel.cuda_native.flash_attention import (
        MaskedFlashAttention,
        flash_attention_q_k_v,
        flash_a...
import torch
from colossalai.utils.model.lazy_init_context import LazyInitContext
from torchvision.models import resnet34
import random
import numpy as np

MANUAL_SEED = 0
random.seed(MANUAL_SEED)
np.random.seed(MANUAL_SEED)
torch.manual_seed(MANUAL_SEED)

def test_lazy_init_with_meta():
    ctx = LazyInitContext(to_m...
#!/usr/bin/env python
# -*- encoding: utf-8 -*-

import copy

import colossalai
from colossalai.zero.sharded_model.sharded_model_v2 import ShardedModelV2
import pytest
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
import torch.nn as nn
from colossalai.logging import disable_existing_l...
from colossalai.tensor import distspec, ColoTensorSpec, ProcessGroup
from colossalai.tensor.colo_parameter import ColoParameter

import colossalai
import pytest
import torch
import torch.multiprocessing as mp
from colossalai.logging import disable_existing_loggers
from colossalai.utils import free_port, get_current_devi...
from colossalai.utils import free_port
from colossalai.testing import rerun_if_address_is_in_use
from colossalai.zero.sharded_param import ShardedTensor
from colossalai.gemini.tensor_utils import colo_model_data_tensor_move, colo_model_data_tensor_move_inline

import colossalai
import torch
import torch.multiprocessin...
import os, shutil
import torch
import pytest
from copy import deepcopy
from functools import partial
import torch.multiprocessing as mp
import torch.distributed as dist
from torch.optim.lr_scheduler import CosineAnnealingLR
from torch.optim.lr_scheduler import MultiplicativeLR
from colossalai.nn.lr_scheduler import C...
import pytest

import colossalai
from colossalai.utils.cuda import get_current_device
from colossalai.utils.memory import colo_set_process_memory_fraction, colo_device_memory_capacity
from colossalai.utils import free_port
from functools import partial
import torch.multiprocessing as mp

def _run_colo_set_process_mem...
#!/usr/bin/env python
# -*- encoding: utf-8 -*-

import pytest
import torch
import torch.nn.functional as F

from colossalai.context.parallel_mode import ParallelMode
from colossalai.context.random import add_seed, seed, set_mode, reset_seeds
from colossalai.utils.activation_checkpoint import checkpoint

def forward(x,...
#!/usr/bin/env python
# -*- encoding: utf-8 -*-

import pprint
from functools import partial

import colossalai.nn as col_nn
import pytest
import torch
import torch.multiprocessing as mp
import torch.nn as nn
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
fro...

#!/usr/bin/env python
# -*- encoding: utf-8 -*-

import pprint
from functools import partial

import colossalai.nn as col_nn
import pytest
import torch
import torch.multiprocessing as mp
import torch.nn as nn
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
fro...

#!/usr/bin/env python
# -*- encoding: utf-8 -*-

import pprint
from functools import partial

import colossalai.nn as col_nn
import pytest
import torch
import torch.multiprocessing as mp
import torch.nn as nn
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
fro...

#!/usr/bin/env python
# -*- encoding: utf-8 -*-

import pprint
from functools import partial

import colossalai.nn as col_nn
import pytest
import torch
import torch.multiprocessing as mp
import torch.nn as nn
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
fro...
import os
from functools import partial
from tempfile import TemporaryDirectory
from typing import Dict

import colossalai
import pytest
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
import torch.nn as nn
from colossalai.testing import rerun_if_address_is_in_use
from colossalai.utils ...
import torch
import torch.nn as nn
from colossalai.utils.checkpoint_io.meta import ParamDistMeta
from colossalai.utils.checkpoint_io.utils import build_checkpoints
from torch.optim import Adam

class DummyModel(nn.Module):

    def __init__(self) -> None:
        super().__init__()
        self.fc = nn.Linear(20, 1)
        ...
import os
from functools import partial
from tempfile import TemporaryDirectory

import colossalai
import pytest
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
import torch.nn as nn
from colossalai.testing import rerun_if_address_is_in_use
from colossalai.utils import free_port
from co...
from colossalai.utils.checkpoint_io.meta import ParamDistMeta
from colossalai.utils.checkpoint_io.constant import GLOBAL_META_FILE_NAME
from colossalai.utils.checkpoint_io.io import save, merge
from colossalai.testing import rerun_if_address_is_in_use
from colossalai.utils import free_port
from tempfile import Temporar...
import torch
from colossalai.utils.checkpoint_io.meta import ParamRedistMeta
from colossalai.utils.checkpoint_io.distributed import flatten_zero_param, split_tp_param, unmerge_param

def test_flatten_zero_param_even() -> None:
    redist_meta = ParamRedistMeta(4, 1, zero_start_dp_rank=0, zero_offsets=[0, 4, 8, 12])
    ...
import torch
from colossalai.utils.checkpoint_io.meta import ParamDistMeta
from colossalai.utils.checkpoint_io.distributed import unflatten_zero_param, gather_tp_param, merge_param

def test_unflatten_zero_param_even() -> None:
    dist_metas = [ParamDistMeta(i, 4, 0, 1, zero_numel=16, zero_orig_shape=[4, 4]) for i in...
from copy import deepcopy
from functools import partial
from tempfile import TemporaryDirectory
from typing import Dict

import colossalai
import pytest
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
import torch.nn as nn
from colossalai.testing import rerun_if_address_is_in_use
from c...
import copy
import pytest

import colossalai
import torch
import torch.multiprocessing as mp
from colossalai.testing import rerun_if_address_is_in_use
from colossalai.utils.cuda import get_current_device
from colossalai.utils import free_port
from colossalai.utils.model.colo_init_context import ColoInitContext
from fun...
import pytest

import colossalai
import torch
import torch.multiprocessing as mp
from colossalai.testing import rerun_if_address_is_in_use
from colossalai.utils.cuda import get_current_device
from colossalai.utils import free_port
from functools import partial
from colossalai.nn.parallel.reducer import Reducer
import to...
import os
import random
from functools import partial
from typing import Callable, Type

import numpy as np
import pytest
import torch
import torch.distributed as dist
import torch.multiprocessing as mp

import colossalai
from colossalai.gemini.chunk import ChunkManager, search_chunk_configuration
from colossalai.gemin...
#!/usr/bin/env python
# -*- encoding: utf-8 -*-

import os
from pathlib import Path

import pytest
from torchvision import transforms, datasets
from torch.utils.data import DataLoader

@pytest.mark.cpu
def test_cifar10_dataset():
    # build transform
    transform_pipeline = [transforms.ToTensor()]
    transform_pipe...
#!/usr/bin/env python
# -*- encoding: utf-8 -*-

import os
from functools import partial
from pathlib import Path

import pytest
import torch
import torch.distributed as dist
import torch.multiprocessing as mp

import colossalai
from torchvision import transforms, datasets
from colossalai.context import ParallelMode, C...
#!/usr/bin/env python
# -*- encoding: utf-8 -*-

import os
from functools import partial
from pathlib import Path

import pytest
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from torchvision import transforms, datasets

import colossalai
from colossalai.context import ParallelMode, C...
import torch

from colossalai.auto_parallel.tensor_shard.utils import (
    get_broadcast_shape,
    is_broadcastable,
    recover_sharding_spec_for_broadcast_shape,
)
from colossalai.device.device_mesh import DeviceMesh
from colossalai.tensor.sharding_spec import ShardingSpec

def test_is_broadcastable():
    x1 = to...
import torch

from colossalai.auto_parallel.tensor_shard.options import SolverOptions
from colossalai.auto_parallel.tensor_shard.sharding_strategy import OperationDataType
from colossalai.auto_parallel.tensor_shard.solver import CostGraph, GraphAnalyser, Solver, StrategiesConstructor
from colossalai.device.device_mesh ...
import torch
import torch.nn as nn

from colossalai.auto_parallel.tensor_shard.solver import GraphAnalyser
from colossalai.fx import ColoGraphModule, ColoTracer

class LinearModel(nn.Module):

    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(4, 4)
        self.relu = nn.ReLU(inplace=...
import copy
from functools import partial

import pytest
import torch
import torch.multiprocessing as mp
from torch.nn.parallel import DistributedDataParallel as DDP

from colossalai.auto_parallel.tensor_shard.initialize import initialize_model
from colossalai.device.device_mesh import DeviceMesh
from colossalai.initia...
import torch
from torch.fx import GraphModule
from torchvision.models import resnet50

from colossalai.auto_parallel.tensor_shard.constants import BATCHNORM_MODULE_OP
from colossalai.auto_parallel.tensor_shard.options import SolverOptions
from colossalai.auto_parallel.tensor_shard.solver import CostGraph, GraphAnalyser...
import copy
from functools import partial

import pytest
import torch
import torch.multiprocessing as mp
from torch.nn.parallel import DistributedDataParallel as DDP

from colossalai.auto_parallel.tensor_shard.initialize import initialize_model
from colossalai.device.device_mesh import DeviceMesh
from colossalai.initia...
import copy
from functools import partial

import pytest
import torch
import torch.multiprocessing as mp
import torch.nn as nn

from colossalai.auto_parallel.tensor_shard.initialize import initialize_model
from colossalai.device.device_mesh import DeviceMesh
from colossalai.initialize import launch
from colossalai.logg...
from functools import partial

import pytest
import torch
import torch.multiprocessing as mp

from colossalai.auto_parallel.tensor_shard.initialize import initialize_model
from colossalai.device.device_mesh import DeviceMesh
from colossalai.initialize import launch
from colossalai.logging import disable_existing_logger...
from functools import partial
from typing import Optional, Tuple, Union

import pytest
import torch
import torch.multiprocessing as mp
import torch.nn as nn
from torch.utils.checkpoint import checkpoint
from transformers.pytorch_utils import Conv1D

from colossalai.auto_parallel.tensor_shard.initialize import initializ...
from functools import partial

import pytest
import torch
import torch.multiprocessing as mp
import torch.nn as nn

from colossalai.auto_parallel.tensor_shard.node_handler import LinearModuleHandler
from colossalai.auto_parallel.tensor_shard.sharding_strategy import ShardingStrategy, StrategiesVector
from colossalai.de...
from functools import partial

import pytest
import torch
import torch.multiprocessing as mp
import torch.nn as nn

from colossalai.auto_parallel.meta_profiler import meta_register
from colossalai.auto_parallel.tensor_shard.sharding_strategy import OperationData, OperationDataType
from colossalai.device.device_mesh imp...
from functools import partial

import pytest
import torch
import torch.multiprocessing as mp
import torch.nn as nn

from colossalai.device.device_mesh import DeviceMesh
from colossalai.fx import ColoGraphModule, ColoTracer
from colossalai.initialize import launch
from colossalai.logging import disable_existing_loggers
...

from functools import partial

import pytest
import torch
import torch.multiprocessing as mp
import torch.nn as nn

from colossalai.device.device_mesh import DeviceMesh
from colossalai.fx import ColoGraphModule, ColoTracer
from colossalai.initialize import launch
from colossalai.logging import disable_existing_loggers
...

from functools import partial

import pytest
import torch
import torch.multiprocessing as mp
import torch.nn as nn

from colossalai.device.device_mesh import DeviceMesh
from colossalai.fx import ColoGraphModule, ColoTracer
from colossalai.initialize import launch
from colossalai.logging import disable_existing_loggers
...
import copy
from pprint import pprint
from typing import Dict, List

import torch
from torch.fx import GraphModule

from colossalai.auto_parallel.passes.runtime_apply_pass import runtime_apply_pass
from colossalai.auto_parallel.passes.runtime_preparation_pass import runtime_preparation_pass
from colossalai.auto_paralle...
from functools import partial

import pytest
import torch
import torch.multiprocessing as mp
import torch.nn as nn

from colossalai.auto_parallel.tensor_shard.sharding_strategy import (
    MemoryCost,
    OperationData,
    OperationDataType,
    ShardingStrategy,
    StrategiesVector,
    TrainCycleItem,
)
from colos...
from functools import partial

import pytest
import torch
import torch.multiprocessing as mp
import torch.nn as nn

from colossalai.auto_parallel.tensor_shard.node_handler import LinearModuleHandler
from colossalai.auto_parallel.tensor_shard.sharding_strategy import (
    MemoryCost,
    OperationData,
    OperationDat...
import torch
import torch.nn as nn

from colossalai.auto_parallel.tensor_shard.node_handler.getattr_handler import GetattrHandler
from colossalai.auto_parallel.tensor_shard.sharding_strategy import OperationData, OperationDataType, StrategiesVector
from colossalai.device.device_mesh import DeviceMesh
from colossalai.fx...
from functools import partial

import pytest
import torch
import torch.multiprocessing as mp
import torch.nn as nn

from colossalai.auto_parallel.tensor_shard.node_handler import SplitHandler
from colossalai.auto_parallel.tensor_shard.node_handler.conv_handler import ConvFunctionHandler
from colossalai.auto_parallel.te...
import torch
import torch.nn as nn

from colossalai.auto_parallel.tensor_shard.node_handler import DefaultReshapeHandler
from colossalai.auto_parallel.tensor_shard.node_handler.conv_handler import ConvFunctionHandler
from colossalai.auto_parallel.tensor_shard.sharding_strategy import OperationData, OperationDataType, S...
import pytest
import torch
import torch.nn as nn

from colossalai.auto_parallel.tensor_shard.node_handler.matmul_handler import (
    MatMulHandler,
    MatMulType,
    _get_bmm_logical_shape,
    get_matmul_type,
)
from colossalai.auto_parallel.tensor_shard.sharding_strategy import (
    OperationData,
    OperationDa...
import torch
import torch.nn as nn

from colossalai.auto_parallel.tensor_shard.node_handler.output_handler import OutputHandler
from colossalai.auto_parallel.tensor_shard.sharding_strategy import OperationData, OperationDataType, StrategiesVector
from colossalai.device.device_mesh import DeviceMesh
from colossalai.fx i...
from functools import partial

import pytest
import torch
import torch.multiprocessing as mp
import torch.nn as nn
import torch.nn.functional as F

from colossalai.auto_parallel.tensor_shard.node_handler.linear_handler import LinearFunctionHandler
from colossalai.auto_parallel.tensor_shard.node_handler.softmax_handler ...
import torch
import torch.nn as nn

from colossalai.auto_parallel.tensor_shard.node_handler.placeholder_handler import PlaceholderHandler
from colossalai.auto_parallel.tensor_shard.sharding_strategy import OperationData, OperationDataType, StrategiesVector
from colossalai.device.device_mesh import DeviceMesh
from colos...
from functools import partial

import torch
import torch.multiprocessing as mp
import torch.nn as nn

from colossalai.auto_parallel.tensor_shard.node_handler import LinearFunctionHandler
from colossalai.auto_parallel.tensor_shard.options import ShardOption
from colossalai.auto_parallel.tensor_shard.sharding_strategy im...
from functools import partial

import pytest
import torch
import torch.multiprocessing as mp
import torch.nn as nn

from colossalai.auto_parallel.tensor_shard.node_handler import PermuteHandler, TransposeHandler
from colossalai.auto_parallel.tensor_shard.node_handler.conv_handler import ConvFunctionHandler
from colossa...
from faulthandler import disable
from functools import partial
from xml.dom import WrongDocumentErr

import pytest
import torch
import torch.multiprocessing as mp
import torch.nn as nn
from typing_extensions import Self

from colossalai.auto_parallel.tensor_shard.node_handler import LinearFunctionHandler
from colossala...
import torch
import torch.nn as nn

from colossalai.auto_parallel.tensor_shard.node_handler.where_handler import \
    WhereHandler
from colossalai.auto_parallel.tensor_shard.sharding_strategy import (OperationData, OperationDataType,
                                                                 StrategiesVector)
from colossalai.device.device_mesh import DeviceMesh
from colossala...
import torch
import torch.nn as nn

from colossalai.auto_parallel.tensor_shard.node_handler.tensor_constructor_handler import TensorConstructorHandler
from colossalai.auto_parallel.tensor_shard.sharding_strategy import OperationData, OperationDataType, StrategiesVector
from colossalai.device.device_mesh import DeviceMe...
from faulthandler import disable
from functools import partial
from xml.dom import WrongDocumentErr

import pytest
import torch
import torch.multiprocessing as mp
import torch.nn as nn
import torch.nn.functional as F
from typing_extensions import Self

from colossalai.auto_parallel.tensor_shard.node_handler import Line...
import torch
import torch.nn as nn

from colossalai.auto_parallel.tensor_shard.node_handler.conv_handler import ConvFunctionHandler
from colossalai.auto_parallel.tensor_shard.node_handler.unary_elementwise_handler import UnaryElementwiseHandler
from colossalai.auto_parallel.tensor_shard.sharding_strategy import Operati...
import copy
from typing import Dict, List

import torch
from torch.fx import GraphModule

from colossalai.auto_parallel.passes.runtime_apply_pass import runtime_apply_pass
from colossalai.auto_parallel.passes.runtime_preparation_pass import runtime_preparation_pass
from colossalai.auto_parallel.tensor_shard.options imp...
from functools import partial

import pytest
import torch
import torch.multiprocessing as mp
import torch.nn as nn

from colossalai.auto_parallel.tensor_shard.node_handler import ViewHandler
from colossalai.auto_parallel.tensor_shard.node_handler.conv_handler import ConvFunctionHandler
from colossalai.auto_parallel.ten...
from functools import partial

import pytest
import torch
import torch.multiprocessing as mp
import torch.nn as nn

from colossalai.auto_parallel.tensor_shard.node_handler import LinearFunctionHandler, LinearModuleHandler
from colossalai.auto_parallel.tensor_shard.sharding_strategy import (
    OperationData,
    Opera...
from functools import partial

import pytest
import torch
import torch.multiprocessing as mp
import torch.nn as nn

from colossalai.auto_parallel.tensor_shard.node_handler.conv_handler import ConvFunctionHandler
from colossalai.auto_parallel.tensor_shard.node_handler.linear_handler import LinearFunctionHandler
from col...
import pytest
import torch
import torch.nn as nn

from colossalai.auto_parallel.tensor_shard.node_handler.normal_pooling_handler import NormPoolingHandler
from colossalai.auto_parallel.tensor_shard.sharding_strategy import OperationData, OperationDataType, StrategiesVector
from colossalai.device.device_mesh import Devi...
from faulthandler import disable
from functools import partial
from xml.dom import WrongDocumentErr

import pytest
import torch
import torch.multiprocessing as mp
import torch.nn as nn
from typing_extensions import Self

from colossalai.auto_parallel.tensor_shard.node_handler import LinearFunctionHandler, LinearModuleH...