program(1.0)
[buildInfo = dict<tensor<string, []>, tensor<string, []>>({{"coremlc-component-MIL", "3405.2.1"}, {"coremlc-version", "3405.2.1"}, {"coremltools-component-torch", "2.5.0"}, {"coremltools-source-dialect", "TorchScript"}, {"coremltools-version", "8.3.0"}})]
{
    func main<ios15>(tensor<fp32, [2, 1, 640]> c_in, tensor<fp32, [2, 1, 640]> h_in, tensor<int32, [1]> target_lengths, tensor<int32, [1, ?]> targets) [FlexibleShapeInformation = tuple<tuple<tensor<string, []>, dict<tensor<string, []>, tensor<int32, [?]>>>, tuple<tensor<string, []>, dict<tensor<string, []>, list<tensor<int32, [2]>, ?>>>>((("DefaultShapes", {{"targets", [1, 1]}}), ("RangeDims", {{"targets", [[1, 1], [1, 1000]]}})))] {
            tensor<int32, []> input_axis_0 = const()[name = tensor<string, []>("input_axis_0"), val = tensor<int32, []>(0)];
            tensor<fp16, [8193, 640]> embed_weight_to_fp16 = const()[name = tensor<string, []>("embed_weight_to_fp16"), val = tensor<fp16, [8193, 640]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(64)))];
            tensor<fp16, [1, ?, 640]> input_cast_fp16 = gather(axis = input_axis_0, indices = targets, x = embed_weight_to_fp16)[name = tensor<string, []>("input_cast_fp16")];
            tensor<string, []> input_cast_fp16_to_fp32_dtype_0 = const()[name = tensor<string, []>("input_cast_fp16_to_fp32_dtype_0"), val = tensor<string, []>("fp32")];
            tensor<int32, []> split_0_num_splits_0 = const()[name = tensor<string, []>("split_0_num_splits_0"), val = tensor<int32, []>(2)];
            tensor<int32, []> split_0_axis_0 = const()[name = tensor<string, []>("split_0_axis_0"), val = tensor<int32, []>(0)];
            tensor<string, []> h_in_to_fp16_dtype_0 = const()[name = tensor<string, []>("h_in_to_fp16_dtype_0"), val = tensor<string, []>("fp16")];
            tensor<fp16, [2, 1, 640]> h_in_to_fp16 = cast(dtype = h_in_to_fp16_dtype_0, x = h_in)[name = tensor<string, []>("cast_12")];
            tensor<fp16, [1, 1, 640]> split_0_cast_fp16_0, tensor<fp16, [1, 1, 640]> split_0_cast_fp16_1 = split(axis = split_0_axis_0, num_splits = split_0_num_splits_0, x = h_in_to_fp16)[name = tensor<string, []>("split_0_cast_fp16")];
            tensor<int32, []> split_1_num_splits_0 = const()[name = tensor<string, []>("split_1_num_splits_0"), val = tensor<int32, []>(2)];
            tensor<int32, []> split_1_axis_0 = const()[name = tensor<string, []>("split_1_axis_0"), val = tensor<int32, []>(0)];
            tensor<string, []> c_in_to_fp16_dtype_0 = const()[name = tensor<string, []>("c_in_to_fp16_dtype_0"), val = tensor<string, []>("fp16")];
            tensor<fp16, [2, 1, 640]> c_in_to_fp16 = cast(dtype = c_in_to_fp16_dtype_0, x = c_in)[name = tensor<string, []>("cast_11")];
            tensor<fp16, [1, 1, 640]> split_1_cast_fp16_0, tensor<fp16, [1, 1, 640]> split_1_cast_fp16_1 = split(axis = split_1_axis_0, num_splits = split_1_num_splits_0, x = c_in_to_fp16)[name = tensor<string, []>("split_1_cast_fp16")];
            tensor<fp32, [2560]> concat_0 = const()[name = tensor<string, []>("concat_0"), val = tensor<fp32, [2560]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(10487168)))];
            tensor<fp32, [2560, 640]> concat_1 = const()[name = tensor<string, []>("concat_1"), val = tensor<fp32, [2560, 640]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(10497472)))];
            tensor<fp32, [2560, 640]> concat_2 = const()[name = tensor<string, []>("concat_2"), val = tensor<fp32, [2560, 640]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(17051136)))];
            tensor<int32, [1]> var_25_lstm_layer_0_lstm_h0_squeeze_axes_0 = const()[name = tensor<string, []>("op_25_lstm_layer_0_lstm_h0_squeeze_axes_0"), val = tensor<int32, [1]>([0])];
            tensor<fp16, [1, 640]> var_25_lstm_layer_0_lstm_h0_squeeze_cast_fp16 = squeeze(axes = var_25_lstm_layer_0_lstm_h0_squeeze_axes_0, x = split_0_cast_fp16_0)[name = tensor<string, []>("op_25_lstm_layer_0_lstm_h0_squeeze_cast_fp16")];
            tensor<string, []> var_25_lstm_layer_0_lstm_h0_squeeze_cast_fp16_to_fp32_dtype_0 = const()[name = tensor<string, []>("op_25_lstm_layer_0_lstm_h0_squeeze_cast_fp16_to_fp32_dtype_0"), val = tensor<string, []>("fp32")];
            tensor<int32, [1]> var_25_lstm_layer_0_lstm_c0_squeeze_axes_0 = const()[name = tensor<string, []>("op_25_lstm_layer_0_lstm_c0_squeeze_axes_0"), val = tensor<int32, [1]>([0])];
            tensor<fp16, [1, 640]> var_25_lstm_layer_0_lstm_c0_squeeze_cast_fp16 = squeeze(axes = var_25_lstm_layer_0_lstm_c0_squeeze_axes_0, x = split_1_cast_fp16_0)[name = tensor<string, []>("op_25_lstm_layer_0_lstm_c0_squeeze_cast_fp16")];
            tensor<string, []> var_25_lstm_layer_0_lstm_c0_squeeze_cast_fp16_to_fp32_dtype_0 = const()[name = tensor<string, []>("op_25_lstm_layer_0_lstm_c0_squeeze_cast_fp16_to_fp32_dtype_0"), val = tensor<string, []>("fp32")];
            tensor<string, []> var_25_lstm_layer_0_direction_0 = const()[name = tensor<string, []>("op_25_lstm_layer_0_direction_0"), val = tensor<string, []>("forward")];
            tensor<bool, []> var_25_lstm_layer_0_output_sequence_0 = const()[name = tensor<string, []>("op_25_lstm_layer_0_output_sequence_0"), val = tensor<bool, []>(true)];
            tensor<string, []> var_25_lstm_layer_0_recurrent_activation_0 = const()[name = tensor<string, []>("op_25_lstm_layer_0_recurrent_activation_0"), val = tensor<string, []>("sigmoid")];
            tensor<string, []> var_25_lstm_layer_0_cell_activation_0 = const()[name = tensor<string, []>("op_25_lstm_layer_0_cell_activation_0"), val = tensor<string, []>("tanh")];
            tensor<string, []> var_25_lstm_layer_0_activation_0 = const()[name = tensor<string, []>("op_25_lstm_layer_0_activation_0"), val = tensor<string, []>("tanh")];
            tensor<fp32, [1, 640]> var_25_lstm_layer_0_lstm_c0_squeeze_cast_fp16_to_fp32 = cast(dtype = var_25_lstm_layer_0_lstm_c0_squeeze_cast_fp16_to_fp32_dtype_0, x = var_25_lstm_layer_0_lstm_c0_squeeze_cast_fp16)[name = tensor<string, []>("cast_9")];
            tensor<fp32, [1, 640]> var_25_lstm_layer_0_lstm_h0_squeeze_cast_fp16_to_fp32 = cast(dtype = var_25_lstm_layer_0_lstm_h0_squeeze_cast_fp16_to_fp32_dtype_0, x = var_25_lstm_layer_0_lstm_h0_squeeze_cast_fp16)[name = tensor<string, []>("cast_10")];
            tensor<fp32, [1, ?, 640]> input_cast_fp16_to_fp32 = cast(dtype = input_cast_fp16_to_fp32_dtype_0, x = input_cast_fp16)[name = tensor<string, []>("cast_13")];
            tensor<fp32, [1, ?, 640]> var_25_lstm_layer_0_0, tensor<fp32, [?, 640]> var_25_lstm_layer_0_1, tensor<fp32, [?, 640]> var_25_lstm_layer_0_2 = lstm(activation = var_25_lstm_layer_0_activation_0, bias = concat_0, cell_activation = var_25_lstm_layer_0_cell_activation_0, direction = var_25_lstm_layer_0_direction_0, initial_c = var_25_lstm_layer_0_lstm_c0_squeeze_cast_fp16_to_fp32, initial_h = var_25_lstm_layer_0_lstm_h0_squeeze_cast_fp16_to_fp32, output_sequence = var_25_lstm_layer_0_output_sequence_0, recurrent_activation = var_25_lstm_layer_0_recurrent_activation_0, weight_hh = concat_2, weight_ih = concat_1, x = input_cast_fp16_to_fp32)[name = tensor<string, []>("op_25_lstm_layer_0")];
            tensor<fp32, [2560]> concat_3 = const()[name = tensor<string, []>("concat_3"), val = tensor<fp32, [2560]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(23604800)))];
            tensor<fp32, [2560, 640]> concat_4 = const()[name = tensor<string, []>("concat_4"), val = tensor<fp32, [2560, 640]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(23615104)))];
            tensor<fp32, [2560, 640]> concat_5 = const()[name = tensor<string, []>("concat_5"), val = tensor<fp32, [2560, 640]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(30168768)))];
            tensor<int32, [1]> var_25_lstm_h0_squeeze_axes_0 = const()[name = tensor<string, []>("op_25_lstm_h0_squeeze_axes_0"), val = tensor<int32, [1]>([0])];
            tensor<fp16, [1, 640]> var_25_lstm_h0_squeeze_cast_fp16 = squeeze(axes = var_25_lstm_h0_squeeze_axes_0, x = split_0_cast_fp16_1)[name = tensor<string, []>("op_25_lstm_h0_squeeze_cast_fp16")];
            tensor<string, []> var_25_lstm_h0_squeeze_cast_fp16_to_fp32_dtype_0 = const()[name = tensor<string, []>("op_25_lstm_h0_squeeze_cast_fp16_to_fp32_dtype_0"), val = tensor<string, []>("fp32")];
            tensor<int32, [1]> var_25_lstm_c0_squeeze_axes_0 = const()[name = tensor<string, []>("op_25_lstm_c0_squeeze_axes_0"), val = tensor<int32, [1]>([0])];
            tensor<fp16, [1, 640]> var_25_lstm_c0_squeeze_cast_fp16 = squeeze(axes = var_25_lstm_c0_squeeze_axes_0, x = split_1_cast_fp16_1)[name = tensor<string, []>("op_25_lstm_c0_squeeze_cast_fp16")];
            tensor<string, []> var_25_lstm_c0_squeeze_cast_fp16_to_fp32_dtype_0 = const()[name = tensor<string, []>("op_25_lstm_c0_squeeze_cast_fp16_to_fp32_dtype_0"), val = tensor<string, []>("fp32")];
            tensor<string, []> var_25_direction_0 = const()[name = tensor<string, []>("op_25_direction_0"), val = tensor<string, []>("forward")];
            tensor<bool, []> var_25_output_sequence_0 = const()[name = tensor<string, []>("op_25_output_sequence_0"), val = tensor<bool, []>(true)];
            tensor<string, []> var_25_recurrent_activation_0 = const()[name = tensor<string, []>("op_25_recurrent_activation_0"), val = tensor<string, []>("sigmoid")];
            tensor<string, []> var_25_cell_activation_0 = const()[name = tensor<string, []>("op_25_cell_activation_0"), val = tensor<string, []>("tanh")];
            tensor<string, []> var_25_activation_0 = const()[name = tensor<string, []>("op_25_activation_0"), val = tensor<string, []>("tanh")];
            tensor<fp32, [1, 640]> var_25_lstm_c0_squeeze_cast_fp16_to_fp32 = cast(dtype = var_25_lstm_c0_squeeze_cast_fp16_to_fp32_dtype_0, x = var_25_lstm_c0_squeeze_cast_fp16)[name = tensor<string, []>("cast_7")];
            tensor<fp32, [1, 640]> var_25_lstm_h0_squeeze_cast_fp16_to_fp32 = cast(dtype = var_25_lstm_h0_squeeze_cast_fp16_to_fp32_dtype_0, x = var_25_lstm_h0_squeeze_cast_fp16)[name = tensor<string, []>("cast_8")];
            tensor<fp32, [1, ?, 640]> decoder_output, tensor<fp32, [?, 640]> var_25_1, tensor<fp32, [?, 640]> var_25_2 = lstm(activation = var_25_activation_0, bias = concat_3, cell_activation = var_25_cell_activation_0, direction = var_25_direction_0, initial_c = var_25_lstm_c0_squeeze_cast_fp16_to_fp32, initial_h = var_25_lstm_h0_squeeze_cast_fp16_to_fp32, output_sequence = var_25_output_sequence_0, recurrent_activation = var_25_recurrent_activation_0, weight_hh = concat_5, weight_ih = concat_4, x = var_25_lstm_layer_0_0)[name = tensor<string, []>("op_25")];
            tensor<int32, []> var_26_axis_0 = const()[name = tensor<string, []>("op_26_axis_0"), val = tensor<int32, []>(0)];
            tensor<string, []> var_25_lstm_layer_0_1_to_fp16_dtype_0 = const()[name = tensor<string, []>("op_25_lstm_layer_0_1_to_fp16_dtype_0"), val = tensor<string, []>("fp16")];
            tensor<string, []> var_25_1_to_fp16_dtype_0 = const()[name = tensor<string, []>("op_25_1_to_fp16_dtype_0"), val = tensor<string, []>("fp16")];
            tensor<fp16, [?, 640]> var_25_1_to_fp16 = cast(dtype = var_25_1_to_fp16_dtype_0, x = var_25_1)[name = tensor<string, []>("cast_5")];
            tensor<fp16, [?, 640]> var_25_lstm_layer_0_1_to_fp16 = cast(dtype = var_25_lstm_layer_0_1_to_fp16_dtype_0, x = var_25_lstm_layer_0_1)[name = tensor<string, []>("cast_6")];
            tensor<fp16, [2, ?, 640]> var_26_cast_fp16 = stack(axis = var_26_axis_0, values = (var_25_lstm_layer_0_1_to_fp16, var_25_1_to_fp16))[name = tensor<string, []>("op_26_cast_fp16")];
            tensor<string, []> var_26_cast_fp16_to_fp32_dtype_0 = const()[name = tensor<string, []>("op_26_cast_fp16_to_fp32_dtype_0"), val = tensor<string, []>("fp32")];
            tensor<int32, []> var_27_axis_0 = const()[name = tensor<string, []>("op_27_axis_0"), val = tensor<int32, []>(0)];
            tensor<string, []> var_25_lstm_layer_0_2_to_fp16_dtype_0 = const()[name = tensor<string, []>("op_25_lstm_layer_0_2_to_fp16_dtype_0"), val = tensor<string, []>("fp16")];
            tensor<string, []> var_25_2_to_fp16_dtype_0 = const()[name = tensor<string, []>("op_25_2_to_fp16_dtype_0"), val = tensor<string, []>("fp16")];
            tensor<fp16, [?, 640]> var_25_2_to_fp16 = cast(dtype = var_25_2_to_fp16_dtype_0, x = var_25_2)[name = tensor<string, []>("cast_2")];
            tensor<fp16, [?, 640]> var_25_lstm_layer_0_2_to_fp16 = cast(dtype = var_25_lstm_layer_0_2_to_fp16_dtype_0, x = var_25_lstm_layer_0_2)[name = tensor<string, []>("cast_3")];
            tensor<fp16, [2, ?, 640]> var_27_cast_fp16 = stack(axis = var_27_axis_0, values = (var_25_lstm_layer_0_2_to_fp16, var_25_2_to_fp16))[name = tensor<string, []>("op_27_cast_fp16")];
            tensor<string, []> var_27_cast_fp16_to_fp32_dtype_0 = const()[name = tensor<string, []>("op_27_cast_fp16_to_fp32_dtype_0"), val = tensor<string, []>("fp32")];
            tensor<fp32, [2, ?, 640]> c_out = cast(dtype = var_27_cast_fp16_to_fp32_dtype_0, x = var_27_cast_fp16)[name = tensor<string, []>("cast_1")];
            tensor<fp32, [2, ?, 640]> h_out = cast(dtype = var_26_cast_fp16_to_fp32_dtype_0, x = var_26_cast_fp16)[name = tensor<string, []>("cast_4")];
            tensor<int32, [1]> target_lengths_tmp = identity(x = target_lengths)[name = tensor<string, []>("target_lengths_tmp")];
        } -> (decoder_output, h_out, c_out);
}
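
# The MIL program above defines a single main() function: an embedding gather over a
# [8193, 640] table followed by a two-layer LSTM (hidden size 640), taking token ids
# ("targets", flexible shape [1, 1..1000]), "target_lengths", and the stacked LSTM
# states "h_in"/"c_in" ([2, 1, 640]), and returning "decoder_output", "h_out", "c_out".
# Below is a minimal, hedged sketch of how one might drive this model from Python with
# coremltools, assuming the program is packaged as an .mlpackage; the file name
# "Decoder.mlpackage" is a placeholder, not taken from this file.

import numpy as np
import coremltools as ct

# Load the compiled model package (hypothetical path).
model = ct.models.MLModel("Decoder.mlpackage")

# Shapes follow the main() signature: a [1, T] int32 token-id matrix (T in 1..1000),
# a [1] int32 length vector, and two [2, 1, 640] float32 LSTM state tensors.
targets = np.array([[1]], dtype=np.int32)          # single token id as an example
target_lengths = np.array([1], dtype=np.int32)
h_in = np.zeros((2, 1, 640), dtype=np.float32)     # initial hidden states (both layers)
c_in = np.zeros((2, 1, 640), dtype=np.float32)     # initial cell states (both layers)

outputs = model.predict({
    "targets": targets,
    "target_lengths": target_lengths,
    "h_in": h_in,
    "c_in": c_in,
})

decoder_output = outputs["decoder_output"]  # [1, T, 640] second-layer LSTM sequence
h_out = outputs["h_out"]                    # stacked hidden states from both layers
c_out = outputs["c_out"]                    # stacked cell states from both layers

# Note: predict() requires a Core ML runtime (macOS); feeding h_out/c_out back in as
# h_in/c_in on the next call is the expected way to step such a decoder autoregressively.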