Upload folder using huggingface_hub
- .DS_Store +0 -0
- Melspectogram.mlmodelc/analytics/coremldata.bin +3 -0
- Melspectogram.mlmodelc/coremldata.bin +3 -0
- Melspectogram.mlmodelc/model.mil +157 -0
- Melspectogram.mlmodelc/weights/weight.bin +3 -0
- Melspectrogram_v2.mlmodelc/analytics/coremldata.bin +3 -0
- Melspectrogram_v2.mlmodelc/coremldata.bin +3 -0
- Melspectrogram_v2.mlmodelc/metadata.json +108 -0
- Melspectrogram_v2.mlmodelc/model.mil +177 -0
- Melspectrogram_v2.mlmodelc/weights/weight.bin +3 -0
- ParakeetDecoder.mlmodelc/analytics/coremldata.bin +3 -0
- ParakeetDecoder.mlmodelc/coremldata.bin +3 -0
- ParakeetDecoder.mlmodelc/model.mil +72 -0
- ParakeetDecoder.mlmodelc/weights/weight.bin +3 -0
- ParakeetEncoder_v2.mlmodelc/analytics/coremldata.bin +3 -0
- ParakeetEncoder_v2.mlmodelc/coremldata.bin +3 -0
- ParakeetEncoder_v2.mlmodelc/metadata.json +103 -0
- ParakeetEncoder_v2.mlmodelc/model.mil +0 -0
- ParakeetEncoder_v2.mlmodelc/weights/weight.bin +3 -0
- README.md +56 -3
- RNNTJoint.mlmodelc/analytics/coremldata.bin +3 -0
- RNNTJoint.mlmodelc/coremldata.bin +3 -0
- RNNTJoint.mlmodelc/model.mil +31 -0
- RNNTJoint.mlmodelc/weights/weight.bin +3 -0
- TokenDurationPrediction.mlmodelc/analytics/coremldata.bin +3 -0
- TokenDurationPrediction.mlmodelc/coremldata.bin +3 -0
- TokenDurationPrediction.mlmodelc/metadata.json +85 -0
- TokenDurationPrediction.mlmodelc/model.mil +25 -0
- config.json +1 -0
- parakeet_vocab.json +1033 -0
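
A commit like this one is typically produced with the Hub client's upload_folder helper. A minimal sketch follows; folder_path and repo_id are placeholders, and ignore_patterns is an optional way to keep Finder metadata such as .DS_Store out of the upload:

from huggingface_hub import HfApi

api = HfApi()
api.upload_folder(
    folder_path="./parakeet-coreml",          # placeholder: local folder holding the .mlmodelc bundles
    repo_id="<user>/<repo>",                  # placeholder: target model repo
    repo_type="model",
    commit_message="Upload folder using huggingface_hub",
    ignore_patterns=[".DS_Store"],            # optional: skip macOS Finder metadata
)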
.DS_Store
ADDED
Binary file (10.2 kB)
Melspectogram.mlmodelc/analytics/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6271a1b89644607c3ab203f79b33c86a286c041c75cb9c203332322223a398d3
+size 243
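
What the repo stores for each of these binaries is a Git LFS pointer (version, oid, size) rather than the payload itself. A small sketch for parsing a pointer file and checking a downloaded blob against it (assumes Python 3.9+; paths are placeholders):

import hashlib
from pathlib import Path

def read_lfs_pointer(path):
    # Parse a Git LFS pointer file into {'version', 'oid', 'size'}.
    fields = dict(line.split(" ", 1) for line in Path(path).read_text().splitlines() if line)
    return {
        "version": fields["version"],
        "oid": fields["oid"].removeprefix("sha256:"),
        "size": int(fields["size"]),
    }

def matches_pointer(blob_path, pointer):
    # Check a downloaded blob against the pointer's sha256 and size.
    data = Path(blob_path).read_bytes()
    return len(data) == pointer["size"] and hashlib.sha256(data).hexdigest() == pointer["oid"]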
Melspectogram.mlmodelc/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:19e161ad38b48aee83ab23e94a639fd2e4fc49aacbed47086c832bcfa65645c9
+size 396
Melspectogram.mlmodelc/model.mil
ADDED
@@ -0,0 +1,157 @@
1 |
+
program(1.0)
|
2 |
+
[buildInfo = dict<tensor<string, []>, tensor<string, []>>({{"coremlc-component-MIL", "3405.2.1"}, {"coremlc-version", "3405.2.1"}, {"coremltools-component-torch", "2.4.0"}, {"coremltools-source-dialect", "TorchScript"}, {"coremltools-version", "7.2"}})]
|
3 |
+
{
|
4 |
+
func main<ios15>(tensor<int32, [1]> audio_length, tensor<fp32, [1, ?]> audio_signal) [FlexibleShapeInformation = tuple<tuple<tensor<string, []>, dict<tensor<string, []>, tensor<int32, [?]>>>, tuple<tensor<string, []>, dict<tensor<string, []>, list<tensor<int32, [2]>, ?>>>>((("DefaultShapes", {{"audio_signal", [1, 1]}}), ("RangeDims", {{"audio_signal", [[1, 1], [1, 160000]]}})))] {
|
5 |
+
tensor<int32, []> var_6 = const()[name = tensor<string, []>("op_6"), val = tensor<int32, []>(512)];
|
6 |
+
tensor<int32, [1]> var_7 = add(x = audio_length, y = var_6)[name = tensor<string, []>("op_7")];
|
7 |
+
tensor<int32, []> var_9 = const()[name = tensor<string, []>("op_9"), val = tensor<int32, []>(512)];
|
8 |
+
tensor<int32, [1]> var_10 = sub(x = var_7, y = var_9)[name = tensor<string, []>("op_10")];
|
9 |
+
tensor<int32, []> var_11 = const()[name = tensor<string, []>("op_11"), val = tensor<int32, []>(160)];
|
10 |
+
tensor<int32, [1]> floor_div_0 = floor_div(x = var_10, y = var_11)[name = tensor<string, []>("floor_div_0")];
|
11 |
+
tensor<string, []> var_12_to_fp16_dtype_0 = const()[name = tensor<string, []>("op_12_to_fp16_dtype_0"), val = tensor<string, []>("fp16")];
|
12 |
+
tensor<fp16, []> var_14_promoted_to_fp16 = const()[name = tensor<string, []>("op_14_promoted_to_fp16"), val = tensor<fp16, []>(0x1p+0)];
|
13 |
+
tensor<fp16, [1]> cast_15 = cast(dtype = var_12_to_fp16_dtype_0, x = floor_div_0)[name = tensor<string, []>("cast_15")];
|
14 |
+
tensor<fp16, [1]> seq_len_1_cast_fp16 = add(x = cast_15, y = var_14_promoted_to_fp16)[name = tensor<string, []>("seq_len_1_cast_fp16")];
|
15 |
+
tensor<string, []> seq_len_dtype_0 = const()[name = tensor<string, []>("seq_len_dtype_0"), val = tensor<string, []>("int32")];
|
16 |
+
tensor<int32, [2]> var_28_begin_0 = const()[name = tensor<string, []>("op_28_begin_0"), val = tensor<int32, [2]>([0, 0])];
|
17 |
+
tensor<int32, [2]> var_28_end_0 = const()[name = tensor<string, []>("op_28_end_0"), val = tensor<int32, [2]>([1, 1])];
|
18 |
+
tensor<bool, [2]> var_28_end_mask_0 = const()[name = tensor<string, []>("op_28_end_mask_0"), val = tensor<bool, [2]>([true, false])];
|
19 |
+
tensor<bool, [2]> var_28_squeeze_mask_0 = const()[name = tensor<string, []>("op_28_squeeze_mask_0"), val = tensor<bool, [2]>([false, true])];
|
20 |
+
tensor<string, []> audio_signal_to_fp16_dtype_0 = const()[name = tensor<string, []>("audio_signal_to_fp16_dtype_0"), val = tensor<string, []>("fp16")];
|
21 |
+
tensor<fp16, [1, ?]> cast_13 = cast(dtype = audio_signal_to_fp16_dtype_0, x = audio_signal)[name = tensor<string, []>("cast_13")];
|
22 |
+
tensor<fp16, [1]> var_28_cast_fp16 = slice_by_index(begin = var_28_begin_0, end = var_28_end_0, end_mask = var_28_end_mask_0, squeeze_mask = var_28_squeeze_mask_0, x = cast_13)[name = tensor<string, []>("op_28_cast_fp16")];
|
23 |
+
tensor<int32, [1]> var_30_axes_0 = const()[name = tensor<string, []>("op_30_axes_0"), val = tensor<int32, [1]>([1])];
|
24 |
+
tensor<fp16, [1, 1]> var_30_cast_fp16 = expand_dims(axes = var_30_axes_0, x = var_28_cast_fp16)[name = tensor<string, []>("op_30_cast_fp16")];
|
25 |
+
tensor<int32, [2]> var_40_begin_0 = const()[name = tensor<string, []>("op_40_begin_0"), val = tensor<int32, [2]>([0, 1])];
|
26 |
+
tensor<int32, [2]> var_40_end_0 = const()[name = tensor<string, []>("op_40_end_0"), val = tensor<int32, [2]>([1, 0])];
|
27 |
+
tensor<bool, [2]> var_40_end_mask_0 = const()[name = tensor<string, []>("op_40_end_mask_0"), val = tensor<bool, [2]>([true, true])];
|
28 |
+
tensor<fp16, [1, ?]> var_40_cast_fp16 = slice_by_index(begin = var_40_begin_0, end = var_40_end_0, end_mask = var_40_end_mask_0, x = cast_13)[name = tensor<string, []>("op_40_cast_fp16")];
|
29 |
+
tensor<int32, [2]> var_50_begin_0 = const()[name = tensor<string, []>("op_50_begin_0"), val = tensor<int32, [2]>([0, 0])];
|
30 |
+
tensor<int32, [2]> var_50_end_0 = const()[name = tensor<string, []>("op_50_end_0"), val = tensor<int32, [2]>([1, -1])];
|
31 |
+
tensor<bool, [2]> var_50_end_mask_0 = const()[name = tensor<string, []>("op_50_end_mask_0"), val = tensor<bool, [2]>([true, false])];
|
32 |
+
tensor<fp16, [1, ?]> var_50_cast_fp16 = slice_by_index(begin = var_50_begin_0, end = var_50_end_0, end_mask = var_50_end_mask_0, x = cast_13)[name = tensor<string, []>("op_50_cast_fp16")];
|
33 |
+
tensor<fp16, []> var_51_to_fp16 = const()[name = tensor<string, []>("op_51_to_fp16"), val = tensor<fp16, []>(0x1.f0cp-1)];
|
34 |
+
tensor<fp16, [1, ?]> var_52_cast_fp16 = mul(x = var_50_cast_fp16, y = var_51_to_fp16)[name = tensor<string, []>("op_52_cast_fp16")];
|
35 |
+
tensor<fp16, [1, ?]> var_54_cast_fp16 = sub(x = var_40_cast_fp16, y = var_52_cast_fp16)[name = tensor<string, []>("op_54_cast_fp16")];
|
36 |
+
tensor<int32, []> var_56 = const()[name = tensor<string, []>("op_56"), val = tensor<int32, []>(1)];
|
37 |
+
tensor<bool, []> input_1_interleave_0 = const()[name = tensor<string, []>("input_1_interleave_0"), val = tensor<bool, []>(false)];
|
38 |
+
tensor<fp16, [1, ?]> input_1_cast_fp16 = concat(axis = var_56, interleave = input_1_interleave_0, values = (var_30_cast_fp16, var_54_cast_fp16))[name = tensor<string, []>("input_1_cast_fp16")];
|
39 |
+
tensor<int32, [3]> concat_0x = const()[name = tensor<string, []>("concat_0x"), val = tensor<int32, [3]>([1, 1, -1])];
|
40 |
+
tensor<fp16, [1, 1, ?]> input_3_cast_fp16 = reshape(shape = concat_0x, x = input_1_cast_fp16)[name = tensor<string, []>("input_3_cast_fp16")];
|
41 |
+
tensor<int32, [6]> input_5_pad_0 = const()[name = tensor<string, []>("input_5_pad_0"), val = tensor<int32, [6]>([0, 0, 0, 0, 256, 256])];
|
42 |
+
tensor<string, []> input_5_mode_0 = const()[name = tensor<string, []>("input_5_mode_0"), val = tensor<string, []>("reflect")];
|
43 |
+
tensor<fp16, []> const_0_to_fp16 = const()[name = tensor<string, []>("const_0_to_fp16"), val = tensor<fp16, []>(0x0p+0)];
|
44 |
+
tensor<fp16, [1, 1, ?]> input_5_cast_fp16 = pad(constant_val = const_0_to_fp16, mode = input_5_mode_0, pad = input_5_pad_0, x = input_3_cast_fp16)[name = tensor<string, []>("input_5_cast_fp16")];
|
45 |
+
tensor<int32, [2]> concat_1x = const()[name = tensor<string, []>("concat_1x"), val = tensor<int32, [2]>([1, -1])];
|
46 |
+
tensor<fp16, [1, ?]> input_cast_fp16 = reshape(shape = concat_1x, x = input_5_cast_fp16)[name = tensor<string, []>("input_cast_fp16")];
|
47 |
+
tensor<int32, [1]> expand_dims_3 = const()[name = tensor<string, []>("expand_dims_3"), val = tensor<int32, [1]>([160])];
|
48 |
+
tensor<int32, [1]> expand_dims_4_axes_0 = const()[name = tensor<string, []>("expand_dims_4_axes_0"), val = tensor<int32, [1]>([1])];
|
49 |
+
tensor<fp16, [1, 1, ?]> expand_dims_4_cast_fp16 = expand_dims(axes = expand_dims_4_axes_0, x = input_cast_fp16)[name = tensor<string, []>("expand_dims_4_cast_fp16")];
|
50 |
+
tensor<string, []> conv_0_pad_type_0 = const()[name = tensor<string, []>("conv_0_pad_type_0"), val = tensor<string, []>("valid")];
|
51 |
+
tensor<int32, [2]> conv_0_pad_0 = const()[name = tensor<string, []>("conv_0_pad_0"), val = tensor<int32, [2]>([0, 0])];
|
52 |
+
tensor<int32, [1]> conv_0_dilations_0 = const()[name = tensor<string, []>("conv_0_dilations_0"), val = tensor<int32, [1]>([1])];
|
53 |
+
tensor<int32, []> conv_0_groups_0 = const()[name = tensor<string, []>("conv_0_groups_0"), val = tensor<int32, []>(1)];
|
54 |
+
tensor<fp16, [257, 1, 512]> expand_dims_1_to_fp16 = const()[name = tensor<string, []>("expand_dims_1_to_fp16"), val = tensor<fp16, [257, 1, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(64)))];
|
55 |
+
tensor<fp16, [1, 257, ?]> conv_0_cast_fp16 = conv(dilations = conv_0_dilations_0, groups = conv_0_groups_0, pad = conv_0_pad_0, pad_type = conv_0_pad_type_0, strides = expand_dims_3, weight = expand_dims_1_to_fp16, x = expand_dims_4_cast_fp16)[name = tensor<string, []>("conv_0_cast_fp16")];
|
56 |
+
tensor<string, []> conv_1_pad_type_0 = const()[name = tensor<string, []>("conv_1_pad_type_0"), val = tensor<string, []>("valid")];
|
57 |
+
tensor<int32, [2]> conv_1_pad_0 = const()[name = tensor<string, []>("conv_1_pad_0"), val = tensor<int32, [2]>([0, 0])];
|
58 |
+
tensor<int32, [1]> conv_1_dilations_0 = const()[name = tensor<string, []>("conv_1_dilations_0"), val = tensor<int32, [1]>([1])];
|
59 |
+
tensor<int32, []> conv_1_groups_0 = const()[name = tensor<string, []>("conv_1_groups_0"), val = tensor<int32, []>(1)];
|
60 |
+
tensor<fp16, [257, 1, 512]> expand_dims_2_to_fp16 = const()[name = tensor<string, []>("expand_dims_2_to_fp16"), val = tensor<fp16, [257, 1, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(263296)))];
|
61 |
+
tensor<fp16, [1, 257, ?]> conv_1_cast_fp16 = conv(dilations = conv_1_dilations_0, groups = conv_1_groups_0, pad = conv_1_pad_0, pad_type = conv_1_pad_type_0, strides = expand_dims_3, weight = expand_dims_2_to_fp16, x = expand_dims_4_cast_fp16)[name = tensor<string, []>("conv_1_cast_fp16")];
|
62 |
+
tensor<int32, []> stack_0_axis_0 = const()[name = tensor<string, []>("stack_0_axis_0"), val = tensor<int32, []>(-1)];
|
63 |
+
tensor<fp16, [1, 257, ?, 2]> stack_0_cast_fp16 = stack(axis = stack_0_axis_0, values = (conv_0_cast_fp16, conv_1_cast_fp16))[name = tensor<string, []>("stack_0_cast_fp16")];
|
64 |
+
tensor<fp16, []> var_93_promoted_to_fp16 = const()[name = tensor<string, []>("op_93_promoted_to_fp16"), val = tensor<fp16, []>(0x1p+1)];
|
65 |
+
tensor<fp16, [1, 257, ?, 2]> var_94_cast_fp16 = pow(x = stack_0_cast_fp16, y = var_93_promoted_to_fp16)[name = tensor<string, []>("op_94_cast_fp16")];
|
66 |
+
tensor<int32, [1]> var_96 = const()[name = tensor<string, []>("op_96"), val = tensor<int32, [1]>([-1])];
|
67 |
+
tensor<bool, []> var_97 = const()[name = tensor<string, []>("op_97"), val = tensor<bool, []>(false)];
|
68 |
+
tensor<fp16, [1, 257, ?]> var_99_cast_fp16 = reduce_sum(axes = var_96, keep_dims = var_97, x = var_94_cast_fp16)[name = tensor<string, []>("op_99_cast_fp16")];
|
69 |
+
tensor<fp16, [1, 257, ?]> x_7_cast_fp16 = identity(x = var_99_cast_fp16)[name = tensor<string, []>("x_7_cast_fp16")];
|
70 |
+
tensor<bool, []> x_9_transpose_x_0 = const()[name = tensor<string, []>("x_9_transpose_x_0"), val = tensor<bool, []>(false)];
|
71 |
+
tensor<bool, []> x_9_transpose_y_0 = const()[name = tensor<string, []>("x_9_transpose_y_0"), val = tensor<bool, []>(false)];
|
72 |
+
tensor<fp16, [1, 128, 257]> filterbanks_to_fp16 = const()[name = tensor<string, []>("filterbanks_to_fp16"), val = tensor<fp16, [1, 128, 257]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(526528)))];
|
73 |
+
tensor<fp16, [1, 128, ?]> x_9_cast_fp16 = matmul(transpose_x = x_9_transpose_x_0, transpose_y = x_9_transpose_y_0, x = filterbanks_to_fp16, y = x_7_cast_fp16)[name = tensor<string, []>("x_9_cast_fp16")];
|
74 |
+
tensor<fp16, []> var_108_to_fp16 = const()[name = tensor<string, []>("op_108_to_fp16"), val = tensor<fp16, []>(0x1p-24)];
|
75 |
+
tensor<fp16, [1, 128, ?]> var_109_cast_fp16 = add(x = x_9_cast_fp16, y = var_108_to_fp16)[name = tensor<string, []>("op_109_cast_fp16")];
|
76 |
+
tensor<fp16, []> x_11_epsilon_0_to_fp16 = const()[name = tensor<string, []>("x_11_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x0p+0)];
|
77 |
+
tensor<fp16, [1, 128, ?]> x_11_cast_fp16 = log(epsilon = x_11_epsilon_0_to_fp16, x = var_109_cast_fp16)[name = tensor<string, []>("x_11_cast_fp16")];
|
78 |
+
tensor<int32, []> var_114 = const()[name = tensor<string, []>("op_114"), val = tensor<int32, []>(1)];
|
79 |
+
tensor<int32, [3]> var_116_shape_cast_fp16 = shape(x = x_11_cast_fp16)[name = tensor<string, []>("op_116_shape_cast_fp16")];
|
80 |
+
tensor<int32, []> gather_5_indices_0 = const()[name = tensor<string, []>("gather_5_indices_0"), val = tensor<int32, []>(2)];
|
81 |
+
tensor<int32, []> gather_5_axis_0 = const()[name = tensor<string, []>("gather_5_axis_0"), val = tensor<int32, []>(0)];
|
82 |
+
tensor<int32, []> gather_5 = gather(axis = gather_5_axis_0, indices = gather_5_indices_0, x = var_116_shape_cast_fp16)[name = tensor<string, []>("gather_5")];
|
83 |
+
tensor<int32, []> const_1 = const()[name = tensor<string, []>("const_1"), val = tensor<int32, []>(0)];
|
84 |
+
tensor<int32, []> const_2 = const()[name = tensor<string, []>("const_2"), val = tensor<int32, []>(1)];
|
85 |
+
tensor<int32, [?]> var_124 = range_1d(end = gather_5, start = const_1, step = const_2)[name = tensor<string, []>("op_124")];
|
86 |
+
tensor<int32, [1]> var_126_axes_0 = const()[name = tensor<string, []>("op_126_axes_0"), val = tensor<int32, [1]>([0])];
|
87 |
+
tensor<int32, [1, ?]> var_126 = expand_dims(axes = var_126_axes_0, x = var_124)[name = tensor<string, []>("op_126")];
|
88 |
+
tensor<int32, [2]> shape_0 = shape(x = var_126)[name = tensor<string, []>("shape_0")];
|
89 |
+
tensor<int32, []> concat_2_axis_0 = const()[name = tensor<string, []>("concat_2_axis_0"), val = tensor<int32, []>(0)];
|
90 |
+
tensor<bool, []> concat_2_interleave_0 = const()[name = tensor<string, []>("concat_2_interleave_0"), val = tensor<bool, []>(false)];
|
91 |
+
tensor<int32, [2]> concat_2 = concat(axis = concat_2_axis_0, interleave = concat_2_interleave_0, values = (var_114, gather_5))[name = tensor<string, []>("concat_2")];
|
92 |
+
tensor<int32, [2]> real_div_0 = real_div(x = concat_2, y = shape_0)[name = tensor<string, []>("real_div_0")];
|
93 |
+
tensor<int32, [?, ?]> time_steps = tile(reps = real_div_0, x = var_126)[name = tensor<string, []>("time_steps")];
|
94 |
+
tensor<int32, [1]> var_131_axes_0 = const()[name = tensor<string, []>("op_131_axes_0"), val = tensor<int32, [1]>([1])];
|
95 |
+
tensor<int32, [1]> melspectogram_length = cast(dtype = seq_len_dtype_0, x = seq_len_1_cast_fp16)[name = tensor<string, []>("cast_14")];
|
96 |
+
tensor<int32, [1, 1]> var_131 = expand_dims(axes = var_131_axes_0, x = melspectogram_length)[name = tensor<string, []>("op_131")];
|
97 |
+
tensor<bool, [?, ?]> valid_mask = less(x = time_steps, y = var_131)[name = tensor<string, []>("valid_mask")];
|
98 |
+
tensor<int32, [1]> var_134_axes_0 = const()[name = tensor<string, []>("op_134_axes_0"), val = tensor<int32, [1]>([1])];
|
99 |
+
tensor<bool, [?, 1, ?]> var_134 = expand_dims(axes = var_134_axes_0, x = valid_mask)[name = tensor<string, []>("op_134")];
|
100 |
+
tensor<fp16, []> var_135_to_fp16 = const()[name = tensor<string, []>("op_135_to_fp16"), val = tensor<fp16, []>(0x0p+0)];
|
101 |
+
tensor<fp16, [1, 128, ?]> var_136_cast_fp16 = select(a = x_11_cast_fp16, b = var_135_to_fp16, cond = var_134)[name = tensor<string, []>("op_136_cast_fp16")];
|
102 |
+
tensor<int32, [1]> var_138 = const()[name = tensor<string, []>("op_138"), val = tensor<int32, [1]>([2])];
|
103 |
+
tensor<bool, []> var_139 = const()[name = tensor<string, []>("op_139"), val = tensor<bool, []>(false)];
|
104 |
+
tensor<fp16, [1, 128]> x_mean_numerator_cast_fp16 = reduce_sum(axes = var_138, keep_dims = var_139, x = var_136_cast_fp16)[name = tensor<string, []>("x_mean_numerator_cast_fp16")];
|
105 |
+
tensor<int32, [1]> var_143 = const()[name = tensor<string, []>("op_143"), val = tensor<int32, [1]>([1])];
|
106 |
+
tensor<bool, []> var_144 = const()[name = tensor<string, []>("op_144"), val = tensor<bool, []>(false)];
|
107 |
+
tensor<string, []> cast_3_to_fp16_dtype_0 = const()[name = tensor<string, []>("cast_3_to_fp16_dtype_0"), val = tensor<string, []>("fp16")];
|
108 |
+
tensor<fp16, [?, ?]> cast_12 = cast(dtype = cast_3_to_fp16_dtype_0, x = valid_mask)[name = tensor<string, []>("cast_12")];
|
109 |
+
tensor<fp16, [?]> x_mean_denominator_cast_fp16 = reduce_sum(axes = var_143, keep_dims = var_144, x = cast_12)[name = tensor<string, []>("x_mean_denominator_cast_fp16")];
|
110 |
+
tensor<int32, [1]> var_148_axes_0 = const()[name = tensor<string, []>("op_148_axes_0"), val = tensor<int32, [1]>([1])];
|
111 |
+
tensor<fp16, [?, 1]> var_148_cast_fp16 = expand_dims(axes = var_148_axes_0, x = x_mean_denominator_cast_fp16)[name = tensor<string, []>("op_148_cast_fp16")];
|
112 |
+
tensor<fp16, [?, 128]> x_mean_cast_fp16 = real_div(x = x_mean_numerator_cast_fp16, y = var_148_cast_fp16)[name = tensor<string, []>("x_mean_cast_fp16")];
|
113 |
+
tensor<int32, [1]> var_153_axes_0 = const()[name = tensor<string, []>("op_153_axes_0"), val = tensor<int32, [1]>([2])];
|
114 |
+
tensor<fp16, [?, 128, 1]> var_153_cast_fp16 = expand_dims(axes = var_153_axes_0, x = x_mean_cast_fp16)[name = tensor<string, []>("op_153_cast_fp16")];
|
115 |
+
tensor<fp16, [?, 128, ?]> var_155_cast_fp16 = sub(x = x_11_cast_fp16, y = var_153_cast_fp16)[name = tensor<string, []>("op_155_cast_fp16")];
|
116 |
+
tensor<fp16, []> var_156_to_fp16 = const()[name = tensor<string, []>("op_156_to_fp16"), val = tensor<fp16, []>(0x0p+0)];
|
117 |
+
tensor<fp16, [?, 128, ?]> var_157_cast_fp16 = select(a = var_155_cast_fp16, b = var_156_to_fp16, cond = var_134)[name = tensor<string, []>("op_157_cast_fp16")];
|
118 |
+
tensor<fp16, []> var_158_promoted_to_fp16 = const()[name = tensor<string, []>("op_158_promoted_to_fp16"), val = tensor<fp16, []>(0x1p+1)];
|
119 |
+
tensor<fp16, [?, 128, ?]> var_159_cast_fp16 = pow(x = var_157_cast_fp16, y = var_158_promoted_to_fp16)[name = tensor<string, []>("op_159_cast_fp16")];
|
120 |
+
tensor<int32, [1]> var_161 = const()[name = tensor<string, []>("op_161"), val = tensor<int32, [1]>([2])];
|
121 |
+
tensor<bool, []> var_162 = const()[name = tensor<string, []>("op_162"), val = tensor<bool, []>(false)];
|
122 |
+
tensor<fp16, [?, 128]> var_164_cast_fp16 = reduce_sum(axes = var_161, keep_dims = var_162, x = var_159_cast_fp16)[name = tensor<string, []>("op_164_cast_fp16")];
|
123 |
+
tensor<fp16, []> var_168_to_fp16 = const()[name = tensor<string, []>("op_168_to_fp16"), val = tensor<fp16, []>(0x1p+0)];
|
124 |
+
tensor<fp16, [?, 1]> var_169_cast_fp16 = sub(x = var_148_cast_fp16, y = var_168_to_fp16)[name = tensor<string, []>("op_169_cast_fp16")];
|
125 |
+
tensor<fp16, [?, 128]> var_170_cast_fp16 = real_div(x = var_164_cast_fp16, y = var_169_cast_fp16)[name = tensor<string, []>("op_170_cast_fp16")];
|
126 |
+
tensor<fp16, [?, 128]> x_std_1_cast_fp16 = sqrt(x = var_170_cast_fp16)[name = tensor<string, []>("x_std_1_cast_fp16")];
|
127 |
+
tensor<fp16, []> var_172_to_fp16 = const()[name = tensor<string, []>("op_172_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
|
128 |
+
tensor<fp16, [?, 128]> x_std_cast_fp16 = add(x = x_std_1_cast_fp16, y = var_172_to_fp16)[name = tensor<string, []>("x_std_cast_fp16")];
|
129 |
+
tensor<int32, [1]> var_180_axes_0 = const()[name = tensor<string, []>("op_180_axes_0"), val = tensor<int32, [1]>([2])];
|
130 |
+
tensor<fp16, [?, 128, 1]> var_180_cast_fp16 = expand_dims(axes = var_180_axes_0, x = x_std_cast_fp16)[name = tensor<string, []>("op_180_cast_fp16")];
|
131 |
+
tensor<fp16, [?, 128, ?]> x_cast_fp16 = real_div(x = var_155_cast_fp16, y = var_180_cast_fp16)[name = tensor<string, []>("x_cast_fp16")];
|
132 |
+
tensor<int32, [3]> var_183_shape_cast_fp16 = shape(x = x_cast_fp16)[name = tensor<string, []>("op_183_shape_cast_fp16")];
|
133 |
+
tensor<int32, []> gather_6_indices_0 = const()[name = tensor<string, []>("gather_6_indices_0"), val = tensor<int32, []>(-1)];
|
134 |
+
tensor<int32, []> gather_6_axis_0 = const()[name = tensor<string, []>("gather_6_axis_0"), val = tensor<int32, []>(0)];
|
135 |
+
tensor<int32, []> gather_6 = gather(axis = gather_6_axis_0, indices = gather_6_indices_0, x = var_183_shape_cast_fp16)[name = tensor<string, []>("gather_6")];
|
136 |
+
tensor<int32, []> const_3 = const()[name = tensor<string, []>("const_3"), val = tensor<int32, []>(0)];
|
137 |
+
tensor<int32, []> const_4 = const()[name = tensor<string, []>("const_4"), val = tensor<int32, []>(1)];
|
138 |
+
tensor<int32, [?]> mask_1 = range_1d(end = gather_6, start = const_3, step = const_4)[name = tensor<string, []>("mask_1")];
|
139 |
+
tensor<int32, []> gather_7_indices_0 = const()[name = tensor<string, []>("gather_7_indices_0"), val = tensor<int32, []>(0)];
|
140 |
+
tensor<int32, []> gather_7_axis_0 = const()[name = tensor<string, []>("gather_7_axis_0"), val = tensor<int32, []>(0)];
|
141 |
+
tensor<int32, []> gather_7 = gather(axis = gather_7_axis_0, indices = gather_7_indices_0, x = var_183_shape_cast_fp16)[name = tensor<string, []>("gather_7")];
|
142 |
+
tensor<int32, []> var_195 = const()[name = tensor<string, []>("op_195"), val = tensor<int32, []>(1)];
|
143 |
+
tensor<int32, []> concat_3_axis_0 = const()[name = tensor<string, []>("concat_3_axis_0"), val = tensor<int32, []>(0)];
|
144 |
+
tensor<bool, []> concat_3_interleave_0 = const()[name = tensor<string, []>("concat_3_interleave_0"), val = tensor<bool, []>(false)];
|
145 |
+
tensor<int32, [2]> concat_3 = concat(axis = concat_3_axis_0, interleave = concat_3_interleave_0, values = (gather_7, var_195))[name = tensor<string, []>("concat_3")];
|
146 |
+
tensor<int32, [1]> expand_dims_0_axes_0 = const()[name = tensor<string, []>("expand_dims_0_axes_0"), val = tensor<int32, [1]>([0])];
|
147 |
+
tensor<int32, [1, ?]> expand_dims_0 = expand_dims(axes = expand_dims_0_axes_0, x = mask_1)[name = tensor<string, []>("expand_dims_0")];
|
148 |
+
tensor<int32, [?, ?]> var_197 = tile(reps = concat_3, x = expand_dims_0)[name = tensor<string, []>("op_197")];
|
149 |
+
tensor<bool, [?, ?]> mask = greater_equal(x = var_197, y = var_131)[name = tensor<string, []>("mask")];
|
150 |
+
tensor<int32, [1]> var_202_axes_0 = const()[name = tensor<string, []>("op_202_axes_0"), val = tensor<int32, [1]>([1])];
|
151 |
+
tensor<bool, [?, 1, ?]> var_202 = expand_dims(axes = var_202_axes_0, x = mask)[name = tensor<string, []>("op_202")];
|
152 |
+
tensor<fp16, []> var_216_to_fp16 = const()[name = tensor<string, []>("op_216_to_fp16"), val = tensor<fp16, []>(0x0p+0)];
|
153 |
+
tensor<fp16, [?, 128, ?]> var_217_cast_fp16 = select(a = var_216_to_fp16, b = x_cast_fp16, cond = var_202)[name = tensor<string, []>("op_217_cast_fp16")];
|
154 |
+
tensor<string, []> var_217_cast_fp16_to_fp32_dtype_0 = const()[name = tensor<string, []>("op_217_cast_fp16_to_fp32_dtype_0"), val = tensor<string, []>("fp32")];
|
155 |
+
tensor<fp32, [?, 128, ?]> melspectogram = cast(dtype = var_217_cast_fp16_to_fp32_dtype_0, x = var_217_cast_fp16)[name = tensor<string, []>("cast_11")];
|
156 |
+
} -> (melspectogram, melspectogram_length);
|
157 |
+
}
|
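
The compiled bundle above can be exercised directly with coremltools (macOS only; CompiledMLModel requires coremltools 7 or newer). A minimal sketch using the input and output names from the MIL signature above; the 16 kHz sample rate is an assumption inferred from the hop size:

import numpy as np
import coremltools as ct

# Load the compiled mel-spectrogram front end straight from the .mlmodelc bundle.
mel = ct.models.CompiledMLModel("Melspectogram.mlmodelc")

audio = np.zeros((1, 16000), dtype=np.float32)   # 1 s of silence at an assumed 16 kHz
out = mel.predict({
    "audio_signal": audio,
    "audio_length": np.array([audio.shape[1]], dtype=np.int32),
})
print(out["melspectogram"].shape, out["melspectogram_length"])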
Melspectogram.mlmodelc/weights/weight.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9fbb02b5875f7630641c3d6a1fffa9bc73189f87b4c03113333df7e348743888
+size 592384
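
The binaries behind these pointers are resolved automatically when the repo is fetched through the Hub client; a sketch, with repo_id as a placeholder:

from huggingface_hub import snapshot_download

# Download the resolved .mlmodelc bundle (LFS blobs included) to a local cache dir.
local_dir = snapshot_download(
    repo_id="<user>/<repo>",                       # placeholder
    allow_patterns=["Melspectogram.mlmodelc/*"],   # optional: fetch just this bundle
)
print(local_dir)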
Melspectrogram_v2.mlmodelc/analytics/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4300d03c3fc52901866a5f578d2673ec12c95deaf86b2131c83848339212d218
+size 243
Melspectrogram_v2.mlmodelc/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:96c3a2e963dbc871fda545e9ed452173f58be0d995d2d91693361069b7189ee1
+size 402
Melspectrogram_v2.mlmodelc/metadata.json
ADDED
@@ -0,0 +1,108 @@
+[
+  {
+    "metadataOutputVersion" : "3.0",
+    "storagePrecision" : "Float16",
+    "outputSchema" : [
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16)",
+        "shortDescription" : "",
+        "shape" : "[]",
+        "name" : "melspectrogram",
+        "type" : "MultiArray"
+      },
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Int32",
+        "formattedType" : "MultiArray (Int32 1)",
+        "shortDescription" : "",
+        "shape" : "[1]",
+        "name" : "melspectrogram_length",
+        "type" : "MultiArray"
+      }
+    ],
+    "modelParameters" : [
+
+    ],
+    "specificationVersion" : 8,
+    "mlProgramOperationTypeHistogram" : {
+      "Range1d" : 2,
+      "Ios17.reshape" : 2,
+      "Identity" : 1,
+      "Ios17.matmul" : 1,
+      "Ios17.expandDims" : 10,
+      "Select" : 3,
+      "Ios17.add" : 4,
+      "Tile" : 2,
+      "Ios17.sliceByIndex" : 3,
+      "Ios16.reduceSum" : 4,
+      "Shape" : 3,
+      "Ios17.gather" : 3,
+      "Pad" : 1,
+      "Ios17.log" : 1,
+      "Ios17.conv" : 2,
+      "Ios17.sub" : 4,
+      "Ios17.pow" : 2,
+      "Ios17.cast" : 9,
+      "Ios17.realDiv" : 4,
+      "Stack" : 1,
+      "Ios17.concat" : 3,
+      "Ios17.floorDiv" : 1,
+      "Ios17.less" : 1,
+      "Ios17.clip" : 2,
+      "Ios17.sqrt" : 1,
+      "Ios17.greaterEqual" : 1,
+      "Ios17.mul" : 1
+    },
+    "computePrecision" : "Mixed (Float16, Float32, Int32, UInt16)",
+    "isUpdatable" : "0",
+    "stateSchema" : [
+
+    ],
+    "availability" : {
+      "macOS" : "14.0",
+      "tvOS" : "17.0",
+      "visionOS" : "1.0",
+      "watchOS" : "10.0",
+      "iOS" : "17.0",
+      "macCatalyst" : "17.0"
+    },
+    "modelType" : {
+      "name" : "MLModelType_mlProgram"
+    },
+    "userDefinedMetadata" : {
+      "com.github.apple.coremltools.source_dialect" : "TorchScript",
+      "com.github.apple.coremltools.source" : "torch==2.5.0",
+      "com.github.apple.coremltools.version" : "8.3.0"
+    },
+    "inputSchema" : [
+      {
+        "dataType" : "Float32",
+        "hasShapeFlexibility" : "1",
+        "isOptional" : "0",
+        "shapeFlexibility" : "1 × 160...480000",
+        "shapeRange" : "[[1, 1], [160, 480000]]",
+        "formattedType" : "MultiArray (Float32 1 × 160)",
+        "type" : "MultiArray",
+        "shape" : "[1, 160]",
+        "name" : "audio_signal",
+        "shortDescription" : ""
+      },
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Int32",
+        "formattedType" : "MultiArray (Int32 1)",
+        "shortDescription" : "",
+        "shape" : "[1]",
+        "name" : "audio_length",
+        "type" : "MultiArray"
+      }
+    ],
+    "generatedClassName" : "FlexibleMelspectrogram_fixed",
+    "method" : "predict"
+  }
+]
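
To check the flexible input range and deployment targets programmatically before calling predict, the metadata can be read straight from the bundle; a short sketch:

import json

# metadata.json is a JSON list with one entry per model function.
with open("Melspectrogram_v2.mlmodelc/metadata.json") as f:
    meta = json.load(f)[0]

for inp in meta["inputSchema"]:
    print(inp["name"], inp["dataType"], inp.get("shapeRange", inp["shape"]))
for outp in meta["outputSchema"]:
    print(outp["name"], outp["dataType"], outp["shape"])
print("min deployment:", meta["availability"])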
Melspectrogram_v2.mlmodelc/model.mil
ADDED
@@ -0,0 +1,177 @@
1 |
+
program(1.0)
|
2 |
+
[buildInfo = dict<tensor<string, []>, tensor<string, []>>({{"coremlc-component-MIL", "3405.2.1"}, {"coremlc-version", "3404.23.1"}, {"coremltools-component-torch", "2.5.0"}, {"coremltools-source-dialect", "TorchScript"}, {"coremltools-version", "8.3.0"}})]
|
3 |
+
{
|
4 |
+
func main<ios17>(tensor<int32, [1]> audio_length, tensor<fp32, [1, ?]> audio_signal) [FlexibleShapeInformation = tuple<tuple<tensor<string, []>, dict<tensor<string, []>, tensor<int32, [?]>>>, tuple<tensor<string, []>, dict<tensor<string, []>, list<tensor<int32, [2]>, ?>>>>((("DefaultShapes", {{"audio_signal", [1, 160]}}), ("RangeDims", {{"audio_signal", [[1, 1], [160, 480000]]}})))] {
|
5 |
+
tensor<int32, []> var_6 = const()[name = tensor<string, []>("op_6"), val = tensor<int32, []>(512)];
|
6 |
+
tensor<int32, [1]> var_7 = add(x = audio_length, y = var_6)[name = tensor<string, []>("op_7")];
|
7 |
+
tensor<int32, []> var_9 = const()[name = tensor<string, []>("op_9"), val = tensor<int32, []>(512)];
|
8 |
+
tensor<int32, [1]> var_10 = sub(x = var_7, y = var_9)[name = tensor<string, []>("op_10")];
|
9 |
+
tensor<int32, []> var_11 = const()[name = tensor<string, []>("op_11"), val = tensor<int32, []>(160)];
|
10 |
+
tensor<int32, [1]> floor_div_0 = floor_div(x = var_10, y = var_11)[name = tensor<string, []>("floor_div_0")];
|
11 |
+
tensor<string, []> var_12_to_fp16_dtype_0 = const()[name = tensor<string, []>("op_12_to_fp16_dtype_0"), val = tensor<string, []>("fp16")];
|
12 |
+
tensor<fp16, []> var_14_promoted_to_fp16 = const()[name = tensor<string, []>("op_14_promoted_to_fp16"), val = tensor<fp16, []>(0x1p+0)];
|
13 |
+
tensor<fp16, [1]> floor_div_0_to_fp16 = cast(dtype = var_12_to_fp16_dtype_0, x = floor_div_0)[name = tensor<string, []>("cast_23")];
|
14 |
+
tensor<fp16, [1]> seq_len_1_cast_fp16 = add(x = floor_div_0_to_fp16, y = var_14_promoted_to_fp16)[name = tensor<string, []>("seq_len_1_cast_fp16")];
|
15 |
+
tensor<string, []> cast_0_dtype_0 = const()[name = tensor<string, []>("cast_0_dtype_0"), val = tensor<string, []>("int32")];
|
16 |
+
tensor<int32, [2]> var_28_begin_0 = const()[name = tensor<string, []>("op_28_begin_0"), val = tensor<int32, [2]>([0, 0])];
|
17 |
+
tensor<int32, [2]> var_28_end_0 = const()[name = tensor<string, []>("op_28_end_0"), val = tensor<int32, [2]>([1, 1])];
|
18 |
+
tensor<bool, [2]> var_28_end_mask_0 = const()[name = tensor<string, []>("op_28_end_mask_0"), val = tensor<bool, [2]>([true, false])];
|
19 |
+
tensor<bool, [2]> var_28_squeeze_mask_0 = const()[name = tensor<string, []>("op_28_squeeze_mask_0"), val = tensor<bool, [2]>([false, true])];
|
20 |
+
tensor<string, []> audio_signal_to_fp16_dtype_0 = const()[name = tensor<string, []>("audio_signal_to_fp16_dtype_0"), val = tensor<string, []>("fp16")];
|
21 |
+
tensor<fp16, [1, ?]> audio_signal_to_fp16 = cast(dtype = audio_signal_to_fp16_dtype_0, x = audio_signal)[name = tensor<string, []>("cast_21")];
|
22 |
+
tensor<fp16, [1]> var_28_cast_fp16 = slice_by_index(begin = var_28_begin_0, end = var_28_end_0, end_mask = var_28_end_mask_0, squeeze_mask = var_28_squeeze_mask_0, x = audio_signal_to_fp16)[name = tensor<string, []>("op_28_cast_fp16")];
|
23 |
+
tensor<int32, [1]> var_30_axes_0 = const()[name = tensor<string, []>("op_30_axes_0"), val = tensor<int32, [1]>([1])];
|
24 |
+
tensor<fp16, [1, 1]> var_30_cast_fp16 = expand_dims(axes = var_30_axes_0, x = var_28_cast_fp16)[name = tensor<string, []>("op_30_cast_fp16")];
|
25 |
+
tensor<int32, [2]> var_40_begin_0 = const()[name = tensor<string, []>("op_40_begin_0"), val = tensor<int32, [2]>([0, 1])];
|
26 |
+
tensor<int32, [2]> var_40_end_0 = const()[name = tensor<string, []>("op_40_end_0"), val = tensor<int32, [2]>([1, 0])];
|
27 |
+
tensor<bool, [2]> var_40_end_mask_0 = const()[name = tensor<string, []>("op_40_end_mask_0"), val = tensor<bool, [2]>([true, true])];
|
28 |
+
tensor<fp16, [1, ?]> var_40_cast_fp16 = slice_by_index(begin = var_40_begin_0, end = var_40_end_0, end_mask = var_40_end_mask_0, x = audio_signal_to_fp16)[name = tensor<string, []>("op_40_cast_fp16")];
|
29 |
+
tensor<int32, [2]> var_50_begin_0 = const()[name = tensor<string, []>("op_50_begin_0"), val = tensor<int32, [2]>([0, 0])];
|
30 |
+
tensor<int32, [2]> var_50_end_0 = const()[name = tensor<string, []>("op_50_end_0"), val = tensor<int32, [2]>([1, -1])];
|
31 |
+
tensor<bool, [2]> var_50_end_mask_0 = const()[name = tensor<string, []>("op_50_end_mask_0"), val = tensor<bool, [2]>([true, false])];
|
32 |
+
tensor<fp16, [1, ?]> var_50_cast_fp16 = slice_by_index(begin = var_50_begin_0, end = var_50_end_0, end_mask = var_50_end_mask_0, x = audio_signal_to_fp16)[name = tensor<string, []>("op_50_cast_fp16")];
|
33 |
+
tensor<fp16, []> var_51_to_fp16 = const()[name = tensor<string, []>("op_51_to_fp16"), val = tensor<fp16, []>(0x1.f0cp-1)];
|
34 |
+
tensor<fp16, [1, ?]> var_52_cast_fp16 = mul(x = var_50_cast_fp16, y = var_51_to_fp16)[name = tensor<string, []>("op_52_cast_fp16")];
|
35 |
+
tensor<fp16, [1, ?]> var_54_cast_fp16 = sub(x = var_40_cast_fp16, y = var_52_cast_fp16)[name = tensor<string, []>("op_54_cast_fp16")];
|
36 |
+
tensor<int32, []> var_56 = const()[name = tensor<string, []>("op_56"), val = tensor<int32, []>(1)];
|
37 |
+
tensor<bool, []> input_1_interleave_0 = const()[name = tensor<string, []>("input_1_interleave_0"), val = tensor<bool, []>(false)];
|
38 |
+
tensor<fp16, [1, ?]> input_1_cast_fp16 = concat(axis = var_56, interleave = input_1_interleave_0, values = (var_30_cast_fp16, var_54_cast_fp16))[name = tensor<string, []>("input_1_cast_fp16")];
|
39 |
+
tensor<int32, [3]> concat_0x = const()[name = tensor<string, []>("concat_0x"), val = tensor<int32, [3]>([1, 1, -1])];
|
40 |
+
tensor<fp16, [1, 1, ?]> input_3_cast_fp16 = reshape(shape = concat_0x, x = input_1_cast_fp16)[name = tensor<string, []>("input_3_cast_fp16")];
|
41 |
+
tensor<int32, [6]> input_5_pad_0 = const()[name = tensor<string, []>("input_5_pad_0"), val = tensor<int32, [6]>([0, 0, 0, 0, 256, 256])];
|
42 |
+
tensor<string, []> input_5_mode_0 = const()[name = tensor<string, []>("input_5_mode_0"), val = tensor<string, []>("reflect")];
|
43 |
+
tensor<fp16, []> const_0_to_fp16 = const()[name = tensor<string, []>("const_0_to_fp16"), val = tensor<fp16, []>(0x0p+0)];
|
44 |
+
tensor<fp16, [1, 1, ?]> input_5_cast_fp16 = pad(constant_val = const_0_to_fp16, mode = input_5_mode_0, pad = input_5_pad_0, x = input_3_cast_fp16)[name = tensor<string, []>("input_5_cast_fp16")];
|
45 |
+
tensor<int32, [2]> concat_1x = const()[name = tensor<string, []>("concat_1x"), val = tensor<int32, [2]>([1, -1])];
|
46 |
+
tensor<fp16, [1, ?]> input_cast_fp16 = reshape(shape = concat_1x, x = input_5_cast_fp16)[name = tensor<string, []>("input_cast_fp16")];
|
47 |
+
tensor<int32, [1]> expand_dims_3 = const()[name = tensor<string, []>("expand_dims_3"), val = tensor<int32, [1]>([160])];
|
48 |
+
tensor<int32, [1]> expand_dims_4_axes_0 = const()[name = tensor<string, []>("expand_dims_4_axes_0"), val = tensor<int32, [1]>([1])];
|
49 |
+
tensor<fp16, [1, 1, ?]> expand_dims_4_cast_fp16 = expand_dims(axes = expand_dims_4_axes_0, x = input_cast_fp16)[name = tensor<string, []>("expand_dims_4_cast_fp16")];
|
50 |
+
tensor<string, []> conv_0_pad_type_0 = const()[name = tensor<string, []>("conv_0_pad_type_0"), val = tensor<string, []>("valid")];
|
51 |
+
tensor<int32, [2]> conv_0_pad_0 = const()[name = tensor<string, []>("conv_0_pad_0"), val = tensor<int32, [2]>([0, 0])];
|
52 |
+
tensor<int32, [1]> conv_0_dilations_0 = const()[name = tensor<string, []>("conv_0_dilations_0"), val = tensor<int32, [1]>([1])];
|
53 |
+
tensor<int32, []> conv_0_groups_0 = const()[name = tensor<string, []>("conv_0_groups_0"), val = tensor<int32, []>(1)];
|
54 |
+
tensor<fp16, [257, 1, 512]> expand_dims_1_to_fp16 = const()[name = tensor<string, []>("expand_dims_1_to_fp16"), val = tensor<fp16, [257, 1, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(64)))];
|
55 |
+
tensor<fp16, [1, 257, ?]> conv_0_cast_fp16 = conv(dilations = conv_0_dilations_0, groups = conv_0_groups_0, pad = conv_0_pad_0, pad_type = conv_0_pad_type_0, strides = expand_dims_3, weight = expand_dims_1_to_fp16, x = expand_dims_4_cast_fp16)[name = tensor<string, []>("conv_0_cast_fp16")];
|
56 |
+
tensor<string, []> conv_1_pad_type_0 = const()[name = tensor<string, []>("conv_1_pad_type_0"), val = tensor<string, []>("valid")];
|
57 |
+
tensor<int32, [2]> conv_1_pad_0 = const()[name = tensor<string, []>("conv_1_pad_0"), val = tensor<int32, [2]>([0, 0])];
|
58 |
+
tensor<int32, [1]> conv_1_dilations_0 = const()[name = tensor<string, []>("conv_1_dilations_0"), val = tensor<int32, [1]>([1])];
|
59 |
+
tensor<int32, []> conv_1_groups_0 = const()[name = tensor<string, []>("conv_1_groups_0"), val = tensor<int32, []>(1)];
|
60 |
+
tensor<fp16, [257, 1, 512]> expand_dims_2_to_fp16 = const()[name = tensor<string, []>("expand_dims_2_to_fp16"), val = tensor<fp16, [257, 1, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(263296)))];
|
61 |
+
tensor<fp16, [1, 257, ?]> conv_1_cast_fp16 = conv(dilations = conv_1_dilations_0, groups = conv_1_groups_0, pad = conv_1_pad_0, pad_type = conv_1_pad_type_0, strides = expand_dims_3, weight = expand_dims_2_to_fp16, x = expand_dims_4_cast_fp16)[name = tensor<string, []>("conv_1_cast_fp16")];
|
62 |
+
tensor<int32, []> stack_0_axis_0 = const()[name = tensor<string, []>("stack_0_axis_0"), val = tensor<int32, []>(-1)];
|
63 |
+
tensor<fp16, [1, 257, ?, 2]> stack_0_cast_fp16 = stack(axis = stack_0_axis_0, values = (conv_0_cast_fp16, conv_1_cast_fp16))[name = tensor<string, []>("stack_0_cast_fp16")];
|
64 |
+
tensor<fp16, []> var_93_promoted_to_fp16 = const()[name = tensor<string, []>("op_93_promoted_to_fp16"), val = tensor<fp16, []>(0x1p+1)];
|
65 |
+
tensor<fp16, [1, 257, ?, 2]> var_94_cast_fp16 = pow(x = stack_0_cast_fp16, y = var_93_promoted_to_fp16)[name = tensor<string, []>("op_94_cast_fp16")];
|
66 |
+
tensor<int32, [1]> var_99_axes_0 = const()[name = tensor<string, []>("op_99_axes_0"), val = tensor<int32, [1]>([-1])];
|
67 |
+
tensor<bool, []> var_99_keep_dims_0 = const()[name = tensor<string, []>("op_99_keep_dims_0"), val = tensor<bool, []>(false)];
|
68 |
+
tensor<fp16, [1, 257, ?]> var_99_cast_fp16 = reduce_sum(axes = var_99_axes_0, keep_dims = var_99_keep_dims_0, x = var_94_cast_fp16)[name = tensor<string, []>("op_99_cast_fp16")];
|
69 |
+
tensor<fp16, [1, 257, ?]> x_7_cast_fp16 = identity(x = var_99_cast_fp16)[name = tensor<string, []>("x_7_cast_fp16")];
|
70 |
+
tensor<bool, []> x_9_transpose_x_0 = const()[name = tensor<string, []>("x_9_transpose_x_0"), val = tensor<bool, []>(false)];
|
71 |
+
tensor<bool, []> x_9_transpose_y_0 = const()[name = tensor<string, []>("x_9_transpose_y_0"), val = tensor<bool, []>(false)];
|
72 |
+
tensor<fp16, [1, 128, 257]> filterbanks_to_fp16 = const()[name = tensor<string, []>("filterbanks_to_fp16"), val = tensor<fp16, [1, 128, 257]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(526528)))];
|
73 |
+
tensor<fp16, [1, 128, ?]> x_9_cast_fp16 = matmul(transpose_x = x_9_transpose_x_0, transpose_y = x_9_transpose_y_0, x = filterbanks_to_fp16, y = x_7_cast_fp16)[name = tensor<string, []>("x_9_cast_fp16")];
|
74 |
+
tensor<fp16, []> var_108_to_fp16 = const()[name = tensor<string, []>("op_108_to_fp16"), val = tensor<fp16, []>(0x1p-24)];
|
75 |
+
tensor<fp16, [1, 128, ?]> var_109_cast_fp16 = add(x = x_9_cast_fp16, y = var_108_to_fp16)[name = tensor<string, []>("op_109_cast_fp16")];
|
76 |
+
tensor<fp32, []> x_11_epsilon_0 = const()[name = tensor<string, []>("x_11_epsilon_0"), val = tensor<fp32, []>(0x1p-149)];
|
77 |
+
tensor<fp16, [1, 128, ?]> x_11_cast_fp16 = log(epsilon = x_11_epsilon_0, x = var_109_cast_fp16)[name = tensor<string, []>("x_11_cast_fp16")];
|
78 |
+
tensor<int32, []> var_114 = const()[name = tensor<string, []>("op_114"), val = tensor<int32, []>(1)];
|
79 |
+
tensor<int32, [3]> var_116_shape_cast_fp16 = shape(x = x_11_cast_fp16)[name = tensor<string, []>("op_116_shape_cast_fp16")];
|
80 |
+
tensor<int32, []> gather_5_axis_0 = const()[name = tensor<string, []>("gather_5_axis_0"), val = tensor<int32, []>(0)];
|
81 |
+
tensor<int32, []> gather_5_batch_dims_0 = const()[name = tensor<string, []>("gather_5_batch_dims_0"), val = tensor<int32, []>(0)];
|
82 |
+
tensor<bool, []> gather_5_validate_indices_0 = const()[name = tensor<string, []>("gather_5_validate_indices_0"), val = tensor<bool, []>(false)];
|
83 |
+
tensor<string, []> var_116_shape_cast_fp16_to_uint16_dtype_0 = const()[name = tensor<string, []>("op_116_shape_cast_fp16_to_uint16_dtype_0"), val = tensor<string, []>("uint16")];
|
84 |
+
tensor<uint16, []> select_5_to_uint16 = const()[name = tensor<string, []>("select_5_to_uint16"), val = tensor<uint16, []>(2)];
|
85 |
+
tensor<uint16, [3]> var_116_shape_cast_fp16_to_uint16 = cast(dtype = var_116_shape_cast_fp16_to_uint16_dtype_0, x = var_116_shape_cast_fp16)[name = tensor<string, []>("cast_20")];
|
86 |
+
tensor<uint16, []> gather_5_cast_uint16 = gather(axis = gather_5_axis_0, batch_dims = gather_5_batch_dims_0, indices = select_5_to_uint16, validate_indices = gather_5_validate_indices_0, x = var_116_shape_cast_fp16_to_uint16)[name = tensor<string, []>("gather_5_cast_uint16")];
|
87 |
+
tensor<string, []> gather_5_cast_uint16_to_int32_dtype_0 = const()[name = tensor<string, []>("gather_5_cast_uint16_to_int32_dtype_0"), val = tensor<string, []>("int32")];
|
88 |
+
tensor<int32, []> const_1 = const()[name = tensor<string, []>("const_1"), val = tensor<int32, []>(0)];
|
89 |
+
tensor<int32, []> const_2 = const()[name = tensor<string, []>("const_2"), val = tensor<int32, []>(1)];
|
90 |
+
tensor<int32, []> gather_5_cast_uint16_to_int32 = cast(dtype = gather_5_cast_uint16_to_int32_dtype_0, x = gather_5_cast_uint16)[name = tensor<string, []>("cast_19")];
|
91 |
+
tensor<int32, [?]> var_124 = range_1d(end = gather_5_cast_uint16_to_int32, start = const_1, step = const_2)[name = tensor<string, []>("op_124")];
|
92 |
+
tensor<int32, [1]> var_126_axes_0 = const()[name = tensor<string, []>("op_126_axes_0"), val = tensor<int32, [1]>([0])];
|
93 |
+
tensor<int32, [1, ?]> var_126 = expand_dims(axes = var_126_axes_0, x = var_124)[name = tensor<string, []>("op_126")];
|
94 |
+
tensor<int32, []> concat_2_axis_0 = const()[name = tensor<string, []>("concat_2_axis_0"), val = tensor<int32, []>(0)];
|
95 |
+
tensor<bool, []> concat_2_interleave_0 = const()[name = tensor<string, []>("concat_2_interleave_0"), val = tensor<bool, []>(false)];
|
96 |
+
tensor<int32, [2]> concat_2 = concat(axis = concat_2_axis_0, interleave = concat_2_interleave_0, values = (var_114, gather_5_cast_uint16_to_int32))[name = tensor<string, []>("concat_2")];
|
97 |
+
tensor<int32, [2]> shape_6 = shape(x = var_126)[name = tensor<string, []>("shape_6")];
|
98 |
+
tensor<int32, [2]> real_div_0 = real_div(x = concat_2, y = shape_6)[name = tensor<string, []>("real_div_0")];
|
99 |
+
tensor<int32, [?, ?]> time_steps = tile(reps = real_div_0, x = var_126)[name = tensor<string, []>("time_steps")];
|
100 |
+
tensor<int32, [1]> var_131_axes_0 = const()[name = tensor<string, []>("op_131_axes_0"), val = tensor<int32, [1]>([1])];
|
101 |
+
tensor<int32, [1]> melspectrogram_length = cast(dtype = cast_0_dtype_0, x = seq_len_1_cast_fp16)[name = tensor<string, []>("cast_22")];
|
102 |
+
tensor<int32, [1, 1]> var_131 = expand_dims(axes = var_131_axes_0, x = melspectrogram_length)[name = tensor<string, []>("op_131")];
|
103 |
+
tensor<bool, [?, ?]> valid_mask = less(x = time_steps, y = var_131)[name = tensor<string, []>("valid_mask")];
|
104 |
+
tensor<int32, [1]> var_134_axes_0 = const()[name = tensor<string, []>("op_134_axes_0"), val = tensor<int32, [1]>([1])];
|
105 |
+
tensor<bool, [?, 1, ?]> var_134 = expand_dims(axes = var_134_axes_0, x = valid_mask)[name = tensor<string, []>("op_134")];
|
106 |
+
tensor<fp16, []> var_135_to_fp16 = const()[name = tensor<string, []>("op_135_to_fp16"), val = tensor<fp16, []>(0x0p+0)];
|
107 |
+
tensor<fp16, [1, 128, ?]> var_136_cast_fp16 = select(a = x_11_cast_fp16, b = var_135_to_fp16, cond = var_134)[name = tensor<string, []>("op_136_cast_fp16")];
|
108 |
+
tensor<int32, [1]> x_mean_numerator_axes_0 = const()[name = tensor<string, []>("x_mean_numerator_axes_0"), val = tensor<int32, [1]>([2])];
|
109 |
+
tensor<bool, []> x_mean_numerator_keep_dims_0 = const()[name = tensor<string, []>("x_mean_numerator_keep_dims_0"), val = tensor<bool, []>(false)];
|
110 |
+
tensor<fp16, [1, 128]> x_mean_numerator_cast_fp16 = reduce_sum(axes = x_mean_numerator_axes_0, keep_dims = x_mean_numerator_keep_dims_0, x = var_136_cast_fp16)[name = tensor<string, []>("x_mean_numerator_cast_fp16")];
|
111 |
+
tensor<int32, [1]> var_146_axes_0 = const()[name = tensor<string, []>("op_146_axes_0"), val = tensor<int32, [1]>([1])];
|
112 |
+
tensor<bool, []> var_146_keep_dims_0 = const()[name = tensor<string, []>("op_146_keep_dims_0"), val = tensor<bool, []>(false)];
|
113 |
+
tensor<string, []> cast_4_to_fp16_dtype_0 = const()[name = tensor<string, []>("cast_4_to_fp16_dtype_0"), val = tensor<string, []>("fp16")];
|
114 |
+
tensor<fp16, [?, ?]> valid_mask_to_fp16 = cast(dtype = cast_4_to_fp16_dtype_0, x = valid_mask)[name = tensor<string, []>("cast_18")];
|
115 |
+
tensor<fp16, [?]> var_146_cast_fp16 = reduce_sum(axes = var_146_axes_0, keep_dims = var_146_keep_dims_0, x = valid_mask_to_fp16)[name = tensor<string, []>("op_146_cast_fp16")];
|
116 |
+
tensor<fp16, []> var_147_promoted_to_fp16 = const()[name = tensor<string, []>("op_147_promoted_to_fp16"), val = tensor<fp16, []>(0x1p+0)];
|
117 |
+
tensor<fp16, []> const_3_to_fp16 = const()[name = tensor<string, []>("const_3_to_fp16"), val = tensor<fp16, []>(inf)];
|
118 |
+
tensor<fp16, [?]> clip_0_cast_fp16 = clip(alpha = var_147_promoted_to_fp16, beta = const_3_to_fp16, x = var_146_cast_fp16)[name = tensor<string, []>("clip_0_cast_fp16")];
|
119 |
+
tensor<int32, [1]> var_151_axes_0 = const()[name = tensor<string, []>("op_151_axes_0"), val = tensor<int32, [1]>([1])];
|
120 |
+
tensor<fp16, [?, 1]> var_151_cast_fp16 = expand_dims(axes = var_151_axes_0, x = clip_0_cast_fp16)[name = tensor<string, []>("op_151_cast_fp16")];
|
121 |
+
tensor<fp16, [?, 128]> x_mean_cast_fp16 = real_div(x = x_mean_numerator_cast_fp16, y = var_151_cast_fp16)[name = tensor<string, []>("x_mean_cast_fp16")];
|
122 |
+
tensor<fp16, []> var_156_to_fp16 = const()[name = tensor<string, []>("op_156_to_fp16"), val = tensor<fp16, []>(0x1p+0)];
|
123 |
+
tensor<fp16, [?, 1]> var_157_cast_fp16 = sub(x = var_151_cast_fp16, y = var_156_to_fp16)[name = tensor<string, []>("op_157_cast_fp16")];
|
124 |
+
tensor<fp16, []> var_158_to_fp16 = const()[name = tensor<string, []>("op_158_to_fp16"), val = tensor<fp16, []>(0x1p+0)];
|
125 |
+
tensor<fp16, []> const_4_to_fp16 = const()[name = tensor<string, []>("const_4_to_fp16"), val = tensor<fp16, []>(inf)];
|
126 |
+
tensor<fp16, [?, 1]> clip_1_cast_fp16 = clip(alpha = var_158_to_fp16, beta = const_4_to_fp16, x = var_157_cast_fp16)[name = tensor<string, []>("clip_1_cast_fp16")];
|
127 |
+
tensor<int32, [1]> var_164_axes_0 = const()[name = tensor<string, []>("op_164_axes_0"), val = tensor<int32, [1]>([2])];
|
128 |
+
tensor<fp16, [?, 128, 1]> var_164_cast_fp16 = expand_dims(axes = var_164_axes_0, x = x_mean_cast_fp16)[name = tensor<string, []>("op_164_cast_fp16")];
|
129 |
+
tensor<fp16, [?, 128, ?]> var_166_cast_fp16 = sub(x = x_11_cast_fp16, y = var_164_cast_fp16)[name = tensor<string, []>("op_166_cast_fp16")];
|
130 |
+
tensor<fp16, []> var_167_to_fp16 = const()[name = tensor<string, []>("op_167_to_fp16"), val = tensor<fp16, []>(0x0p+0)];
|
131 |
+
tensor<fp16, [?, 128, ?]> var_168_cast_fp16 = select(a = var_166_cast_fp16, b = var_167_to_fp16, cond = var_134)[name = tensor<string, []>("op_168_cast_fp16")];
|
132 |
+
tensor<fp16, []> var_169_promoted_to_fp16 = const()[name = tensor<string, []>("op_169_promoted_to_fp16"), val = tensor<fp16, []>(0x1p+1)];
|
133 |
+
tensor<fp16, [?, 128, ?]> var_170_cast_fp16 = pow(x = var_168_cast_fp16, y = var_169_promoted_to_fp16)[name = tensor<string, []>("op_170_cast_fp16")];
|
134 |
+
tensor<int32, [1]> var_175_axes_0 = const()[name = tensor<string, []>("op_175_axes_0"), val = tensor<int32, [1]>([2])];
|
135 |
+
tensor<bool, []> var_175_keep_dims_0 = const()[name = tensor<string, []>("op_175_keep_dims_0"), val = tensor<bool, []>(false)];
|
136 |
+
tensor<fp16, [?, 128]> var_175_cast_fp16 = reduce_sum(axes = var_175_axes_0, keep_dims = var_175_keep_dims_0, x = var_170_cast_fp16)[name = tensor<string, []>("op_175_cast_fp16")];
|
137 |
+
tensor<fp16, [?, 128]> var_176_cast_fp16 = real_div(x = var_175_cast_fp16, y = clip_1_cast_fp16)[name = tensor<string, []>("op_176_cast_fp16")];
|
138 |
+
tensor<fp16, [?, 128]> x_std_1_cast_fp16 = sqrt(x = var_176_cast_fp16)[name = tensor<string, []>("x_std_1_cast_fp16")];
|
139 |
+
tensor<fp16, []> var_178_to_fp16 = const()[name = tensor<string, []>("op_178_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
|
140 |
+
tensor<fp16, [?, 128]> x_std_cast_fp16 = add(x = x_std_1_cast_fp16, y = var_178_to_fp16)[name = tensor<string, []>("x_std_cast_fp16")];
|
141 |
+
tensor<int32, [1]> var_186_axes_0 = const()[name = tensor<string, []>("op_186_axes_0"), val = tensor<int32, [1]>([2])];
|
142 |
+
tensor<fp16, [?, 128, 1]> var_186_cast_fp16 = expand_dims(axes = var_186_axes_0, x = x_std_cast_fp16)[name = tensor<string, []>("op_186_cast_fp16")];
|
143 |
+
tensor<fp16, [?, 128, ?]> x_cast_fp16 = real_div(x = var_166_cast_fp16, y = var_186_cast_fp16)[name = tensor<string, []>("x_cast_fp16")];
|
144 |
+
tensor<int32, [3]> var_189_shape_cast_fp16 = shape(x = x_cast_fp16)[name = tensor<string, []>("op_189_shape_cast_fp16")];
|
145 |
+
tensor<int32, []> gather_6_axis_0 = const()[name = tensor<string, []>("gather_6_axis_0"), val = tensor<int32, []>(0)];
|
146 |
+
tensor<int32, []> gather_6_batch_dims_0 = const()[name = tensor<string, []>("gather_6_batch_dims_0"), val = tensor<int32, []>(0)];
|
147 |
+
tensor<bool, []> gather_6_validate_indices_0 = const()[name = tensor<string, []>("gather_6_validate_indices_0"), val = tensor<bool, []>(false)];
|
148 |
+
tensor<string, []> var_189_shape_cast_fp16_to_uint16_dtype_0 = const()[name = tensor<string, []>("op_189_shape_cast_fp16_to_uint16_dtype_0"), val = tensor<string, []>("uint16")];
|
149 |
+
tensor<uint16, []> select_6_to_uint16 = const()[name = tensor<string, []>("select_6_to_uint16"), val = tensor<uint16, []>(2)];
|
150 |
+
tensor<uint16, [3]> var_189_shape_cast_fp16_to_uint16 = cast(dtype = var_189_shape_cast_fp16_to_uint16_dtype_0, x = var_189_shape_cast_fp16)[name = tensor<string, []>("cast_17")];
|
151 |
+
tensor<uint16, []> gather_6_cast_uint16 = gather(axis = gather_6_axis_0, batch_dims = gather_6_batch_dims_0, indices = select_6_to_uint16, validate_indices = gather_6_validate_indices_0, x = var_189_shape_cast_fp16_to_uint16)[name = tensor<string, []>("gather_6_cast_uint16")];
|
152 |
+
tensor<string, []> gather_6_cast_uint16_to_int32_dtype_0 = const()[name = tensor<string, []>("gather_6_cast_uint16_to_int32_dtype_0"), val = tensor<string, []>("int32")];
|
153 |
+
tensor<int32, []> const_5 = const()[name = tensor<string, []>("const_5"), val = tensor<int32, []>(0)];
|
154 |
+
tensor<int32, []> const_6 = const()[name = tensor<string, []>("const_6"), val = tensor<int32, []>(1)];
|
155 |
+
tensor<int32, []> gather_6_cast_uint16_to_int32 = cast(dtype = gather_6_cast_uint16_to_int32_dtype_0, x = gather_6_cast_uint16)[name = tensor<string, []>("cast_16")];
|
156 |
+
tensor<int32, [?]> mask_1 = range_1d(end = gather_6_cast_uint16_to_int32, start = const_5, step = const_6)[name = tensor<string, []>("mask_1")];
|
157 |
+
tensor<int32, []> gather_7_axis_0 = const()[name = tensor<string, []>("gather_7_axis_0"), val = tensor<int32, []>(0)];
|
158 |
+
tensor<int32, []> gather_7_batch_dims_0 = const()[name = tensor<string, []>("gather_7_batch_dims_0"), val = tensor<int32, []>(0)];
|
159 |
+
tensor<bool, []> gather_7_validate_indices_0 = const()[name = tensor<string, []>("gather_7_validate_indices_0"), val = tensor<bool, []>(false)];
|
160 |
+
tensor<uint16, []> select_7_to_uint16 = const()[name = tensor<string, []>("select_7_to_uint16"), val = tensor<uint16, []>(0)];
|
161 |
+
tensor<uint16, []> gather_7_cast_uint16 = gather(axis = gather_7_axis_0, batch_dims = gather_7_batch_dims_0, indices = select_7_to_uint16, validate_indices = gather_7_validate_indices_0, x = var_189_shape_cast_fp16_to_uint16)[name = tensor<string, []>("gather_7_cast_uint16")];
|
162 |
+
tensor<string, []> gather_7_cast_uint16_to_int32_dtype_0 = const()[name = tensor<string, []>("gather_7_cast_uint16_to_int32_dtype_0"), val = tensor<string, []>("int32")];
|
163 |
+
tensor<int32, []> var_201 = const()[name = tensor<string, []>("op_201"), val = tensor<int32, []>(1)];
|
164 |
+
tensor<int32, []> concat_3_axis_0 = const()[name = tensor<string, []>("concat_3_axis_0"), val = tensor<int32, []>(0)];
|
165 |
+
tensor<bool, []> concat_3_interleave_0 = const()[name = tensor<string, []>("concat_3_interleave_0"), val = tensor<bool, []>(false)];
|
166 |
+
tensor<int32, []> gather_7_cast_uint16_to_int32 = cast(dtype = gather_7_cast_uint16_to_int32_dtype_0, x = gather_7_cast_uint16)[name = tensor<string, []>("cast_15")];
|
167 |
+
tensor<int32, [2]> concat_3 = concat(axis = concat_3_axis_0, interleave = concat_3_interleave_0, values = (gather_7_cast_uint16_to_int32, var_201))[name = tensor<string, []>("concat_3")];
|
168 |
+
tensor<int32, [1]> expand_dims_0_axes_0 = const()[name = tensor<string, []>("expand_dims_0_axes_0"), val = tensor<int32, [1]>([0])];
|
169 |
+
tensor<int32, [1, ?]> expand_dims_0 = expand_dims(axes = expand_dims_0_axes_0, x = mask_1)[name = tensor<string, []>("expand_dims_0")];
|
170 |
+
tensor<int32, [?, ?]> var_203 = tile(reps = concat_3, x = expand_dims_0)[name = tensor<string, []>("op_203")];
|
171 |
+
tensor<bool, [?, ?]> mask = greater_equal(x = var_203, y = var_131)[name = tensor<string, []>("mask")];
|
172 |
+
tensor<int32, [1]> var_208_axes_0 = const()[name = tensor<string, []>("op_208_axes_0"), val = tensor<int32, [1]>([1])];
|
173 |
+
tensor<bool, [?, 1, ?]> var_208 = expand_dims(axes = var_208_axes_0, x = mask)[name = tensor<string, []>("op_208")];
|
174 |
+
tensor<fp16, []> var_222_to_fp16 = const()[name = tensor<string, []>("op_222_to_fp16"), val = tensor<fp16, []>(0x0p+0)];
|
175 |
+
tensor<fp16, [?, 128, ?]> melspectrogram = select(a = var_222_to_fp16, b = x_cast_fp16, cond = var_208)[name = tensor<string, []>("op_223_cast_fp16")];
|
176 |
+
} -> (melspectrogram, melspectrogram_length);
|
177 |
+
}
|
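The tail of this program builds the length mask for the spectrogram: it generates a 0..T index range, tiles it across the batch, flags every position at or beyond the valid length with greater_equal, and zeroes those frames with select. A minimal Swift sketch of the same masking step done on the CPU, assuming a hypothetical flat [1, 128, nFrames] mel buffer — purely illustrative, not an API from this repo:

```swift
/// Zero out mel frames at or beyond the valid length, mirroring the
/// range -> tile -> greater_equal -> select sequence in the MIL program above.
/// `mel` is assumed to be a flat [1, nMels, nFrames] buffer (illustrative only).
func maskMelSpectrogram(_ mel: inout [Float], nMels: Int, nFrames: Int, validFrames: Int) {
    guard validFrames < nFrames else { return }
    for m in 0..<nMels {
        for t in validFrames..<nFrames {
            mel[m * nFrames + t] = 0  // frames past the valid length are zeroed
        }
    }
}
```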
Melspectrogram_v2.mlmodelc/weights/weight.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:023c2303b7c3a1fafed92fc6ec46c1d43a48c0bbcdf33d6441d383a61747734c
size 592384
ParakeetDecoder.mlmodelc/analytics/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b657cd50e3e76a508244d46d5270f3bc0dcb047b0cabc684144cadc173dba1e5
size 243
ParakeetDecoder.mlmodelc/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c1b9018f1de0d2c7d3b3c962832919af1efbcf6476990f737e11b23b37c46f0a
size 436
ParakeetDecoder.mlmodelc/model.mil
ADDED
@@ -0,0 +1,72 @@
1 |
+
program(1.0)
|
2 |
+
[buildInfo = dict<tensor<string, []>, tensor<string, []>>({{"coremlc-component-MIL", "3405.2.1"}, {"coremlc-version", "3405.2.1"}, {"coremltools-component-torch", "2.4.0"}, {"coremltools-source-dialect", "TorchScript"}, {"coremltools-version", "7.2"}})]
|
3 |
+
{
|
4 |
+
func main<ios15>(tensor<fp32, [2, 1, 640]> c_in, tensor<fp32, [2, 1, 640]> h_in, tensor<int32, [1]> target_lengths, tensor<int32, [1, ?]> targets) [FlexibleShapeInformation = tuple<tuple<tensor<string, []>, dict<tensor<string, []>, tensor<int32, [?]>>>, tuple<tensor<string, []>, dict<tensor<string, []>, list<tensor<int32, [2]>, ?>>>>((("DefaultShapes", {{"targets", [1, 1]}}), ("RangeDims", {{"targets", [[1, 1], [1, 1000]]}})))] {
|
5 |
+
tensor<int32, []> input_axis_0 = const()[name = tensor<string, []>("input_axis_0"), val = tensor<int32, []>(0)];
|
6 |
+
tensor<fp16, [1025, 640]> embed_weight_to_fp16 = const()[name = tensor<string, []>("embed_weight_to_fp16"), val = tensor<fp16, [1025, 640]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(64)))];
|
7 |
+
tensor<fp16, [1, ?, 640]> input_cast_fp16 = gather(axis = input_axis_0, indices = targets, x = embed_weight_to_fp16)[name = tensor<string, []>("input_cast_fp16")];
|
8 |
+
tensor<string, []> input_cast_fp16_to_fp32_dtype_0 = const()[name = tensor<string, []>("input_cast_fp16_to_fp32_dtype_0"), val = tensor<string, []>("fp32")];
|
9 |
+
tensor<int32, []> split_0_num_splits_0 = const()[name = tensor<string, []>("split_0_num_splits_0"), val = tensor<int32, []>(2)];
|
10 |
+
tensor<int32, []> split_0_axis_0 = const()[name = tensor<string, []>("split_0_axis_0"), val = tensor<int32, []>(0)];
|
11 |
+
tensor<string, []> h_in_to_fp16_dtype_0 = const()[name = tensor<string, []>("h_in_to_fp16_dtype_0"), val = tensor<string, []>("fp16")];
|
12 |
+
tensor<fp16, [2, 1, 640]> cast_12 = cast(dtype = h_in_to_fp16_dtype_0, x = h_in)[name = tensor<string, []>("cast_12")];
|
13 |
+
tensor<fp16, [1, 1, 640]> split_0_cast_fp16_0, tensor<fp16, [1, 1, 640]> split_0_cast_fp16_1 = split(axis = split_0_axis_0, num_splits = split_0_num_splits_0, x = cast_12)[name = tensor<string, []>("split_0_cast_fp16")];
|
14 |
+
tensor<int32, []> split_1_num_splits_0 = const()[name = tensor<string, []>("split_1_num_splits_0"), val = tensor<int32, []>(2)];
|
15 |
+
tensor<int32, []> split_1_axis_0 = const()[name = tensor<string, []>("split_1_axis_0"), val = tensor<int32, []>(0)];
|
16 |
+
tensor<string, []> c_in_to_fp16_dtype_0 = const()[name = tensor<string, []>("c_in_to_fp16_dtype_0"), val = tensor<string, []>("fp16")];
|
17 |
+
tensor<fp16, [2, 1, 640]> cast_11 = cast(dtype = c_in_to_fp16_dtype_0, x = c_in)[name = tensor<string, []>("cast_11")];
|
18 |
+
tensor<fp16, [1, 1, 640]> split_1_cast_fp16_0, tensor<fp16, [1, 1, 640]> split_1_cast_fp16_1 = split(axis = split_1_axis_0, num_splits = split_1_num_splits_0, x = cast_11)[name = tensor<string, []>("split_1_cast_fp16")];
|
19 |
+
tensor<fp32, [2560]> concat_0 = const()[name = tensor<string, []>("concat_0"), val = tensor<fp32, [2560]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(1312128)))];
|
20 |
+
tensor<fp32, [2560, 640]> concat_1 = const()[name = tensor<string, []>("concat_1"), val = tensor<fp32, [2560, 640]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(1322432)))];
|
21 |
+
tensor<fp32, [2560, 640]> concat_2 = const()[name = tensor<string, []>("concat_2"), val = tensor<fp32, [2560, 640]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(7876096)))];
|
22 |
+
tensor<int32, [1]> var_25_lstm_layer_0_lstm_h0_squeeze_axes_0 = const()[name = tensor<string, []>("op_25_lstm_layer_0_lstm_h0_squeeze_axes_0"), val = tensor<int32, [1]>([0])];
|
23 |
+
tensor<fp16, [1, 640]> var_25_lstm_layer_0_lstm_h0_squeeze_cast_fp16 = squeeze(axes = var_25_lstm_layer_0_lstm_h0_squeeze_axes_0, x = split_0_cast_fp16_0)[name = tensor<string, []>("op_25_lstm_layer_0_lstm_h0_squeeze_cast_fp16")];
|
24 |
+
tensor<string, []> var_25_lstm_layer_0_lstm_h0_squeeze_cast_fp16_to_fp32_dtype_0 = const()[name = tensor<string, []>("op_25_lstm_layer_0_lstm_h0_squeeze_cast_fp16_to_fp32_dtype_0"), val = tensor<string, []>("fp32")];
|
25 |
+
tensor<int32, [1]> var_25_lstm_layer_0_lstm_c0_squeeze_axes_0 = const()[name = tensor<string, []>("op_25_lstm_layer_0_lstm_c0_squeeze_axes_0"), val = tensor<int32, [1]>([0])];
|
26 |
+
tensor<fp16, [1, 640]> var_25_lstm_layer_0_lstm_c0_squeeze_cast_fp16 = squeeze(axes = var_25_lstm_layer_0_lstm_c0_squeeze_axes_0, x = split_1_cast_fp16_0)[name = tensor<string, []>("op_25_lstm_layer_0_lstm_c0_squeeze_cast_fp16")];
|
27 |
+
tensor<string, []> var_25_lstm_layer_0_lstm_c0_squeeze_cast_fp16_to_fp32_dtype_0 = const()[name = tensor<string, []>("op_25_lstm_layer_0_lstm_c0_squeeze_cast_fp16_to_fp32_dtype_0"), val = tensor<string, []>("fp32")];
|
28 |
+
tensor<string, []> var_25_lstm_layer_0_direction_0 = const()[name = tensor<string, []>("op_25_lstm_layer_0_direction_0"), val = tensor<string, []>("forward")];
|
29 |
+
tensor<bool, []> var_25_lstm_layer_0_output_sequence_0 = const()[name = tensor<string, []>("op_25_lstm_layer_0_output_sequence_0"), val = tensor<bool, []>(true)];
|
30 |
+
tensor<string, []> var_25_lstm_layer_0_recurrent_activation_0 = const()[name = tensor<string, []>("op_25_lstm_layer_0_recurrent_activation_0"), val = tensor<string, []>("sigmoid")];
|
31 |
+
tensor<string, []> var_25_lstm_layer_0_cell_activation_0 = const()[name = tensor<string, []>("op_25_lstm_layer_0_cell_activation_0"), val = tensor<string, []>("tanh")];
|
32 |
+
tensor<string, []> var_25_lstm_layer_0_activation_0 = const()[name = tensor<string, []>("op_25_lstm_layer_0_activation_0"), val = tensor<string, []>("tanh")];
|
33 |
+
tensor<fp32, [1, 640]> cast_9 = cast(dtype = var_25_lstm_layer_0_lstm_c0_squeeze_cast_fp16_to_fp32_dtype_0, x = var_25_lstm_layer_0_lstm_c0_squeeze_cast_fp16)[name = tensor<string, []>("cast_9")];
|
34 |
+
tensor<fp32, [1, 640]> cast_10 = cast(dtype = var_25_lstm_layer_0_lstm_h0_squeeze_cast_fp16_to_fp32_dtype_0, x = var_25_lstm_layer_0_lstm_h0_squeeze_cast_fp16)[name = tensor<string, []>("cast_10")];
|
35 |
+
tensor<fp32, [1, ?, 640]> cast_13 = cast(dtype = input_cast_fp16_to_fp32_dtype_0, x = input_cast_fp16)[name = tensor<string, []>("cast_13")];
|
36 |
+
tensor<fp32, [1, ?, 640]> var_25_lstm_layer_0_0, tensor<fp32, [?, 640]> var_25_lstm_layer_0_1, tensor<fp32, [?, 640]> var_25_lstm_layer_0_2 = lstm(activation = var_25_lstm_layer_0_activation_0, bias = concat_0, cell_activation = var_25_lstm_layer_0_cell_activation_0, direction = var_25_lstm_layer_0_direction_0, initial_c = cast_9, initial_h = cast_10, output_sequence = var_25_lstm_layer_0_output_sequence_0, recurrent_activation = var_25_lstm_layer_0_recurrent_activation_0, weight_hh = concat_2, weight_ih = concat_1, x = cast_13)[name = tensor<string, []>("op_25_lstm_layer_0")];
|
37 |
+
tensor<fp32, [2560]> concat_3 = const()[name = tensor<string, []>("concat_3"), val = tensor<fp32, [2560]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(14429760)))];
|
38 |
+
tensor<fp32, [2560, 640]> concat_4 = const()[name = tensor<string, []>("concat_4"), val = tensor<fp32, [2560, 640]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(14440064)))];
|
39 |
+
tensor<fp32, [2560, 640]> concat_5 = const()[name = tensor<string, []>("concat_5"), val = tensor<fp32, [2560, 640]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(20993728)))];
|
40 |
+
tensor<int32, [1]> var_25_lstm_h0_squeeze_axes_0 = const()[name = tensor<string, []>("op_25_lstm_h0_squeeze_axes_0"), val = tensor<int32, [1]>([0])];
|
41 |
+
tensor<fp16, [1, 640]> var_25_lstm_h0_squeeze_cast_fp16 = squeeze(axes = var_25_lstm_h0_squeeze_axes_0, x = split_0_cast_fp16_1)[name = tensor<string, []>("op_25_lstm_h0_squeeze_cast_fp16")];
|
42 |
+
tensor<string, []> var_25_lstm_h0_squeeze_cast_fp16_to_fp32_dtype_0 = const()[name = tensor<string, []>("op_25_lstm_h0_squeeze_cast_fp16_to_fp32_dtype_0"), val = tensor<string, []>("fp32")];
|
43 |
+
tensor<int32, [1]> var_25_lstm_c0_squeeze_axes_0 = const()[name = tensor<string, []>("op_25_lstm_c0_squeeze_axes_0"), val = tensor<int32, [1]>([0])];
|
44 |
+
tensor<fp16, [1, 640]> var_25_lstm_c0_squeeze_cast_fp16 = squeeze(axes = var_25_lstm_c0_squeeze_axes_0, x = split_1_cast_fp16_1)[name = tensor<string, []>("op_25_lstm_c0_squeeze_cast_fp16")];
|
45 |
+
tensor<string, []> var_25_lstm_c0_squeeze_cast_fp16_to_fp32_dtype_0 = const()[name = tensor<string, []>("op_25_lstm_c0_squeeze_cast_fp16_to_fp32_dtype_0"), val = tensor<string, []>("fp32")];
|
46 |
+
tensor<string, []> var_25_direction_0 = const()[name = tensor<string, []>("op_25_direction_0"), val = tensor<string, []>("forward")];
|
47 |
+
tensor<bool, []> var_25_output_sequence_0 = const()[name = tensor<string, []>("op_25_output_sequence_0"), val = tensor<bool, []>(true)];
|
48 |
+
tensor<string, []> var_25_recurrent_activation_0 = const()[name = tensor<string, []>("op_25_recurrent_activation_0"), val = tensor<string, []>("sigmoid")];
|
49 |
+
tensor<string, []> var_25_cell_activation_0 = const()[name = tensor<string, []>("op_25_cell_activation_0"), val = tensor<string, []>("tanh")];
|
50 |
+
tensor<string, []> var_25_activation_0 = const()[name = tensor<string, []>("op_25_activation_0"), val = tensor<string, []>("tanh")];
|
51 |
+
tensor<fp32, [1, 640]> cast_7 = cast(dtype = var_25_lstm_c0_squeeze_cast_fp16_to_fp32_dtype_0, x = var_25_lstm_c0_squeeze_cast_fp16)[name = tensor<string, []>("cast_7")];
|
52 |
+
tensor<fp32, [1, 640]> cast_8 = cast(dtype = var_25_lstm_h0_squeeze_cast_fp16_to_fp32_dtype_0, x = var_25_lstm_h0_squeeze_cast_fp16)[name = tensor<string, []>("cast_8")];
|
53 |
+
tensor<fp32, [1, ?, 640]> decoder_output, tensor<fp32, [?, 640]> var_25_1, tensor<fp32, [?, 640]> var_25_2 = lstm(activation = var_25_activation_0, bias = concat_3, cell_activation = var_25_cell_activation_0, direction = var_25_direction_0, initial_c = cast_7, initial_h = cast_8, output_sequence = var_25_output_sequence_0, recurrent_activation = var_25_recurrent_activation_0, weight_hh = concat_5, weight_ih = concat_4, x = var_25_lstm_layer_0_0)[name = tensor<string, []>("op_25")];
|
54 |
+
tensor<int32, []> var_26_axis_0 = const()[name = tensor<string, []>("op_26_axis_0"), val = tensor<int32, []>(0)];
|
55 |
+
tensor<string, []> var_25_lstm_layer_0_1_to_fp16_dtype_0 = const()[name = tensor<string, []>("op_25_lstm_layer_0_1_to_fp16_dtype_0"), val = tensor<string, []>("fp16")];
|
56 |
+
tensor<string, []> var_25_1_to_fp16_dtype_0 = const()[name = tensor<string, []>("op_25_1_to_fp16_dtype_0"), val = tensor<string, []>("fp16")];
|
57 |
+
tensor<fp16, [?, 640]> cast_5 = cast(dtype = var_25_1_to_fp16_dtype_0, x = var_25_1)[name = tensor<string, []>("cast_5")];
|
58 |
+
tensor<fp16, [?, 640]> cast_6 = cast(dtype = var_25_lstm_layer_0_1_to_fp16_dtype_0, x = var_25_lstm_layer_0_1)[name = tensor<string, []>("cast_6")];
|
59 |
+
tensor<fp16, [2, ?, 640]> var_26_cast_fp16 = stack(axis = var_26_axis_0, values = (cast_6, cast_5))[name = tensor<string, []>("op_26_cast_fp16")];
|
60 |
+
tensor<string, []> var_26_cast_fp16_to_fp32_dtype_0 = const()[name = tensor<string, []>("op_26_cast_fp16_to_fp32_dtype_0"), val = tensor<string, []>("fp32")];
|
61 |
+
tensor<int32, []> var_27_axis_0 = const()[name = tensor<string, []>("op_27_axis_0"), val = tensor<int32, []>(0)];
|
62 |
+
tensor<string, []> var_25_lstm_layer_0_2_to_fp16_dtype_0 = const()[name = tensor<string, []>("op_25_lstm_layer_0_2_to_fp16_dtype_0"), val = tensor<string, []>("fp16")];
|
63 |
+
tensor<string, []> var_25_2_to_fp16_dtype_0 = const()[name = tensor<string, []>("op_25_2_to_fp16_dtype_0"), val = tensor<string, []>("fp16")];
|
64 |
+
tensor<fp16, [?, 640]> cast_2 = cast(dtype = var_25_2_to_fp16_dtype_0, x = var_25_2)[name = tensor<string, []>("cast_2")];
|
65 |
+
tensor<fp16, [?, 640]> cast_3 = cast(dtype = var_25_lstm_layer_0_2_to_fp16_dtype_0, x = var_25_lstm_layer_0_2)[name = tensor<string, []>("cast_3")];
|
66 |
+
tensor<fp16, [2, ?, 640]> var_27_cast_fp16 = stack(axis = var_27_axis_0, values = (cast_3, cast_2))[name = tensor<string, []>("op_27_cast_fp16")];
|
67 |
+
tensor<string, []> var_27_cast_fp16_to_fp32_dtype_0 = const()[name = tensor<string, []>("op_27_cast_fp16_to_fp32_dtype_0"), val = tensor<string, []>("fp32")];
|
68 |
+
tensor<fp32, [2, ?, 640]> c_out = cast(dtype = var_27_cast_fp16_to_fp32_dtype_0, x = var_27_cast_fp16)[name = tensor<string, []>("cast_1")];
|
69 |
+
tensor<fp32, [2, ?, 640]> h_out = cast(dtype = var_26_cast_fp16_to_fp32_dtype_0, x = var_26_cast_fp16)[name = tensor<string, []>("cast_4")];
|
70 |
+
tensor<int32, [1]> target_lengths_tmp = identity(x = target_lengths)[name = tensor<string, []>("target_lengths_tmp")];
|
71 |
+
} -> (decoder_output, h_out, c_out);
|
72 |
+
}
|
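The decoder is the transducer prediction network: an embedding lookup over 1025 token ids followed by two stacked 640-unit LSTMs, with the hidden/cell state (shape [2, 1, 640]) passed explicitly in and out. A minimal Core ML call sketch in Swift for a single-token decode step, assuming the compiled ParakeetDecoder.mlmodelc from this repo; the function shape is illustrative and not the FluidAudio API:

```swift
import CoreML

/// One prediction-network step: feed the previously emitted token plus the
/// current LSTM state, get back a 640-dim decoder output and the new state.
func decoderStep(model: MLModel,
                 token: Int32,
                 hIn: MLMultiArray,   // Float32 [2, 1, 640]
                 cIn: MLMultiArray)   // Float32 [2, 1, 640]
throws -> (output: MLMultiArray, hOut: MLMultiArray, cOut: MLMultiArray) {
    let targets = try MLMultiArray(shape: [1, 1], dataType: .int32)
    targets[0] = NSNumber(value: token)
    let lengths = try MLMultiArray(shape: [1], dataType: .int32)
    lengths[0] = 1

    // Input/output names follow the function signature in the MIL program above.
    let inputs = try MLDictionaryFeatureProvider(dictionary: [
        "targets": targets,
        "target_lengths": lengths,
        "h_in": hIn,
        "c_in": cIn,
    ])
    let result = try model.prediction(from: inputs)
    return (result.featureValue(for: "decoder_output")!.multiArrayValue!,
            result.featureValue(for: "h_out")!.multiArrayValue!,
            result.featureValue(for: "c_out")!.multiArrayValue!)
}
```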
ParakeetDecoder.mlmodelc/weights/weight.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1fd28df8a6356e3f95f52bfcd4b735ba004caaf7c82348a8c5eb970ecc3e6e4a
size 27547392
ParakeetEncoder_v2.mlmodelc/analytics/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:810d10e271c983398343c8a6372cd77e93b8fa38fa87e5440b52d70141541d3f
size 243
ParakeetEncoder_v2.mlmodelc/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3a3fdaf971cab989eead103325b0a3d5e6702a4461d803b856b7f39177dc7f2e
size 386
ParakeetEncoder_v2.mlmodelc/metadata.json
ADDED
@@ -0,0 +1,103 @@
1 |
+
[
|
2 |
+
{
|
3 |
+
"metadataOutputVersion" : "3.0",
|
4 |
+
"storagePrecision" : "Mixed (Float16, Int8)",
|
5 |
+
"outputSchema" : [
|
6 |
+
{
|
7 |
+
"hasShapeFlexibility" : "0",
|
8 |
+
"isOptional" : "0",
|
9 |
+
"dataType" : "Float16",
|
10 |
+
"formattedType" : "MultiArray (Float16 1 × 126 × 1024)",
|
11 |
+
"shortDescription" : "",
|
12 |
+
"shape" : "[1, 126, 1024]",
|
13 |
+
"name" : "encoder_output",
|
14 |
+
"type" : "MultiArray"
|
15 |
+
},
|
16 |
+
{
|
17 |
+
"hasShapeFlexibility" : "0",
|
18 |
+
"isOptional" : "0",
|
19 |
+
"dataType" : "Int32",
|
20 |
+
"formattedType" : "MultiArray (Int32 1)",
|
21 |
+
"shortDescription" : "",
|
22 |
+
"shape" : "[1]",
|
23 |
+
"name" : "encoder_output_length",
|
24 |
+
"type" : "MultiArray"
|
25 |
+
}
|
26 |
+
],
|
27 |
+
"modelParameters" : [
|
28 |
+
|
29 |
+
],
|
30 |
+
"specificationVersion" : 8,
|
31 |
+
"mlProgramOperationTypeHistogram" : {
|
32 |
+
"Ios17.floor" : 3,
|
33 |
+
"Ios17.logicalAnd" : 2,
|
34 |
+
"Ios17.reshape" : 145,
|
35 |
+
"Ios16.softmax" : 24,
|
36 |
+
"Ios17.matmul" : 72,
|
37 |
+
"Ios17.transpose" : 171,
|
38 |
+
"Split" : 24,
|
39 |
+
"Ios17.expandDims" : 5,
|
40 |
+
"Select" : 72,
|
41 |
+
"Ios17.add" : 174,
|
42 |
+
"Tile" : 1,
|
43 |
+
"Ios17.sliceByIndex" : 48,
|
44 |
+
"Ios16.sigmoid" : 24,
|
45 |
+
"Pad" : 48,
|
46 |
+
"Ios17.logicalNot" : 2,
|
47 |
+
"Ios17.layerNorm" : 120,
|
48 |
+
"Ios16.constexprAffineDequantize" : 342,
|
49 |
+
"Ios16.silu" : 72,
|
50 |
+
"Ios17.less" : 1,
|
51 |
+
"Ios17.conv" : 77,
|
52 |
+
"Ios16.relu" : 3,
|
53 |
+
"Ios17.cast" : 3,
|
54 |
+
"Ios17.linear" : 193,
|
55 |
+
"Ios17.mul" : 99
|
56 |
+
},
|
57 |
+
"computePrecision" : "Mixed (Float16, Float32, Int32)",
|
58 |
+
"isUpdatable" : "0",
|
59 |
+
"stateSchema" : [
|
60 |
+
|
61 |
+
],
|
62 |
+
"availability" : {
|
63 |
+
"macOS" : "14.0",
|
64 |
+
"tvOS" : "17.0",
|
65 |
+
"visionOS" : "1.0",
|
66 |
+
"watchOS" : "10.0",
|
67 |
+
"iOS" : "17.0",
|
68 |
+
"macCatalyst" : "17.0"
|
69 |
+
},
|
70 |
+
"modelType" : {
|
71 |
+
"name" : "MLModelType_mlProgram"
|
72 |
+
},
|
73 |
+
"userDefinedMetadata" : {
|
74 |
+
"com.github.apple.coremltools.source_dialect" : "TorchScript",
|
75 |
+
"com.github.apple.coremltools.source" : "torch==2.5.0",
|
76 |
+
"com.github.apple.coremltools.version" : "8.3.0"
|
77 |
+
},
|
78 |
+
"inputSchema" : [
|
79 |
+
{
|
80 |
+
"hasShapeFlexibility" : "0",
|
81 |
+
"isOptional" : "0",
|
82 |
+
"dataType" : "Float32",
|
83 |
+
"formattedType" : "MultiArray (Float32 1 × 128 × 1001)",
|
84 |
+
"shortDescription" : "",
|
85 |
+
"shape" : "[1, 128, 1001]",
|
86 |
+
"name" : "audio_signal",
|
87 |
+
"type" : "MultiArray"
|
88 |
+
},
|
89 |
+
{
|
90 |
+
"hasShapeFlexibility" : "0",
|
91 |
+
"isOptional" : "0",
|
92 |
+
"dataType" : "Int32",
|
93 |
+
"formattedType" : "MultiArray (Int32 1)",
|
94 |
+
"shortDescription" : "",
|
95 |
+
"shape" : "[1]",
|
96 |
+
"name" : "length",
|
97 |
+
"type" : "MultiArray"
|
98 |
+
}
|
99 |
+
],
|
100 |
+
"generatedClassName" : "ParakeetEncoder_int8",
|
101 |
+
"method" : "predict"
|
102 |
+
}
|
103 |
+
]
|
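Per this metadata, the encoder consumes a fixed Float32 [1, 128, 1001] mel-feature tensor plus an Int32 length and produces a Float16 [1, 126, 1024] encoding together with its own length. A minimal Swift sketch of assembling those inputs and running one prediction, assuming the compiled ParakeetEncoder_v2.mlmodelc; the melFeatures source and the element-wise copy are illustrative only:

```swift
import CoreML

/// Run the encoder on one 128 x 1001 mel-feature window.
func encode(encoder: MLModel, melFeatures: [Float], validFrames: Int) throws -> MLMultiArray {
    let audio = try MLMultiArray(shape: [1, 128, 1001], dataType: .float32)
    // Copy the flattened [128 x 1001] features; any missing frames stay zero.
    for (i, v) in melFeatures.enumerated() { audio[i] = NSNumber(value: v) }

    let length = try MLMultiArray(shape: [1], dataType: .int32)
    length[0] = NSNumber(value: Int32(validFrames))

    let inputs = try MLDictionaryFeatureProvider(dictionary: [
        "audio_signal": audio,
        "length": length,
    ])
    let out = try encoder.prediction(from: inputs)
    return out.featureValue(for: "encoder_output")!.multiArrayValue!  // Float16 [1, 126, 1024]
}
```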
ParakeetEncoder_v2.mlmodelc/model.mil
ADDED
The diff for this file is too large to render.
See raw diff
ParakeetEncoder_v2.mlmodelc/weights/weight.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:58ebcc6f19497bf6b2c272a7d699dedf26a269a383b0c889f159cd65b33b1f6d
size 591108480
README.md
CHANGED
@@ -1,3 +1,56 @@
---
license: cc-by-4.0
thumbnail: null
tags:
- automatic-speech-recognition
- speech
- audio
- Transducer
- TDT
- FastConformer
- Conformer
- pytorch
- NeMo
- hf-asr-leaderboard
- coreml
- apple
language:
- en
pipeline_tag: automatic-speech-recognition
base_model:
- nvidia/parakeet-tdt-0.6b-v2
---

# Parakeet TDT 0.6B V2 - CoreML

This is a CoreML-optimized version of NVIDIA's Parakeet TDT 0.6B V2 model, designed for high-performance automatic speech recognition on Apple platforms.

## Model Description

These models will continue to evolve as we optimize performance and accuracy. This model has been converted to CoreML format for efficient on-device inference on Apple Silicon and iOS devices, enabling real-time speech recognition with minimal memory footprint.

## Usage in Swift

See the [FluidAudio repository](https://github.com/FluidInference/FluidAudioSwift) for instructions.

## Performance

- Real-time factor: ~110x on M4 Pro
- Memory usage: ~800MB peak
- Supported platforms: macOS 14+, iOS 17+
- Optimized for: Apple Silicon

## Model Details

- Architecture: FastConformer-TDT
- Parameters: 0.6B
- Sample rate: 16kHz

## License

This model is released under the CC-BY-4.0 license. See the LICENSE file for details.

## Acknowledgments

Based on NVIDIA's Parakeet TDT model. CoreML conversion and Swift integration by the FluidInference team.
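The FluidAudio repository is the supported integration path; as a quick sanity check only, the compiled .mlmodelc bundles in this repo can also be loaded directly with Core ML. A minimal sketch, assuming a local checkout and leaving the compute-unit choice to Core ML (directory handling and names below are assumptions, not the FluidAudio API):

```swift
import CoreML

/// Load the compiled model bundles shipped in this repository.
/// `modelDirectory` is assumed to point at a local checkout of the repo.
func loadParakeetModels(from modelDirectory: URL) throws -> (encoder: MLModel, decoder: MLModel, joint: MLModel) {
    let config = MLModelConfiguration()
    config.computeUnits = .all  // let Core ML pick ANE / GPU / CPU

    let encoder = try MLModel(contentsOf: modelDirectory.appendingPathComponent("ParakeetEncoder_v2.mlmodelc"), configuration: config)
    let decoder = try MLModel(contentsOf: modelDirectory.appendingPathComponent("ParakeetDecoder.mlmodelc"), configuration: config)
    let joint   = try MLModel(contentsOf: modelDirectory.appendingPathComponent("RNNTJoint.mlmodelc"), configuration: config)
    return (encoder, decoder, joint)
}
```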
RNNTJoint.mlmodelc/analytics/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f18d0665c71c76eae9ed704875e89feae268c69d52581f673a73101903c29e81
size 243
RNNTJoint.mlmodelc/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5fe0254d313f1046d438a606112845c7fc89462e0174f6184a746485918db67b
size 392
RNNTJoint.mlmodelc/model.mil
ADDED
@@ -0,0 +1,31 @@
1 |
+
program(1.0)
|
2 |
+
[buildInfo = dict<tensor<string, []>, tensor<string, []>>({{"coremlc-component-MIL", "3405.2.1"}, {"coremlc-version", "3405.2.1"}, {"coremltools-component-torch", "2.4.0"}, {"coremltools-source-dialect", "TorchScript"}, {"coremltools-version", "7.2"}})]
|
3 |
+
{
|
4 |
+
func main<ios15>(tensor<fp32, [?, ?, ?]> decoder_outputs, tensor<fp32, [?, ?, ?]> encoder_outputs) [FlexibleShapeInformation = tuple<tuple<tensor<string, []>, dict<tensor<string, []>, tensor<int32, [?]>>>, tuple<tensor<string, []>, dict<tensor<string, []>, list<tensor<int32, [2]>, ?>>>>((("DefaultShapes", {{"decoder_outputs", [1, 1, 1]}, {"encoder_outputs", [1, 1, 1]}}), ("RangeDims", {{"decoder_outputs", [[1, 100], [1, 1025], [1, 640]]}, {"encoder_outputs", [[1, 100], [1, 1025], [1, 1024]]}})))] {
|
5 |
+
tensor<string, []> encoder_outputs_to_fp16_dtype_0 = const()[name = tensor<string, []>("encoder_outputs_to_fp16_dtype_0"), val = tensor<string, []>("fp16")];
|
6 |
+
tensor<fp16, [640, 1024]> joint_enc_weight_to_fp16 = const()[name = tensor<string, []>("joint_enc_weight_to_fp16"), val = tensor<fp16, [640, 1024]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(64)))];
|
7 |
+
tensor<fp16, [640]> joint_enc_bias_to_fp16 = const()[name = tensor<string, []>("joint_enc_bias_to_fp16"), val = tensor<fp16, [640]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(1310848)))];
|
8 |
+
tensor<fp16, [?, ?, ?]> cast_2 = cast(dtype = encoder_outputs_to_fp16_dtype_0, x = encoder_outputs)[name = tensor<string, []>("cast_2")];
|
9 |
+
tensor<fp16, [?, ?, 640]> linear_0_cast_fp16 = linear(bias = joint_enc_bias_to_fp16, weight = joint_enc_weight_to_fp16, x = cast_2)[name = tensor<string, []>("linear_0_cast_fp16")];
|
10 |
+
tensor<string, []> decoder_outputs_to_fp16_dtype_0 = const()[name = tensor<string, []>("decoder_outputs_to_fp16_dtype_0"), val = tensor<string, []>("fp16")];
|
11 |
+
tensor<fp16, [640, 640]> joint_pred_weight_to_fp16 = const()[name = tensor<string, []>("joint_pred_weight_to_fp16"), val = tensor<fp16, [640, 640]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(1312192)))];
|
12 |
+
tensor<fp16, [640]> joint_pred_bias_to_fp16 = const()[name = tensor<string, []>("joint_pred_bias_to_fp16"), val = tensor<fp16, [640]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(2131456)))];
|
13 |
+
tensor<fp16, [?, ?, ?]> cast_1 = cast(dtype = decoder_outputs_to_fp16_dtype_0, x = decoder_outputs)[name = tensor<string, []>("cast_1")];
|
14 |
+
tensor<fp16, [?, ?, 640]> linear_1_cast_fp16 = linear(bias = joint_pred_bias_to_fp16, weight = joint_pred_weight_to_fp16, x = cast_1)[name = tensor<string, []>("linear_1_cast_fp16")];
|
15 |
+
tensor<int32, [1]> f_axes_0 = const()[name = tensor<string, []>("f_axes_0"), val = tensor<int32, [1]>([2])];
|
16 |
+
tensor<fp16, [?, ?, 1, 640]> f_cast_fp16 = expand_dims(axes = f_axes_0, x = linear_0_cast_fp16)[name = tensor<string, []>("f_cast_fp16")];
|
17 |
+
tensor<int32, [1]> g_axes_0 = const()[name = tensor<string, []>("g_axes_0"), val = tensor<int32, [1]>([1])];
|
18 |
+
tensor<fp16, [?, 1, ?, 640]> g_cast_fp16 = expand_dims(axes = g_axes_0, x = linear_1_cast_fp16)[name = tensor<string, []>("g_cast_fp16")];
|
19 |
+
tensor<fp16, [?, ?, ?, 640]> input_1_cast_fp16 = add(x = f_cast_fp16, y = g_cast_fp16)[name = tensor<string, []>("input_1_cast_fp16")];
|
20 |
+
tensor<fp16, [?, ?, ?, 640]> input_3_cast_fp16 = relu(x = input_1_cast_fp16)[name = tensor<string, []>("input_3_cast_fp16")];
|
21 |
+
tensor<fp16, [1030, 640]> joint_joint_net_2_weight_to_fp16 = const()[name = tensor<string, []>("joint_joint_net_2_weight_to_fp16"), val = tensor<fp16, [1030, 640]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(2132800)))];
|
22 |
+
tensor<fp16, [1030]> joint_joint_net_2_bias_to_fp16 = const()[name = tensor<string, []>("joint_joint_net_2_bias_to_fp16"), val = tensor<fp16, [1030]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(3451264)))];
|
23 |
+
tensor<fp16, [?, ?, ?, 1030]> linear_2_cast_fp16 = linear(bias = joint_joint_net_2_bias_to_fp16, weight = joint_joint_net_2_weight_to_fp16, x = input_3_cast_fp16)[name = tensor<string, []>("linear_2_cast_fp16")];
|
24 |
+
tensor<int32, []> var_29 = const()[name = tensor<string, []>("op_29"), val = tensor<int32, []>(-1)];
|
25 |
+
tensor<fp16, [?, ?, ?, 1030]> var_31_softmax_cast_fp16 = softmax(axis = var_29, x = linear_2_cast_fp16)[name = tensor<string, []>("op_31_softmax_cast_fp16")];
|
26 |
+
tensor<fp16, []> var_31_epsilon_0_to_fp16 = const()[name = tensor<string, []>("op_31_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x0p+0)];
|
27 |
+
tensor<fp16, [?, ?, ?, 1030]> var_31_cast_fp16 = log(epsilon = var_31_epsilon_0_to_fp16, x = var_31_softmax_cast_fp16)[name = tensor<string, []>("op_31_cast_fp16")];
|
28 |
+
tensor<string, []> var_31_cast_fp16_to_fp32_dtype_0 = const()[name = tensor<string, []>("op_31_cast_fp16_to_fp32_dtype_0"), val = tensor<string, []>("fp32")];
|
29 |
+
tensor<fp32, [?, ?, ?, 1030]> logits = cast(dtype = var_31_cast_fp16_to_fp32_dtype_0, x = var_31_cast_fp16)[name = tensor<string, []>("cast_0")];
|
30 |
+
} -> (logits);
|
31 |
+
}
|
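This joint network projects encoder frames (1024-dim) and decoder outputs (640-dim) into a shared 640-dim space, adds them with broadcasting, applies ReLU and a final linear layer, and returns log-softmax scores over 1030 classes, which TokenDurationPrediction later splits into 1025 token logits and 5 duration logits. A minimal Swift sketch of invoking it for one (encoder frame, decoder step) pair; the helper name and the single-step shapes are assumptions within the model's flexible-shape ranges:

```swift
import CoreML

/// Combine one encoder frame ([1, 1, 1024]) with one decoder output ([1, 1, 640])
/// and return the [1, 1, 1, 1030] log-probability tensor produced by the joint.
func jointStep(joint: MLModel,
               encoderFrame: MLMultiArray,   // Float32 [1, 1, 1024]
               decoderOutput: MLMultiArray)  // Float32 [1, 1, 640]
throws -> MLMultiArray {
    let inputs = try MLDictionaryFeatureProvider(dictionary: [
        "encoder_outputs": encoderFrame,
        "decoder_outputs": decoderOutput,
    ])
    let out = try joint.prediction(from: inputs)
    return out.featureValue(for: "logits")!.multiArrayValue!
}
```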
RNNTJoint.mlmodelc/weights/weight.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:863c0b9d0f23533bf3dc20986b917293000bed662f778976b33e1cb0fb3ee1f3
size 3453388
TokenDurationPrediction.mlmodelc/analytics/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d844856c854d42e6a58215dae5f75f82ea4da7cb7dbefb60db082a56c3a223dc
size 243
TokenDurationPrediction.mlmodelc/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:03bd0964aae75139a64e2d25090b2c25c4aabe234bc5f63ae23d5e4d616d25d3
size 424
TokenDurationPrediction.mlmodelc/metadata.json
ADDED
@@ -0,0 +1,85 @@
1 |
+
[
|
2 |
+
{
|
3 |
+
"shortDescription" : "Token and duration prediction for TDT decoder",
|
4 |
+
"metadataOutputVersion" : "3.0",
|
5 |
+
"outputSchema" : [
|
6 |
+
{
|
7 |
+
"hasShapeFlexibility" : "0",
|
8 |
+
"isOptional" : "0",
|
9 |
+
"dataType" : "Int32",
|
10 |
+
"formattedType" : "MultiArray (Int32 1)",
|
11 |
+
"shortDescription" : "",
|
12 |
+
"shape" : "[1]",
|
13 |
+
"name" : "var_17",
|
14 |
+
"type" : "MultiArray"
|
15 |
+
},
|
16 |
+
{
|
17 |
+
"hasShapeFlexibility" : "0",
|
18 |
+
"isOptional" : "0",
|
19 |
+
"dataType" : "Float16",
|
20 |
+
"formattedType" : "MultiArray (Float16 1)",
|
21 |
+
"shortDescription" : "",
|
22 |
+
"shape" : "[1]",
|
23 |
+
"name" : "reduce_max_0",
|
24 |
+
"type" : "MultiArray"
|
25 |
+
},
|
26 |
+
{
|
27 |
+
"hasShapeFlexibility" : "0",
|
28 |
+
"isOptional" : "0",
|
29 |
+
"dataType" : "Int32",
|
30 |
+
"formattedType" : "MultiArray (Int32 1)",
|
31 |
+
"shortDescription" : "",
|
32 |
+
"shape" : "[1]",
|
33 |
+
"name" : "var_24",
|
34 |
+
"type" : "MultiArray"
|
35 |
+
}
|
36 |
+
],
|
37 |
+
"version" : "1.0",
|
38 |
+
"modelParameters" : [
|
39 |
+
|
40 |
+
],
|
41 |
+
"author" : "FluidAudio",
|
42 |
+
"specificationVersion" : 7,
|
43 |
+
"mlProgramOperationTypeHistogram" : {
|
44 |
+
"SliceByIndex" : 2,
|
45 |
+
"Ios16.reduceArgmax" : 2,
|
46 |
+
"Ios16.reshape" : 1,
|
47 |
+
"Ios16.reduceMax" : 1
|
48 |
+
},
|
49 |
+
"computePrecision" : "Mixed (Float16, Int32)",
|
50 |
+
"stateSchema" : [
|
51 |
+
|
52 |
+
],
|
53 |
+
"isUpdatable" : "0",
|
54 |
+
"availability" : {
|
55 |
+
"macOS" : "13.0",
|
56 |
+
"tvOS" : "16.0",
|
57 |
+
"visionOS" : "1.0",
|
58 |
+
"watchOS" : "9.0",
|
59 |
+
"iOS" : "16.0",
|
60 |
+
"macCatalyst" : "16.0"
|
61 |
+
},
|
62 |
+
"modelType" : {
|
63 |
+
"name" : "MLModelType_mlProgram"
|
64 |
+
},
|
65 |
+
"inputSchema" : [
|
66 |
+
{
|
67 |
+
"hasShapeFlexibility" : "0",
|
68 |
+
"isOptional" : "0",
|
69 |
+
"dataType" : "Float16",
|
70 |
+
"formattedType" : "MultiArray (Float16 1 × 1 × 1 × 1030)",
|
71 |
+
"shortDescription" : "",
|
72 |
+
"shape" : "[1, 1, 1, 1030]",
|
73 |
+
"name" : "logits",
|
74 |
+
"type" : "MultiArray"
|
75 |
+
}
|
76 |
+
],
|
77 |
+
"userDefinedMetadata" : {
|
78 |
+
"com.github.apple.coremltools.source_dialect" : "TorchScript",
|
79 |
+
"com.github.apple.coremltools.source" : "torch==2.5.0",
|
80 |
+
"com.github.apple.coremltools.version" : "8.3.0"
|
81 |
+
},
|
82 |
+
"generatedClassName" : "TokenDurationPrediction",
|
83 |
+
"method" : "predict"
|
84 |
+
}
|
85 |
+
]
|
TokenDurationPrediction.mlmodelc/model.mil
ADDED
@@ -0,0 +1,25 @@
1 |
+
program(1.0)
|
2 |
+
[buildInfo = dict<tensor<string, []>, tensor<string, []>>({{"coremlc-component-MIL", "3405.2.1"}, {"coremlc-version", "3404.23.1"}, {"coremltools-component-torch", "2.5.0"}, {"coremltools-source-dialect", "TorchScript"}, {"coremltools-version", "8.3.0"}})]
|
3 |
+
{
|
4 |
+
func main<ios16>(tensor<fp16, [1, 1, 1, 1030]> logits) {
|
5 |
+
tensor<int32, [1]> var_3 = const()[name = tensor<string, []>("op_3"), val = tensor<int32, [1]>([-1])];
|
6 |
+
tensor<fp16, [1030]> flattened_cast_fp16 = reshape(shape = var_3, x = logits)[name = tensor<string, []>("flattened_cast_fp16")];
|
7 |
+
tensor<int32, [1]> token_logits_begin_0 = const()[name = tensor<string, []>("token_logits_begin_0"), val = tensor<int32, [1]>([0])];
|
8 |
+
tensor<int32, [1]> token_logits_end_0 = const()[name = tensor<string, []>("token_logits_end_0"), val = tensor<int32, [1]>([1025])];
|
9 |
+
tensor<bool, [1]> token_logits_end_mask_0 = const()[name = tensor<string, []>("token_logits_end_mask_0"), val = tensor<bool, [1]>([false])];
|
10 |
+
tensor<fp16, [1025]> token_logits_cast_fp16 = slice_by_index(begin = token_logits_begin_0, end = token_logits_end_0, end_mask = token_logits_end_mask_0, x = flattened_cast_fp16)[name = tensor<string, []>("token_logits_cast_fp16")];
|
11 |
+
tensor<int32, [1]> duration_logits_begin_0 = const()[name = tensor<string, []>("duration_logits_begin_0"), val = tensor<int32, [1]>([1025])];
|
12 |
+
tensor<int32, [1]> duration_logits_end_0 = const()[name = tensor<string, []>("duration_logits_end_0"), val = tensor<int32, [1]>([1])];
|
13 |
+
tensor<bool, [1]> duration_logits_end_mask_0 = const()[name = tensor<string, []>("duration_logits_end_mask_0"), val = tensor<bool, [1]>([true])];
|
14 |
+
tensor<fp16, [5]> duration_logits_cast_fp16 = slice_by_index(begin = duration_logits_begin_0, end = duration_logits_end_0, end_mask = duration_logits_end_mask_0, x = flattened_cast_fp16)[name = tensor<string, []>("duration_logits_cast_fp16")];
|
15 |
+
tensor<int32, []> var_17_axis_0 = const()[name = tensor<string, []>("op_17_axis_0"), val = tensor<int32, []>(0)];
|
16 |
+
tensor<bool, []> var_17_keep_dims_0 = const()[name = tensor<string, []>("op_17_keep_dims_0"), val = tensor<bool, []>(true)];
|
17 |
+
tensor<int32, [1]> var_17 = reduce_argmax(axis = var_17_axis_0, keep_dims = var_17_keep_dims_0, x = token_logits_cast_fp16)[name = tensor<string, []>("op_17_cast_fp16")];
|
18 |
+
tensor<int32, [1]> reduce_max_0_axes_0 = const()[name = tensor<string, []>("reduce_max_0_axes_0"), val = tensor<int32, [1]>([0])];
|
19 |
+
tensor<bool, []> reduce_max_0_keep_dims_0 = const()[name = tensor<string, []>("reduce_max_0_keep_dims_0"), val = tensor<bool, []>(true)];
|
20 |
+
tensor<fp16, [1]> reduce_max_0 = reduce_max(axes = reduce_max_0_axes_0, keep_dims = reduce_max_0_keep_dims_0, x = token_logits_cast_fp16)[name = tensor<string, []>("reduce_max_0_cast_fp16")];
|
21 |
+
tensor<int32, []> var_24_axis_0 = const()[name = tensor<string, []>("op_24_axis_0"), val = tensor<int32, []>(0)];
|
22 |
+
tensor<bool, []> var_24_keep_dims_0 = const()[name = tensor<string, []>("op_24_keep_dims_0"), val = tensor<bool, []>(true)];
|
23 |
+
tensor<int32, [1]> var_24 = reduce_argmax(axis = var_24_axis_0, keep_dims = var_24_keep_dims_0, x = duration_logits_cast_fp16)[name = tensor<string, []>("op_24_cast_fp16")];
|
24 |
+
} -> (var_17, reduce_max_0, var_24);
|
25 |
+
}
|
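This small program flattens the [1, 1, 1, 1030] joint logits, takes the argmax and max over the first 1025 entries (the token id and its score) and the argmax over the remaining 5 entries (the TDT duration bin). The same post-processing is easy to mirror on the CPU; a sketch assuming the logits have already been copied into a plain Float array:

```swift
/// Split the 1030 joint logits into a best token and a best duration bin,
/// mirroring the slice + argmax / reduce_max operations in TokenDurationPrediction.
func splitTokenAndDuration(logits: [Float]) -> (token: Int, tokenScore: Float, durationBin: Int) {
    precondition(logits.count == 1030)
    let tokenLogits = logits[0..<1025]
    let durationLogits = logits[1025..<1030]

    // ArraySlice keeps the original indices, so subtract the offset for the duration bin.
    let bestToken = tokenLogits.indices.max(by: { tokenLogits[$0] < tokenLogits[$1] })!
    let bestDuration = durationLogits.indices.max(by: { durationLogits[$0] < durationLogits[$1] })!
    return (bestToken, tokenLogits[bestToken], bestDuration - 1025)
}
```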
config.json
ADDED
@@ -0,0 +1 @@
{}
parakeet_vocab.json
ADDED
@@ -0,0 +1,1033 @@
1 |
+
{
|
2 |
+
"479" : "▁happ",
|
3 |
+
"493" : "ial",
|
4 |
+
"386" : "▁diff",
|
5 |
+
"398" : "▁little",
|
6 |
+
"168" : "▁ch",
|
7 |
+
"872" : "2",
|
8 |
+
"227" : "ie",
|
9 |
+
"251" : "▁R",
|
10 |
+
"488" : "▁day",
|
11 |
+
"38" : "ed",
|
12 |
+
"610" : "▁might",
|
13 |
+
"127" : "▁can",
|
14 |
+
"70" : "ct",
|
15 |
+
"903" : "à",
|
16 |
+
"329" : "▁been",
|
17 |
+
"694" : "ren",
|
18 |
+
"1005" : "Ž",
|
19 |
+
"661" : "▁reg",
|
20 |
+
"597" : "▁around",
|
21 |
+
"442" : "ys",
|
22 |
+
"764" : "▁number",
|
23 |
+
"705" : "▁trans",
|
24 |
+
"275" : "▁had",
|
25 |
+
"271" : "ak",
|
26 |
+
"1024" : "▁warming",
|
27 |
+
"564" : "▁play",
|
28 |
+
"32" : "▁y",
|
29 |
+
"463" : "▁under",
|
30 |
+
"684" : "ute",
|
31 |
+
"529" : "▁quest",
|
32 |
+
"719" : "ock",
|
33 |
+
"235" : "her",
|
34 |
+
"861" : "q",
|
35 |
+
"987" : "Č",
|
36 |
+
"593" : "urn",
|
37 |
+
"555" : "ath",
|
38 |
+
"216" : "▁think",
|
39 |
+
"211" : "el",
|
40 |
+
"500" : "▁bit",
|
41 |
+
"353" : "ry",
|
42 |
+
"490" : "▁Yeah",
|
43 |
+
"750" : "▁sim",
|
44 |
+
"283" : "▁these",
|
45 |
+
"320" : "▁sp",
|
46 |
+
"945" : "β",
|
47 |
+
"179" : "ca",
|
48 |
+
"934" : "т",
|
49 |
+
"258" : "ear",
|
50 |
+
"870" : "R",
|
51 |
+
"974" : "Φ",
|
52 |
+
"157" : "▁H",
|
53 |
+
"280" : "ine",
|
54 |
+
"381" : "▁well",
|
55 |
+
"21" : "▁m",
|
56 |
+
"668" : "▁la",
|
57 |
+
"429" : "▁bo",
|
58 |
+
"812" : "▁ty",
|
59 |
+
"537" : "ual",
|
60 |
+
"743" : "ss",
|
61 |
+
"119" : "▁kn",
|
62 |
+
"863" : "P",
|
63 |
+
"855" : "0",
|
64 |
+
"333" : "ple",
|
65 |
+
"850" : "x",
|
66 |
+
"959" : "я",
|
67 |
+
"467" : "▁bu",
|
68 |
+
"262" : "▁would",
|
69 |
+
"835" : "g",
|
70 |
+
"254" : "▁who",
|
71 |
+
"233" : "▁here",
|
72 |
+
"993" : "ż",
|
73 |
+
"889" : "Z",
|
74 |
+
"231" : "ack",
|
75 |
+
"788" : "ution",
|
76 |
+
"466" : "ning",
|
77 |
+
"332" : "▁did",
|
78 |
+
"388" : "cc",
|
79 |
+
"260" : "▁E",
|
80 |
+
"839" : ",",
|
81 |
+
"582" : "▁three",
|
82 |
+
"1000" : "õ",
|
83 |
+
"73" : "ver",
|
84 |
+
"486" : "ign",
|
85 |
+
"541" : "▁ser",
|
86 |
+
"800" : "▁exper",
|
87 |
+
"192" : "um",
|
88 |
+
"199" : "▁from",
|
89 |
+
"223" : "▁because",
|
90 |
+
"68" : "ent",
|
91 |
+
"608" : "▁tra",
|
92 |
+
"383" : "▁pre",
|
93 |
+
"774" : "▁place",
|
94 |
+
"194" : "▁your",
|
95 |
+
"148" : "ra",
|
96 |
+
"955" : "Ü",
|
97 |
+
"158" : "▁fr",
|
98 |
+
"670" : "▁sub",
|
99 |
+
"906" : "π",
|
100 |
+
"829" : "l",
|
101 |
+
"202" : "pe",
|
102 |
+
"948" : "δ",
|
103 |
+
"480" : "ater",
|
104 |
+
"342" : "ree",
|
105 |
+
"958" : "ś",
|
106 |
+
"967" : "Á",
|
107 |
+
"171" : "ate",
|
108 |
+
"91" : "am",
|
109 |
+
"114" : "ation",
|
110 |
+
"390" : "int",
|
111 |
+
"1013" : "À",
|
112 |
+
"794" : "▁read",
|
113 |
+
"1011" : "ф",
|
114 |
+
"355" : "ble",
|
115 |
+
"921" : "ū",
|
116 |
+
"234" : "▁pe",
|
117 |
+
"573" : "▁id",
|
118 |
+
"785" : "gan",
|
119 |
+
"335" : "▁other",
|
120 |
+
"963" : "Ö",
|
121 |
+
"570" : "get",
|
122 |
+
"2" : "▁th",
|
123 |
+
"273" : "ide",
|
124 |
+
"574" : "▁Oh",
|
125 |
+
"807" : "▁open",
|
126 |
+
"155" : "▁The",
|
127 |
+
"506" : "vers",
|
128 |
+
"729" : "▁sort",
|
129 |
+
"318" : "▁got",
|
130 |
+
"550" : "ank",
|
131 |
+
"896" : "ó",
|
132 |
+
"563" : "ict",
|
133 |
+
"1010" : "з",
|
134 |
+
"520" : "▁gonna",
|
135 |
+
"1020" : "Ψ",
|
136 |
+
"548" : "ved",
|
137 |
+
"848" : "S",
|
138 |
+
"578" : "▁rel",
|
139 |
+
"828" : "r",
|
140 |
+
"361" : "▁cont",
|
141 |
+
"379" : "ish",
|
142 |
+
"259" : "▁tim",
|
143 |
+
"441" : "▁imp",
|
144 |
+
"44" : "as",
|
145 |
+
"620" : "ways",
|
146 |
+
"34" : "▁I",
|
147 |
+
"87" : "st",
|
148 |
+
"13" : "nd",
|
149 |
+
"350" : "▁every",
|
150 |
+
"866" : "D",
|
151 |
+
"545" : "▁V",
|
152 |
+
"182" : "if",
|
153 |
+
"615" : "cial",
|
154 |
+
"69" : "ke",
|
155 |
+
"276" : "so",
|
156 |
+
"732" : "ics",
|
157 |
+
"749" : "als",
|
158 |
+
"605" : "aking",
|
159 |
+
"857" : "-",
|
160 |
+
"446" : "▁down",
|
161 |
+
"41" : "ar",
|
162 |
+
"203" : "un",
|
163 |
+
"816" : "▁met",
|
164 |
+
"543" : "▁ind",
|
165 |
+
"926" : "æ",
|
166 |
+
"64" : "▁on",
|
167 |
+
"246" : "▁them",
|
168 |
+
"172" : "qu",
|
169 |
+
"229" : "▁tr",
|
170 |
+
"775" : "▁gener",
|
171 |
+
"592" : "ower",
|
172 |
+
"617" : "▁give",
|
173 |
+
"399" : "ical",
|
174 |
+
"319" : "ag",
|
175 |
+
"669" : "▁last",
|
176 |
+
"400" : "▁gr",
|
177 |
+
"389" : "ittle",
|
178 |
+
"533" : "▁fu",
|
179 |
+
"460" : "uct",
|
180 |
+
"825" : "i",
|
181 |
+
"992" : "Ç",
|
182 |
+
"432" : "▁much",
|
183 |
+
"638" : "ction",
|
184 |
+
"700" : "▁love",
|
185 |
+
"988" : "έ",
|
186 |
+
"658" : "▁inv",
|
187 |
+
"549" : "▁still",
|
188 |
+
"322" : "act",
|
189 |
+
"143" : "il",
|
190 |
+
"966" : "в",
|
191 |
+
"245" : "▁It",
|
192 |
+
"445" : "▁yeah",
|
193 |
+
"524" : "self",
|
194 |
+
"742" : "ata",
|
195 |
+
"380" : "kay",
|
196 |
+
"176" : "▁su",
|
197 |
+
"624" : "▁gen",
|
198 |
+
"766" : "▁saying",
|
199 |
+
"42" : "▁that",
|
200 |
+
"136" : "▁ab",
|
201 |
+
"224" : "▁by",
|
202 |
+
"811" : "▁getting",
|
203 |
+
"76" : "id",
|
204 |
+
"430" : "be",
|
205 |
+
"124" : "▁know",
|
206 |
+
"88" : "ch",
|
207 |
+
"420" : "▁she",
|
208 |
+
"822" : "o",
|
209 |
+
"991" : "Å",
|
210 |
+
"623" : "ily",
|
211 |
+
"230" : "▁then",
|
212 |
+
"65" : "▁T",
|
213 |
+
"86" : "▁st",
|
214 |
+
"873" : "J",
|
215 |
+
"464" : "▁br",
|
216 |
+
"165" : "pp",
|
217 |
+
"325" : "iz",
|
218 |
+
"237" : "▁F",
|
219 |
+
"939" : "č",
|
220 |
+
"250" : "▁say",
|
221 |
+
"423" : "ord",
|
222 |
+
"516" : "▁fl",
|
223 |
+
"1030" : "▁urge",
|
224 |
+
"977" : "ι",
|
225 |
+
"358" : "▁kind",
|
226 |
+
"471" : "co",
|
227 |
+
"7" : "▁w",
|
228 |
+
"252" : "▁people",
|
229 |
+
"12" : "er",
|
230 |
+
"932" : "ψ",
|
231 |
+
"632" : "▁cour",
|
232 |
+
"440" : "▁comm",
|
233 |
+
"664" : "▁cr",
|
234 |
+
"637" : "▁num",
|
235 |
+
"588" : "▁To",
|
236 |
+
"286" : "▁sa",
|
237 |
+
"292" : "pt",
|
238 |
+
"772" : "▁better",
|
239 |
+
"396" : "▁him",
|
240 |
+
"1007" : "Γ",
|
241 |
+
"26" : "▁d",
|
242 |
+
"450" : "ens",
|
243 |
+
"780" : "cept",
|
244 |
+
"936" : "ω",
|
245 |
+
"123" : "▁And",
|
246 |
+
"349" : "▁need",
|
247 |
+
"536" : "we",
|
248 |
+
"882" : "8",
|
249 |
+
"546" : "he",
|
250 |
+
"854" : "?",
|
251 |
+
"875" : ":",
|
252 |
+
"712" : "▁exam",
|
253 |
+
"437" : "ary",
|
254 |
+
"394" : "ip",
|
255 |
+
"344" : "own",
|
256 |
+
"709" : "▁fin",
|
257 |
+
"457" : "▁K",
|
258 |
+
"996" : "χ",
|
259 |
+
"990" : "ы",
|
260 |
+
"89" : "▁li",
|
261 |
+
"878" : "3",
|
262 |
+
"503" : "fe",
|
263 |
+
"61" : "et",
|
264 |
+
"782" : "▁understand",
|
265 |
+
"126" : "op",
|
266 |
+
"135" : "▁at",
|
267 |
+
"831" : "u",
|
268 |
+
"456" : "ody",
|
269 |
+
"594" : "▁okay",
|
270 |
+
"695" : "erest",
|
271 |
+
"324" : "▁also",
|
272 |
+
"51" : "▁it",
|
273 |
+
"542" : "▁rem",
|
274 |
+
"174" : "▁ex",
|
275 |
+
"1006" : "ț",
|
276 |
+
"128" : "▁or",
|
277 |
+
"357" : "ue",
|
278 |
+
"641" : "▁um",
|
279 |
+
"802" : "▁ele",
|
280 |
+
"213" : "▁some",
|
281 |
+
"606" : "▁pos",
|
282 |
+
"929" : "ν",
|
283 |
+
"885" : "!",
|
284 |
+
"784" : "▁thought",
|
285 |
+
"716" : "▁stud",
|
286 |
+
"232" : "▁pl",
|
287 |
+
"522" : "ces",
|
288 |
+
"184" : "▁if",
|
289 |
+
"37" : "is",
|
290 |
+
"867" : "N",
|
291 |
+
"653" : "uc",
|
292 |
+
"413" : "ult",
|
293 |
+
"167" : "ess",
|
294 |
+
"22" : "en",
|
295 |
+
"439" : "ving",
|
296 |
+
"104" : "▁r",
|
297 |
+
"755" : "oc",
|
298 |
+
"63" : "▁re",
|
299 |
+
"622" : "ward",
|
300 |
+
"1003" : "Ł",
|
301 |
+
"468" : "▁use",
|
302 |
+
"879" : "K",
|
303 |
+
"838" : "p",
|
304 |
+
"11" : "ou",
|
305 |
+
"54" : "le",
|
306 |
+
"118" : "▁not",
|
307 |
+
"558" : "ft",
|
308 |
+
"858" : "M",
|
309 |
+
"579" : "▁before",
|
310 |
+
"652" : "les",
|
311 |
+
"98" : "▁do",
|
312 |
+
"1026" : "▁issue",
|
313 |
+
"360" : "▁back",
|
314 |
+
"790" : "ason",
|
315 |
+
"731" : "▁today",
|
316 |
+
"706" : "▁count",
|
317 |
+
"650" : "▁didn",
|
318 |
+
"348" : "▁where",
|
319 |
+
"371" : "ep",
|
320 |
+
"898" : "ü",
|
321 |
+
"384" : "▁two",
|
322 |
+
"121" : "▁B",
|
323 |
+
"720" : "▁used",
|
324 |
+
"120" : "ight",
|
325 |
+
"1022" : "ξ",
|
326 |
+
"345" : "▁tw",
|
327 |
+
"307" : "▁work",
|
328 |
+
"655" : "ating",
|
329 |
+
"745" : "ween",
|
330 |
+
"596" : "▁bel",
|
331 |
+
"852" : "B",
|
332 |
+
"986" : "ì",
|
333 |
+
"197" : "▁get",
|
334 |
+
"79" : "▁he",
|
335 |
+
"504" : "▁doing",
|
336 |
+
"644" : "▁own",
|
337 |
+
"791" : "▁problem",
|
338 |
+
"931" : "γ",
|
339 |
+
"33" : "▁l",
|
340 |
+
"187" : "ab",
|
341 |
+
"771" : "▁between",
|
342 |
+
"783" : "▁fun",
|
343 |
+
"823" : "a",
|
344 |
+
"723" : "▁No",
|
345 |
+
"303" : "▁mo",
|
346 |
+
"482" : "ition",
|
347 |
+
"164" : "▁M",
|
348 |
+
"346" : "▁part",
|
349 |
+
"103" : "ad",
|
350 |
+
"902" : "ç",
|
351 |
+
"781" : "ull",
|
352 |
+
"411" : "▁actually",
|
353 |
+
"598" : "ful",
|
354 |
+
"1002" : "ħ",
|
355 |
+
"351" : "pl",
|
356 |
+
"343" : "▁into",
|
357 |
+
"908" : "ú",
|
358 |
+
"337" : "ite",
|
359 |
+
"894" : "á",
|
360 |
+
"912" : "ī",
|
361 |
+
"864" : "z",
|
362 |
+
"925" : "τ",
|
363 |
+
"153" : "▁con",
|
364 |
+
"144" : "▁but",
|
365 |
+
"433" : "▁per",
|
366 |
+
"711" : "▁pol",
|
367 |
+
"631" : "▁sm",
|
368 |
+
"431" : "ount",
|
369 |
+
"999" : "Í",
|
370 |
+
"946" : "ø",
|
371 |
+
"141" : "use",
|
372 |
+
"77" : "▁for",
|
373 |
+
"733" : "▁vide",
|
374 |
+
"730" : "▁For",
|
375 |
+
"418" : "ations",
|
376 |
+
"693" : "▁always",
|
377 |
+
"642" : "ood",
|
378 |
+
"1015" : "Ā",
|
379 |
+
"154" : "▁all",
|
380 |
+
"659" : "ably",
|
381 |
+
"1027" : "▁stay",
|
382 |
+
"777" : "▁ins",
|
383 |
+
"740" : "▁keep",
|
384 |
+
"96" : "▁so",
|
385 |
+
"806" : "▁partic",
|
386 |
+
"313" : "▁im",
|
387 |
+
"507" : "av",
|
388 |
+
"880" : "4",
|
389 |
+
"792" : "▁doesn",
|
390 |
+
"426" : "▁am",
|
391 |
+
"1001" : "ě",
|
392 |
+
"526" : "▁If",
|
393 |
+
"465" : "▁take",
|
394 |
+
"436" : "vel",
|
395 |
+
"116" : "ere",
|
396 |
+
"576" : "ever",
|
397 |
+
"568" : "oth",
|
398 |
+
"166" : "▁com",
|
399 |
+
"151" : "ul",
|
400 |
+
"798" : "ah",
|
401 |
+
"779" : "cond",
|
402 |
+
"556" : "▁end",
|
403 |
+
"161" : "ea",
|
404 |
+
"39" : "▁g",
|
405 |
+
"765" : "ention",
|
406 |
+
"100" : "th",
|
407 |
+
"461" : "▁only",
|
408 |
+
"590" : "▁hel",
|
409 |
+
"505" : "▁St",
|
410 |
+
"957" : "ń",
|
411 |
+
"876" : "5",
|
412 |
+
"580" : "▁feel",
|
413 |
+
"321" : "ans",
|
414 |
+
"972" : "ь",
|
415 |
+
"718" : "▁car",
|
416 |
+
"851" : "W",
|
417 |
+
"960" : "đ",
|
418 |
+
"469" : "▁Ch",
|
419 |
+
"291" : "one",
|
420 |
+
"83" : "ly",
|
421 |
+
"295" : "▁has",
|
422 |
+
"84" : "▁go",
|
423 |
+
"981" : "ő",
|
424 |
+
"923" : "λ",
|
425 |
+
"52" : "▁be",
|
426 |
+
"821" : "t",
|
427 |
+
"323" : "▁te",
|
428 |
+
"50" : "al",
|
429 |
+
"760" : "ense",
|
430 |
+
"149" : "ore",
|
431 |
+
"306" : "▁le",
|
432 |
+
"403" : "▁thr",
|
433 |
+
"628" : "ob",
|
434 |
+
"299" : "▁look",
|
435 |
+
"406" : "▁This",
|
436 |
+
"0" : "<unk>",
|
437 |
+
"75" : "all",
|
438 |
+
"475" : "▁call",
|
439 |
+
"341" : "reat",
|
440 |
+
"519" : "ents",
|
441 |
+
"66" : "▁A",
|
442 |
+
"186" : "nt",
|
443 |
+
"928" : "о",
|
444 |
+
"244" : "ople",
|
445 |
+
"393" : "ence",
|
446 |
+
"834" : "m",
|
447 |
+
"833" : "y",
|
448 |
+
"414" : "able",
|
449 |
+
"297" : "▁very",
|
450 |
+
"364" : "▁pr",
|
451 |
+
"865" : "L",
|
452 |
+
"35" : "it",
|
453 |
+
"339" : "omet",
|
454 |
+
"27" : "es",
|
455 |
+
"150" : "▁there",
|
456 |
+
"715" : "ell",
|
457 |
+
"677" : "▁import",
|
458 |
+
"681" : "▁ear",
|
459 |
+
"820" : "e",
|
460 |
+
"139" : "▁So",
|
461 |
+
"449" : "na",
|
462 |
+
"302" : "▁time",
|
463 |
+
"532" : "▁What",
|
464 |
+
"133" : "ck",
|
465 |
+
"970" : "ο",
|
466 |
+
"773" : "cus",
|
467 |
+
"228" : "▁us",
|
468 |
+
"552" : "▁wr",
|
469 |
+
"680" : "▁made",
|
470 |
+
"871" : "E",
|
471 |
+
"874" : "U",
|
472 |
+
"895" : "£",
|
473 |
+
"846" : "T",
|
474 |
+
"55" : "ion",
|
475 |
+
"492" : "ile",
|
476 |
+
"787" : "cy",
|
477 |
+
"105" : "ir",
|
478 |
+
"662" : "lic",
|
479 |
+
"629" : "▁tell",
|
480 |
+
"367" : "▁good",
|
481 |
+
"514" : "form",
|
482 |
+
"656" : "olog",
|
483 |
+
"334" : "ually",
|
484 |
+
"294" : "ong",
|
485 |
+
"485" : "ade",
|
486 |
+
"682" : "▁ac",
|
487 |
+
"106" : "▁was",
|
488 |
+
"1028" : "▁together",
|
489 |
+
"910" : "ã",
|
490 |
+
"142" : "ter",
|
491 |
+
"922" : "Δ",
|
492 |
+
"175" : "very",
|
493 |
+
"314" : "▁ag",
|
494 |
+
"327" : "▁That",
|
495 |
+
"826" : "s",
|
496 |
+
"183" : "ive",
|
497 |
+
"979" : "г",
|
498 |
+
"751" : "vern",
|
499 |
+
"997" : "э",
|
500 |
+
"278" : "eah",
|
501 |
+
"517" : "fter",
|
502 |
+
"311" : "per",
|
503 |
+
"535" : "▁show",
|
504 |
+
"918" : "^",
|
505 |
+
"954" : "š",
|
506 |
+
"722" : "stand",
|
507 |
+
"6" : "re",
|
508 |
+
"93" : "ce",
|
509 |
+
"435" : "▁differe",
|
510 |
+
"746" : "▁stuff",
|
511 |
+
"915" : "ρ",
|
512 |
+
"602" : "▁supp",
|
513 |
+
"209" : "▁L",
|
514 |
+
"767" : "▁commun",
|
515 |
+
"769" : "akes",
|
516 |
+
"375" : "▁lot",
|
517 |
+
"859" : "H",
|
518 |
+
"397" : "▁make",
|
519 |
+
"340" : "ber",
|
520 |
+
"886" : "%",
|
521 |
+
"736" : "▁Al",
|
522 |
+
"600" : "ise",
|
523 |
+
"938" : "ć",
|
524 |
+
"478" : "ting",
|
525 |
+
"962" : "м",
|
526 |
+
"138" : "ol",
|
527 |
+
"125" : "ome",
|
528 |
+
"309" : "are",
|
529 |
+
"673" : "▁inst",
|
530 |
+
"97" : "▁have",
|
531 |
+
"562" : "ject",
|
532 |
+
"678" : "ific",
|
533 |
+
"257" : "ect",
|
534 |
+
"17" : "on",
|
535 |
+
"953" : "с",
|
536 |
+
"933" : "ē",
|
537 |
+
"844" : "'",
|
538 |
+
"949" : "η",
|
539 |
+
"190" : "▁v",
|
540 |
+
"980" : "к",
|
541 |
+
"452" : "▁fo",
|
542 |
+
"247" : "ame",
|
543 |
+
"612" : "▁help",
|
544 |
+
"501" : "▁spe",
|
545 |
+
"604" : "ange",
|
546 |
+
"654" : "ib",
|
547 |
+
"842" : "k",
|
548 |
+
"815" : "ave",
|
549 |
+
"687" : "▁form",
|
550 |
+
"222" : "res",
|
551 |
+
"421" : "sel",
|
552 |
+
"477" : "other",
|
553 |
+
"308" : "▁their",
|
554 |
+
"212" : "▁N",
|
555 |
+
"737" : "▁important",
|
556 |
+
"90" : "▁u",
|
557 |
+
"630" : "▁Now",
|
558 |
+
"425" : "ia",
|
559 |
+
"14" : "▁i",
|
560 |
+
"315" : "▁J",
|
561 |
+
"331" : "▁fe",
|
562 |
+
"304" : "▁ar",
|
563 |
+
"29" : "ll",
|
564 |
+
"499" : "▁sc",
|
565 |
+
"919" : "€",
|
566 |
+
"301" : "itt",
|
567 |
+
"201" : "ri",
|
568 |
+
"137" : "ould",
|
569 |
+
"289" : "▁man",
|
570 |
+
"81" : "▁this",
|
571 |
+
"1008" : "П",
|
572 |
+
"312" : "ions",
|
573 |
+
"725" : "ks",
|
574 |
+
"837" : "f",
|
575 |
+
"458" : "▁through",
|
576 |
+
"714" : "▁maybe",
|
577 |
+
"487" : "thing",
|
578 |
+
"424" : "▁may",
|
579 |
+
"31" : "▁and",
|
580 |
+
"78" : "ro",
|
581 |
+
"961" : "л",
|
582 |
+
"956" : "å",
|
583 |
+
"198" : "cause",
|
584 |
+
"95" : "im",
|
585 |
+
"899" : "ñ",
|
586 |
+
"111" : "ally",
|
587 |
+
"523" : "▁There",
|
588 |
+
"538" : "ons",
|
589 |
+
"797" : "▁el",
|
590 |
+
"726" : "▁interest",
|
591 |
+
"53" : "▁wh",
|
592 |
+
"296" : "▁any",
|
593 |
+
"489" : "fore",
|
594 |
+
"911" : "φ",
|
595 |
+
"248" : "▁We",
|
596 |
+
"639" : "▁add",
|
597 |
+
"108" : "▁W",
|
598 |
+
"521" : "▁point",
|
599 |
+
"416" : "▁dis",
|
600 |
+
"739" : "▁run",
|
601 |
+
"747" : "ract",
|
602 |
+
"3" : "▁a",
|
603 |
+
"530" : "▁most",
|
604 |
+
"734" : "▁bec",
|
605 |
+
"338" : "age",
|
606 |
+
"544" : "▁pers",
|
607 |
+
"113" : "▁se",
|
608 |
+
"691" : "▁able",
|
609 |
+
"847" : "A",
|
610 |
+
"651" : "stem",
|
611 |
+
"115" : "od",
|
612 |
+
"527" : "▁same",
|
613 |
+
"575" : "ves",
|
614 |
+
"696" : "▁As",
|
615 |
+
"277" : "▁qu",
|
616 |
+
"728" : "ited",
|
617 |
+
"640" : "▁set",
|
618 |
+
"502" : "ub",
|
619 |
+
"497" : "▁try",
|
620 |
+
"881" : "V",
|
621 |
+
"219" : "▁G",
|
622 |
+
"561" : "ph",
|
623 |
+
"759" : "▁All",
|
624 |
+
"177" : "ain",
|
625 |
+
"515" : "ors",
|
626 |
+
"71" : "▁S",
|
627 |
+
"509" : "ian",
|
628 |
+
"803" : "▁cou",
|
629 |
+
"25" : "an",
|
630 |
+
"583" : "iss",
|
631 |
+
"417" : "▁first",
|
632 |
+
"840" : "b",
|
633 |
+
"768" : "▁An",
|
634 |
+
"419" : "▁something",
|
635 |
+
"569" : "▁acc",
|
636 |
+
"607" : "atch",
|
637 |
+
"534" : "ug",
|
638 |
+
"195" : "▁my",
|
639 |
+
"832" : "c",
|
640 |
+
"634" : "cess",
|
641 |
+
"809" : "▁everything",
|
642 |
+
"1" : "▁t",
|
643 |
+
"557" : "▁bas",
|
644 |
+
"481" : "▁inc",
|
645 |
+
"57" : "ot",
|
646 |
+
"518" : "ail",
|
647 |
+
"924" : "α",
|
648 |
+
"173" : "▁lo",
|
649 |
+
"905" : "μ",
|
650 |
+
"762" : "▁probably",
|
651 |
+
"626" : "▁dec",
|
652 |
+
"647" : "▁its",
|
653 |
+
"415" : "orm",
|
654 |
+
"917" : "ô",
|
655 |
+
"560" : "body",
|
656 |
+
"474" : "▁put",
|
657 |
+
"572" : "▁em",
|
658 |
+
"689" : "▁system",
|
659 |
+
"909" : "θ",
|
660 |
+
"408" : "▁res",
|
661 |
+
"862" : "1",
|
662 |
+
"830" : "d",
|
663 |
+
"748" : "▁question",
|
664 |
+
"285" : "▁now",
|
665 |
+
"717" : "▁prod",
|
666 |
+
"60" : "▁e",
|
667 |
+
"818" : "oney",
|
668 |
+
"814" : "▁Because",
|
669 |
+
"893" : "í",
|
670 |
+
"587" : "▁uh",
|
671 |
+
"377" : "▁things",
|
672 |
+
"454" : "▁ro",
|
673 |
+
"205" : "▁up",
|
674 |
+
"849" : "j",
|
675 |
+
"152" : "out",
|
676 |
+
"994" : "ί",
|
677 |
+
"789" : "ope",
|
678 |
+
"1018" : "ź",
|
679 |
+
"973" : "ž",
|
680 |
+
"950" : "п",
|
681 |
+
"901" : "è",
|
682 |
+
"288" : "▁Wh",
|
683 |
+
"710" : "▁prob",
|
684 |
+
"581" : "igh",
|
685 |
+
"1009" : "д",
|
686 |
+
"47" : "us",
|
687 |
+
"756" : "ness",
|
688 |
+
"648" : "▁God",
|
689 |
+
"43" : "om",
|
690 |
+
"952" : "н",
|
691 |
+
"744" : "▁never",
|
692 |
+
"688" : "▁guys",
|
693 |
+
"272" : "▁co",
|
694 |
+
"36" : "▁in",
|
695 |
+
"405" : "ated",
|
696 |
+
"741" : "▁fact",
|
697 |
+
"56" : "ut",
|
698 |
+
"290" : "ous",
|
699 |
+
"770" : "▁belie",
|
700 |
+
"943" : "ò",
|
701 |
+
"284" : "▁how",
|
702 |
+
"697" : "▁mod",
|
703 |
+
"671" : "▁att",
|
704 |
+
"453" : "▁comp",
|
705 |
+
"690" : "ew",
|
706 |
+
"101" : "▁an",
|
707 |
+
"264" : "ven",
|
708 |
+
"200" : "▁don",
|
709 |
+
"279" : "▁were",
|
710 |
+
"99" : "ht",
|
711 |
+
"305" : "hing",
|
712 |
+
"1014" : "Î",
|
713 |
+
"525" : "▁many",
|
714 |
+
"738" : "▁such",
|
715 |
+
"940" : "ε",
|
716 |
+
"995" : "ζ",
|
717 |
+
"491" : "ark",
|
718 |
+
"18" : "▁h",
|
719 |
+
"947" : "ł",
|
720 |
+
"702" : "▁ask",
|
721 |
+
"49" : "ow",
|
722 |
+
"808" : "▁gl",
|
723 |
+
"484" : "▁should",
|
724 |
+
"907" : "ä",
|
725 |
+
"916" : "â",
|
726 |
+
"447" : "ang",
|
727 |
+
"107" : "▁as",
|
728 |
+
"843" : "v",
|
729 |
+
"1025" : "▁global",
|
730 |
+
"267" : "▁really",
|
731 |
+
"892" : "\/",
|
732 |
+
"703" : "old",
|
733 |
+
"679" : "ix",
|
734 |
+
"601" : "▁ob",
|
735 |
+
"498" : "ious",
|
736 |
+
"298" : "▁But",
|
737 |
+
"300" : "iv",
|
738 |
+
"565" : "▁Is",
|
739 |
+
"282" : "ther",
|
740 |
+
"249" : "our",
|
741 |
+
"539" : "▁Be",
|
742 |
+
"356" : "ap",
|
743 |
+
"528" : "▁sy",
|
744 |
+
"470" : "xt",
|
745 |
+
"373" : "ick",
|
746 |
+
"853" : "C",
|
747 |
+
"215" : "and",
|
748 |
+
"869" : "F",
|
749 |
+
"10" : "at",
|
750 |
+
"1004" : "œ",
|
751 |
+
"686" : "ative",
|
752 |
+
"553" : "ought",
|
753 |
+
"976" : "ę",
|
754 |
+
"473" : "ild",
|
755 |
+
"4" : "in",
|
756 |
+
"352" : "▁ad",
|
757 |
+
"951" : "ë",
|
758 |
+
"265" : "▁our",
|
759 |
+
"427" : "▁her",
|
760 |
+
"676" : "▁rep",
|
761 |
+
"982" : "Ś",
|
762 |
+
"243" : "ies",
|
763 |
+
"48" : "ic",
|
764 |
+
"978" : "б",
|
765 |
+
"28" : "or",
|
766 |
+
"566" : "ates",
|
767 |
+
"965" : "ș",
|
768 |
+
"9" : "▁s",
|
769 |
+
"92" : "ur",
|
770 |
+
"16" : "▁c",
|
771 |
+
"30" : "▁of",
|
772 |
+
"15" : "▁b",
|
773 |
+
"547" : "▁str",
|
774 |
+
"645" : "▁life",
|
775 |
+
"24" : "▁p",
|
776 |
+
"310" : "▁his",
|
777 |
+
"472" : "ory",
|
778 |
+
"189" : "▁going",
|
779 |
+
"699" : "ings",
|
780 |
+
"1029" : "▁bipartisan",
|
781 |
+
"178" : "▁one",
|
782 |
+
"67" : "▁ha",
|
783 |
+
"824" : "n",
|
784 |
+
"422" : "▁let",
|
785 |
+
"888" : "$",
|
786 |
+
"649" : "pect",
|
787 |
+
"635" : "nds",
|
788 |
+
"540" : "ically",
|
789 |
+
"511" : "red",
|
790 |
+
"827" : "h",
|
791 |
+
"19" : "ing",
|
792 |
+
"969" : "ù",
|
793 |
+
"985" : "υ",
|
794 |
+
"225" : "ake",
|
795 |
+
"239" : "ard",
|
796 |
+
"618" : "ike",
|
797 |
+
"240" : "▁right",
|
798 |
+
"585" : "ne",
|
799 |
+
"378" : "▁In",
|
800 |
+
"616" : "▁world",
|
801 |
+
"130" : "▁me",
|
802 |
+
"434" : "▁even",
|
803 |
+
"599" : "te",
|
804 |
+
"897" : "ā",
|
805 |
+
"206" : "▁P",
|
806 |
+
"336" : "▁U",
|
807 |
+
"82" : "ld",
|
808 |
+
"877" : "9",
|
809 |
+
"368" : "▁than",
|
810 |
+
"1016" : "ė",
|
811 |
+
"181" : "ist",
|
812 |
+
"438" : "▁app",
|
813 |
+
"611" : "ert",
|
814 |
+
"567" : "▁ph",
|
815 |
+
"374" : "way",
|
816 |
+
"395" : "ase",
|
817 |
+
"621" : "▁min",
|
818 |
+
"188" : "▁about",
|
819 |
+
"663" : "▁stu",
|
820 |
+
"571" : "▁years",
|
821 |
+
"94" : "ith",
|
822 |
+
"758" : "ize",
|
823 |
+
"180" : "art",
|
824 |
+
"40" : "▁you",
|
825 |
+
"904" : "¿",
|
826 |
+
"845" : "I",
|
827 |
+
"268" : "▁more",
|
828 |
+
"253" : "▁see",
|
829 |
+
"683" : "▁def",
|
830 |
+
"856" : "O",
|
831 |
+
"370" : "▁gu",
|
832 |
+
"551" : "▁rec",
|
833 |
+
"8" : "▁o",
|
834 |
+
"428" : "▁said",
|
835 |
+
"675" : "▁happen",
|
836 |
+
"102" : "▁with",
|
837 |
+
"776" : "▁ca",
|
838 |
+
"363" : "▁somet",
|
839 |
+
"757" : "arch",
|
840 |
+
"914" : "ê",
|
841 |
+
"208" : "ort",
|
842 |
+
"660" : "▁sure",
|
843 |
+
"207" : "▁out",
|
844 |
+
"613" : "ost",
|
845 |
+
"942" : "и",
|
846 |
+
"761" : "blem",
|
847 |
+
"117" : "▁like",
|
848 |
+
"129" : "▁sh",
|
849 |
+
"293" : "ff",
|
850 |
+
"407" : "▁off",
|
851 |
+
"595" : "▁long",
|
852 |
+
"366" : "ire",
|
853 |
+
"614" : "▁too",
|
854 |
+
"256" : "ure",
|
855 |
+
"328" : "▁cl",
|
856 |
+
"793" : "ational",
|
857 |
+
"724" : "▁mon",
|
858 |
+
"989" : "х",
|
859 |
+
"134" : "▁what",
|
860 |
+
"45" : "▁n",
|
861 |
+
"707" : "ility",
|
862 |
+
"62" : "ay",
|
863 |
+
"801" : "▁four",
|
864 |
+
"392" : "▁those",
|
865 |
+
"160" : "ge",
|
866 |
+
"369" : "ace",
|
867 |
+
"236" : "▁will",
|
868 |
+
"80" : "se",
|
869 |
+
"708" : "▁high",
|
870 |
+
"217" : "em",
|
871 |
+
"944" : "р",
|
872 |
+
"672" : "▁op",
|
873 |
+
"754" : "▁Of",
|
874 |
+
"58" : "▁we",
|
875 |
+
"131" : "ill",
|
876 |
+
"810" : "▁eff",
|
877 |
+
"146" : "▁ne",
|
878 |
+
"998" : "Æ",
|
879 |
+
"496" : "▁being",
|
880 |
+
"975" : "у",
|
881 |
+
"625" : "▁find",
|
882 |
+
"132" : "ant",
|
883 |
+
"884" : "7",
|
884 |
+
"382" : "▁could",
|
885 |
+
"459" : "▁start",
|
886 |
+
"147" : "▁de",
|
887 |
+
"448" : "▁mean",
|
888 |
+
"1019" : "Κ",
|
889 |
+
"241" : "▁thing",
|
890 |
+
"819" : "▁",
|
891 |
+
"698" : "▁done",
|
892 |
+
"444" : "ress",
|
893 |
+
"46" : "ve",
|
894 |
+
"971" : "ч",
|
895 |
+
"646" : "ities",
|
896 |
+
"713" : "▁pres",
|
897 |
+
"483" : "▁different",
|
898 |
+
"577" : "▁inter",
|
899 |
+
"372" : "og",
|
900 |
+
"786" : "iew",
|
901 |
+
"692" : "ied",
|
902 |
+
"362" : "iff",
|
903 |
+
"1021" : "ά",
|
904 |
+
"586" : "▁why",
|
905 |
+
"636" : "▁big",
|
906 |
+
"1023" : "ό",
|
907 |
+
"162" : "▁Y",
|
908 |
+
"685" : "▁next",
|
909 |
+
"193" : "ok",
|
910 |
+
"221" : "▁D",
|
911 |
+
"665" : "▁ev",
|
912 |
+
"159" : "▁pro",
|
913 |
+
"59" : "▁is",
|
914 |
+
"1017" : "Š",
|
915 |
+
"74" : "▁Th",
|
916 |
+
"813" : "▁Am",
|
917 |
+
"5" : "▁the",
|
918 |
+
"218" : "oug",
|
919 |
+
"376" : "▁un",
|
920 |
+
"270" : "ose",
|
921 |
+
"140" : "▁C",
|
922 |
+
"122" : "▁they",
|
923 |
+
"412" : "▁talk",
|
924 |
+
"913" : "σ",
|
925 |
+
"326" : "ice",
|
926 |
+
"451" : "▁does",
|
927 |
+
"559" : "erm",
|
928 |
+
"23" : "▁f",
|
929 |
+
"210" : "ment",
|
930 |
+
"778" : "▁ass",
|
931 |
+
"984" : "κ",
|
932 |
+
"316" : "▁no",
|
933 |
+
"666" : "ments",
|
934 |
+
"494" : "▁come",
|
935 |
+
"887" : "Q",
|
936 |
+
"891" : "é",
|
937 |
+
"404" : "uch",
|
938 |
+
"920" : "É",
|
939 |
+
"455" : "▁bl",
|
940 |
+
"170" : "est",
|
941 |
+
"72" : "ig",
|
942 |
+
"443" : "▁again",
|
943 |
+
"727" : "▁ent",
|
944 |
+
"983" : "Ω",
|
945 |
+
"410" : "ance",
|
946 |
+
"281" : "▁act",
|
947 |
+
"763" : "hip",
|
948 |
+
"220" : "os",
|
949 |
+
"261" : "▁You",
|
950 |
+
"214" : "ich",
|
951 |
+
"930" : "î",
|
952 |
+
"805" : "▁called",
|
953 |
+
"156" : "ers",
|
954 |
+
"753" : "▁course",
|
955 |
+
"868" : "G",
|
956 |
+
"238" : "▁which",
|
957 |
+
"836" : "w",
|
958 |
+
"704" : "ered",
|
959 |
+
"513" : "▁ke",
|
960 |
+
"964" : "û",
|
961 |
+
"589" : "▁cons",
|
962 |
+
"409" : "ac",
|
963 |
+
"609" : "gr",
|
964 |
+
"476" : "▁new",
|
965 |
+
"619" : "▁Okay",
|
966 |
+
"287" : "ud",
|
967 |
+
"603" : "ady",
|
968 |
+
"633" : "▁real",
|
969 |
+
"508" : "ty",
|
970 |
+
"1012" : "¡",
|
971 |
+
"401" : "▁year",
|
972 |
+
"110" : "ust",
|
973 |
+
"196" : "ind",
|
974 |
+
"242" : "▁want",
|
975 |
+
"817" : "▁Like",
|
976 |
+
"890" : "X",
|
977 |
+
"191" : "▁wor",
|
978 |
+
"402" : "ass",
|
979 |
+
"266" : "ci",
|
980 |
+
"347" : "alk",
|
981 |
+
"387" : "ach",
|
982 |
+
"804" : "ont",
|
983 |
+
"701" : "ism",
|
984 |
+
"841" : ".",
|
985 |
+
"317" : "▁en",
|
986 |
+
"385" : "irst",
|
987 |
+
"255" : "ast",
|
988 |
+
"510" : "onna",
|
989 |
+
"20" : "▁to",
|
990 |
+
"667" : "▁another",
|
991 |
+
"591" : "▁after",
|
992 |
+
"354" : "▁over",
|
993 |
+
"112" : "▁j",
|
994 |
+
"674" : "▁sl",
|
995 |
+
"935" : "ß",
|
996 |
+
"204" : "ity",
|
997 |
+
"735" : "▁Well",
|
998 |
+
"900" : "ö",
|
999 |
+
"163" : "▁O",
|
1000 |
+
"169" : "▁al",
|
1001 |
+
"531" : "▁great",
|
1002 |
+
"185" : "ink",
|
1003 |
+
"752" : "ather",
|
1004 |
+
"795" : "▁trying",
|
1005 |
+
"796" : "▁sch",
|
1006 |
+
"721" : "oy",
|
1007 |
+
"657" : "▁person",
|
1008 |
+
"799" : "atter",
|
1009 |
+
"554" : "day",
|
1010 |
+
"274" : "ough",
|
1011 |
+
"109" : "▁are",
|
1012 |
+
"643" : "ible",
|
1013 |
+
"968" : "Ø",
|
1014 |
+
"269" : "ound",
|
1015 |
+
"145" : "▁just",
|
1016 |
+
"263" : "▁when",
|
1017 |
+
"85" : "▁k",
|
1018 |
+
"365" : "nder",
|
1019 |
+
"359" : "▁po",
|
1020 |
+
"937" : "ï",
|
1021 |
+
"226" : "▁int",
|
1022 |
+
"584" : "▁des",
|
1023 |
+
"512" : "wn",
|
1024 |
+
"495" : "▁They",
|
1025 |
+
"391" : "▁He",
|
1026 |
+
"883" : "6",
|
1027 |
+
"627" : "ular",
|
1028 |
+
"927" : "а",
|
1029 |
+
"941" : "е",
|
1030 |
+
"330" : "▁way",
|
1031 |
+
"860" : "Y",
|
1032 |
+
"462" : "▁bet"
|
1033 |
+
}
|
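For reference, `parakeet_vocab.json` is a plain id-to-subword map in the SentencePiece style, where `▁` marks a word boundary. The sketch below is a minimal, illustrative way to turn decoded token IDs back into text; the function name and the omission of any blank/special-token handling are assumptions, not something defined by this repository's decoding pipeline.

```python
import json

# Minimal sketch (not part of this repository): map token IDs back to text
# using parakeet_vocab.json. Blank/special-token filtering, if any, is omitted.
with open("parakeet_vocab.json") as f:
    id_to_token = json.load(f)  # keys are string IDs, values are subword pieces

def decode_ids(ids):
    """Join subword pieces and treat '▁' (U+2581) as a word boundary."""
    pieces = [id_to_token[str(i)] for i in ids if str(i) in id_to_token]
    return "".join(pieces).replace("\u2581", " ").strip()

# Example using IDs from the mapping above: 5 -> "▁the", 521 -> "▁point"
print(decode_ids([5, 521]))  # "the point"
```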