New melspec model to support shorter audio chunks
Melspectrogram_v2.mlmodelc/analytics/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4300d03c3fc52901866a5f578d2673ec12c95deaf86b2131c83848339212d218
+size 243
Melspectrogram_v2.mlmodelc/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:96c3a2e963dbc871fda545e9ed452173f58be0d995d2d91693361069b7189ee1
+size 402
Melspectrogram_v2.mlmodelc/metadata.json
ADDED
@@ -0,0 +1,108 @@
+[
+  {
+    "metadataOutputVersion" : "3.0",
+    "storagePrecision" : "Float16",
+    "outputSchema" : [
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16)",
+        "shortDescription" : "",
+        "shape" : "[]",
+        "name" : "melspectrogram",
+        "type" : "MultiArray"
+      },
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Int32",
+        "formattedType" : "MultiArray (Int32 1)",
+        "shortDescription" : "",
+        "shape" : "[1]",
+        "name" : "melspectrogram_length",
+        "type" : "MultiArray"
+      }
+    ],
+    "modelParameters" : [
+
+    ],
+    "specificationVersion" : 8,
+    "mlProgramOperationTypeHistogram" : {
+      "Range1d" : 2,
+      "Ios17.reshape" : 2,
+      "Identity" : 1,
+      "Ios17.matmul" : 1,
+      "Ios17.expandDims" : 10,
+      "Select" : 3,
+      "Ios17.add" : 4,
+      "Tile" : 2,
+      "Ios17.sliceByIndex" : 3,
+      "Ios16.reduceSum" : 4,
+      "Shape" : 3,
+      "Ios17.gather" : 3,
+      "Pad" : 1,
+      "Ios17.log" : 1,
+      "Ios17.conv" : 2,
+      "Ios17.sub" : 4,
+      "Ios17.pow" : 2,
+      "Ios17.cast" : 9,
+      "Ios17.realDiv" : 4,
+      "Stack" : 1,
+      "Ios17.concat" : 3,
+      "Ios17.floorDiv" : 1,
+      "Ios17.less" : 1,
+      "Ios17.clip" : 2,
+      "Ios17.sqrt" : 1,
+      "Ios17.greaterEqual" : 1,
+      "Ios17.mul" : 1
+    },
+    "computePrecision" : "Mixed (Float16, Float32, Int32, UInt16)",
+    "isUpdatable" : "0",
+    "stateSchema" : [
+
+    ],
+    "availability" : {
+      "macOS" : "14.0",
+      "tvOS" : "17.0",
+      "visionOS" : "1.0",
+      "watchOS" : "10.0",
+      "iOS" : "17.0",
+      "macCatalyst" : "17.0"
+    },
+    "modelType" : {
+      "name" : "MLModelType_mlProgram"
+    },
+    "userDefinedMetadata" : {
+      "com.github.apple.coremltools.source_dialect" : "TorchScript",
+      "com.github.apple.coremltools.source" : "torch==2.5.0",
+      "com.github.apple.coremltools.version" : "8.3.0"
+    },
+    "inputSchema" : [
+      {
+        "dataType" : "Float32",
+        "hasShapeFlexibility" : "1",
+        "isOptional" : "0",
+        "shapeFlexibility" : "1 × 160...480000",
+        "shapeRange" : "[[1, 1], [160, 480000]]",
+        "formattedType" : "MultiArray (Float32 1 × 160)",
+        "type" : "MultiArray",
+        "shape" : "[1, 160]",
+        "name" : "audio_signal",
+        "shortDescription" : ""
+      },
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Int32",
+        "formattedType" : "MultiArray (Int32 1)",
+        "shortDescription" : "",
+        "shape" : "[1]",
+        "name" : "audio_length",
+        "type" : "MultiArray"
+      }
+    ],
+    "generatedClassName" : "FlexibleMelspectrogram_fixed",
+    "method" : "predict"
+  }
+]
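
For reference, a minimal usage sketch (not part of this commit) showing how the compiled model could be driven from Python with coremltools, using the input/output names declared in the schema above. The file path, the all-zero test chunk, and the assumption of 16 kHz mono audio are illustrative only; running a `.mlmodelc` this way requires macOS.

```python
# Hypothetical sketch: run the compiled model on the shortest supported chunk.
# Only the input/output names and shapes come from metadata.json.
import numpy as np
import coremltools as ct

model = ct.models.CompiledMLModel("Melspectrogram_v2.mlmodelc")

chunk = np.zeros((1, 160), dtype=np.float32)  # 160 samples = minimum flexible shape
outputs = model.predict({
    "audio_signal": chunk,
    "audio_length": np.array([chunk.shape[1]], dtype=np.int32),
})

mel = outputs["melspectrogram"]                      # mel-feature multi-array
n_frames = int(outputs["melspectrogram_length"][0])  # number of valid frames
```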
Melspectrogram_v2.mlmodelc/model.mil
ADDED
@@ -0,0 +1,177 @@
+program(1.0)
+[buildInfo = dict<tensor<string, []>, tensor<string, []>>({{"coremlc-component-MIL", "3405.2.1"}, {"coremlc-version", "3404.23.1"}, {"coremltools-component-torch", "2.5.0"}, {"coremltools-source-dialect", "TorchScript"}, {"coremltools-version", "8.3.0"}})]
+{
+    func main<ios17>(tensor<int32, [1]> audio_length, tensor<fp32, [1, ?]> audio_signal) [FlexibleShapeInformation = tuple<tuple<tensor<string, []>, dict<tensor<string, []>, tensor<int32, [?]>>>, tuple<tensor<string, []>, dict<tensor<string, []>, list<tensor<int32, [2]>, ?>>>>((("DefaultShapes", {{"audio_signal", [1, 160]}}), ("RangeDims", {{"audio_signal", [[1, 1], [160, 480000]]}})))] {
+        tensor<int32, []> var_6 = const()[name = tensor<string, []>("op_6"), val = tensor<int32, []>(512)];
+        tensor<int32, [1]> var_7 = add(x = audio_length, y = var_6)[name = tensor<string, []>("op_7")];
+        tensor<int32, []> var_9 = const()[name = tensor<string, []>("op_9"), val = tensor<int32, []>(512)];
+        tensor<int32, [1]> var_10 = sub(x = var_7, y = var_9)[name = tensor<string, []>("op_10")];
+        tensor<int32, []> var_11 = const()[name = tensor<string, []>("op_11"), val = tensor<int32, []>(160)];
+        tensor<int32, [1]> floor_div_0 = floor_div(x = var_10, y = var_11)[name = tensor<string, []>("floor_div_0")];
+        tensor<string, []> var_12_to_fp16_dtype_0 = const()[name = tensor<string, []>("op_12_to_fp16_dtype_0"), val = tensor<string, []>("fp16")];
+        tensor<fp16, []> var_14_promoted_to_fp16 = const()[name = tensor<string, []>("op_14_promoted_to_fp16"), val = tensor<fp16, []>(0x1p+0)];
+        tensor<fp16, [1]> floor_div_0_to_fp16 = cast(dtype = var_12_to_fp16_dtype_0, x = floor_div_0)[name = tensor<string, []>("cast_23")];
+        tensor<fp16, [1]> seq_len_1_cast_fp16 = add(x = floor_div_0_to_fp16, y = var_14_promoted_to_fp16)[name = tensor<string, []>("seq_len_1_cast_fp16")];
+        tensor<string, []> cast_0_dtype_0 = const()[name = tensor<string, []>("cast_0_dtype_0"), val = tensor<string, []>("int32")];
+        tensor<int32, [2]> var_28_begin_0 = const()[name = tensor<string, []>("op_28_begin_0"), val = tensor<int32, [2]>([0, 0])];
+        tensor<int32, [2]> var_28_end_0 = const()[name = tensor<string, []>("op_28_end_0"), val = tensor<int32, [2]>([1, 1])];
+        tensor<bool, [2]> var_28_end_mask_0 = const()[name = tensor<string, []>("op_28_end_mask_0"), val = tensor<bool, [2]>([true, false])];
+        tensor<bool, [2]> var_28_squeeze_mask_0 = const()[name = tensor<string, []>("op_28_squeeze_mask_0"), val = tensor<bool, [2]>([false, true])];
+        tensor<string, []> audio_signal_to_fp16_dtype_0 = const()[name = tensor<string, []>("audio_signal_to_fp16_dtype_0"), val = tensor<string, []>("fp16")];
+        tensor<fp16, [1, ?]> audio_signal_to_fp16 = cast(dtype = audio_signal_to_fp16_dtype_0, x = audio_signal)[name = tensor<string, []>("cast_21")];
+        tensor<fp16, [1]> var_28_cast_fp16 = slice_by_index(begin = var_28_begin_0, end = var_28_end_0, end_mask = var_28_end_mask_0, squeeze_mask = var_28_squeeze_mask_0, x = audio_signal_to_fp16)[name = tensor<string, []>("op_28_cast_fp16")];
+        tensor<int32, [1]> var_30_axes_0 = const()[name = tensor<string, []>("op_30_axes_0"), val = tensor<int32, [1]>([1])];
+        tensor<fp16, [1, 1]> var_30_cast_fp16 = expand_dims(axes = var_30_axes_0, x = var_28_cast_fp16)[name = tensor<string, []>("op_30_cast_fp16")];
+        tensor<int32, [2]> var_40_begin_0 = const()[name = tensor<string, []>("op_40_begin_0"), val = tensor<int32, [2]>([0, 1])];
+        tensor<int32, [2]> var_40_end_0 = const()[name = tensor<string, []>("op_40_end_0"), val = tensor<int32, [2]>([1, 0])];
+        tensor<bool, [2]> var_40_end_mask_0 = const()[name = tensor<string, []>("op_40_end_mask_0"), val = tensor<bool, [2]>([true, true])];
+        tensor<fp16, [1, ?]> var_40_cast_fp16 = slice_by_index(begin = var_40_begin_0, end = var_40_end_0, end_mask = var_40_end_mask_0, x = audio_signal_to_fp16)[name = tensor<string, []>("op_40_cast_fp16")];
+        tensor<int32, [2]> var_50_begin_0 = const()[name = tensor<string, []>("op_50_begin_0"), val = tensor<int32, [2]>([0, 0])];
+        tensor<int32, [2]> var_50_end_0 = const()[name = tensor<string, []>("op_50_end_0"), val = tensor<int32, [2]>([1, -1])];
+        tensor<bool, [2]> var_50_end_mask_0 = const()[name = tensor<string, []>("op_50_end_mask_0"), val = tensor<bool, [2]>([true, false])];
+        tensor<fp16, [1, ?]> var_50_cast_fp16 = slice_by_index(begin = var_50_begin_0, end = var_50_end_0, end_mask = var_50_end_mask_0, x = audio_signal_to_fp16)[name = tensor<string, []>("op_50_cast_fp16")];
+        tensor<fp16, []> var_51_to_fp16 = const()[name = tensor<string, []>("op_51_to_fp16"), val = tensor<fp16, []>(0x1.f0cp-1)];
+        tensor<fp16, [1, ?]> var_52_cast_fp16 = mul(x = var_50_cast_fp16, y = var_51_to_fp16)[name = tensor<string, []>("op_52_cast_fp16")];
+        tensor<fp16, [1, ?]> var_54_cast_fp16 = sub(x = var_40_cast_fp16, y = var_52_cast_fp16)[name = tensor<string, []>("op_54_cast_fp16")];
+        tensor<int32, []> var_56 = const()[name = tensor<string, []>("op_56"), val = tensor<int32, []>(1)];
+        tensor<bool, []> input_1_interleave_0 = const()[name = tensor<string, []>("input_1_interleave_0"), val = tensor<bool, []>(false)];
+        tensor<fp16, [1, ?]> input_1_cast_fp16 = concat(axis = var_56, interleave = input_1_interleave_0, values = (var_30_cast_fp16, var_54_cast_fp16))[name = tensor<string, []>("input_1_cast_fp16")];
+        tensor<int32, [3]> concat_0x = const()[name = tensor<string, []>("concat_0x"), val = tensor<int32, [3]>([1, 1, -1])];
+        tensor<fp16, [1, 1, ?]> input_3_cast_fp16 = reshape(shape = concat_0x, x = input_1_cast_fp16)[name = tensor<string, []>("input_3_cast_fp16")];
+        tensor<int32, [6]> input_5_pad_0 = const()[name = tensor<string, []>("input_5_pad_0"), val = tensor<int32, [6]>([0, 0, 0, 0, 256, 256])];
+        tensor<string, []> input_5_mode_0 = const()[name = tensor<string, []>("input_5_mode_0"), val = tensor<string, []>("reflect")];
+        tensor<fp16, []> const_0_to_fp16 = const()[name = tensor<string, []>("const_0_to_fp16"), val = tensor<fp16, []>(0x0p+0)];
+        tensor<fp16, [1, 1, ?]> input_5_cast_fp16 = pad(constant_val = const_0_to_fp16, mode = input_5_mode_0, pad = input_5_pad_0, x = input_3_cast_fp16)[name = tensor<string, []>("input_5_cast_fp16")];
+        tensor<int32, [2]> concat_1x = const()[name = tensor<string, []>("concat_1x"), val = tensor<int32, [2]>([1, -1])];
+        tensor<fp16, [1, ?]> input_cast_fp16 = reshape(shape = concat_1x, x = input_5_cast_fp16)[name = tensor<string, []>("input_cast_fp16")];
+        tensor<int32, [1]> expand_dims_3 = const()[name = tensor<string, []>("expand_dims_3"), val = tensor<int32, [1]>([160])];
+        tensor<int32, [1]> expand_dims_4_axes_0 = const()[name = tensor<string, []>("expand_dims_4_axes_0"), val = tensor<int32, [1]>([1])];
+        tensor<fp16, [1, 1, ?]> expand_dims_4_cast_fp16 = expand_dims(axes = expand_dims_4_axes_0, x = input_cast_fp16)[name = tensor<string, []>("expand_dims_4_cast_fp16")];
+        tensor<string, []> conv_0_pad_type_0 = const()[name = tensor<string, []>("conv_0_pad_type_0"), val = tensor<string, []>("valid")];
+        tensor<int32, [2]> conv_0_pad_0 = const()[name = tensor<string, []>("conv_0_pad_0"), val = tensor<int32, [2]>([0, 0])];
+        tensor<int32, [1]> conv_0_dilations_0 = const()[name = tensor<string, []>("conv_0_dilations_0"), val = tensor<int32, [1]>([1])];
+        tensor<int32, []> conv_0_groups_0 = const()[name = tensor<string, []>("conv_0_groups_0"), val = tensor<int32, []>(1)];
+        tensor<fp16, [257, 1, 512]> expand_dims_1_to_fp16 = const()[name = tensor<string, []>("expand_dims_1_to_fp16"), val = tensor<fp16, [257, 1, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(64)))];
+        tensor<fp16, [1, 257, ?]> conv_0_cast_fp16 = conv(dilations = conv_0_dilations_0, groups = conv_0_groups_0, pad = conv_0_pad_0, pad_type = conv_0_pad_type_0, strides = expand_dims_3, weight = expand_dims_1_to_fp16, x = expand_dims_4_cast_fp16)[name = tensor<string, []>("conv_0_cast_fp16")];
+        tensor<string, []> conv_1_pad_type_0 = const()[name = tensor<string, []>("conv_1_pad_type_0"), val = tensor<string, []>("valid")];
+        tensor<int32, [2]> conv_1_pad_0 = const()[name = tensor<string, []>("conv_1_pad_0"), val = tensor<int32, [2]>([0, 0])];
+        tensor<int32, [1]> conv_1_dilations_0 = const()[name = tensor<string, []>("conv_1_dilations_0"), val = tensor<int32, [1]>([1])];
+        tensor<int32, []> conv_1_groups_0 = const()[name = tensor<string, []>("conv_1_groups_0"), val = tensor<int32, []>(1)];
+        tensor<fp16, [257, 1, 512]> expand_dims_2_to_fp16 = const()[name = tensor<string, []>("expand_dims_2_to_fp16"), val = tensor<fp16, [257, 1, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(263296)))];
+        tensor<fp16, [1, 257, ?]> conv_1_cast_fp16 = conv(dilations = conv_1_dilations_0, groups = conv_1_groups_0, pad = conv_1_pad_0, pad_type = conv_1_pad_type_0, strides = expand_dims_3, weight = expand_dims_2_to_fp16, x = expand_dims_4_cast_fp16)[name = tensor<string, []>("conv_1_cast_fp16")];
+        tensor<int32, []> stack_0_axis_0 = const()[name = tensor<string, []>("stack_0_axis_0"), val = tensor<int32, []>(-1)];
+        tensor<fp16, [1, 257, ?, 2]> stack_0_cast_fp16 = stack(axis = stack_0_axis_0, values = (conv_0_cast_fp16, conv_1_cast_fp16))[name = tensor<string, []>("stack_0_cast_fp16")];
+        tensor<fp16, []> var_93_promoted_to_fp16 = const()[name = tensor<string, []>("op_93_promoted_to_fp16"), val = tensor<fp16, []>(0x1p+1)];
+        tensor<fp16, [1, 257, ?, 2]> var_94_cast_fp16 = pow(x = stack_0_cast_fp16, y = var_93_promoted_to_fp16)[name = tensor<string, []>("op_94_cast_fp16")];
+        tensor<int32, [1]> var_99_axes_0 = const()[name = tensor<string, []>("op_99_axes_0"), val = tensor<int32, [1]>([-1])];
+        tensor<bool, []> var_99_keep_dims_0 = const()[name = tensor<string, []>("op_99_keep_dims_0"), val = tensor<bool, []>(false)];
+        tensor<fp16, [1, 257, ?]> var_99_cast_fp16 = reduce_sum(axes = var_99_axes_0, keep_dims = var_99_keep_dims_0, x = var_94_cast_fp16)[name = tensor<string, []>("op_99_cast_fp16")];
+        tensor<fp16, [1, 257, ?]> x_7_cast_fp16 = identity(x = var_99_cast_fp16)[name = tensor<string, []>("x_7_cast_fp16")];
+        tensor<bool, []> x_9_transpose_x_0 = const()[name = tensor<string, []>("x_9_transpose_x_0"), val = tensor<bool, []>(false)];
+        tensor<bool, []> x_9_transpose_y_0 = const()[name = tensor<string, []>("x_9_transpose_y_0"), val = tensor<bool, []>(false)];
+        tensor<fp16, [1, 128, 257]> filterbanks_to_fp16 = const()[name = tensor<string, []>("filterbanks_to_fp16"), val = tensor<fp16, [1, 128, 257]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(526528)))];
+        tensor<fp16, [1, 128, ?]> x_9_cast_fp16 = matmul(transpose_x = x_9_transpose_x_0, transpose_y = x_9_transpose_y_0, x = filterbanks_to_fp16, y = x_7_cast_fp16)[name = tensor<string, []>("x_9_cast_fp16")];
+        tensor<fp16, []> var_108_to_fp16 = const()[name = tensor<string, []>("op_108_to_fp16"), val = tensor<fp16, []>(0x1p-24)];
+        tensor<fp16, [1, 128, ?]> var_109_cast_fp16 = add(x = x_9_cast_fp16, y = var_108_to_fp16)[name = tensor<string, []>("op_109_cast_fp16")];
+        tensor<fp32, []> x_11_epsilon_0 = const()[name = tensor<string, []>("x_11_epsilon_0"), val = tensor<fp32, []>(0x1p-149)];
+        tensor<fp16, [1, 128, ?]> x_11_cast_fp16 = log(epsilon = x_11_epsilon_0, x = var_109_cast_fp16)[name = tensor<string, []>("x_11_cast_fp16")];
+        tensor<int32, []> var_114 = const()[name = tensor<string, []>("op_114"), val = tensor<int32, []>(1)];
+        tensor<int32, [3]> var_116_shape_cast_fp16 = shape(x = x_11_cast_fp16)[name = tensor<string, []>("op_116_shape_cast_fp16")];
+        tensor<int32, []> gather_5_axis_0 = const()[name = tensor<string, []>("gather_5_axis_0"), val = tensor<int32, []>(0)];
+        tensor<int32, []> gather_5_batch_dims_0 = const()[name = tensor<string, []>("gather_5_batch_dims_0"), val = tensor<int32, []>(0)];
+        tensor<bool, []> gather_5_validate_indices_0 = const()[name = tensor<string, []>("gather_5_validate_indices_0"), val = tensor<bool, []>(false)];
+        tensor<string, []> var_116_shape_cast_fp16_to_uint16_dtype_0 = const()[name = tensor<string, []>("op_116_shape_cast_fp16_to_uint16_dtype_0"), val = tensor<string, []>("uint16")];
+        tensor<uint16, []> select_5_to_uint16 = const()[name = tensor<string, []>("select_5_to_uint16"), val = tensor<uint16, []>(2)];
+        tensor<uint16, [3]> var_116_shape_cast_fp16_to_uint16 = cast(dtype = var_116_shape_cast_fp16_to_uint16_dtype_0, x = var_116_shape_cast_fp16)[name = tensor<string, []>("cast_20")];
+        tensor<uint16, []> gather_5_cast_uint16 = gather(axis = gather_5_axis_0, batch_dims = gather_5_batch_dims_0, indices = select_5_to_uint16, validate_indices = gather_5_validate_indices_0, x = var_116_shape_cast_fp16_to_uint16)[name = tensor<string, []>("gather_5_cast_uint16")];
+        tensor<string, []> gather_5_cast_uint16_to_int32_dtype_0 = const()[name = tensor<string, []>("gather_5_cast_uint16_to_int32_dtype_0"), val = tensor<string, []>("int32")];
+        tensor<int32, []> const_1 = const()[name = tensor<string, []>("const_1"), val = tensor<int32, []>(0)];
+        tensor<int32, []> const_2 = const()[name = tensor<string, []>("const_2"), val = tensor<int32, []>(1)];
+        tensor<int32, []> gather_5_cast_uint16_to_int32 = cast(dtype = gather_5_cast_uint16_to_int32_dtype_0, x = gather_5_cast_uint16)[name = tensor<string, []>("cast_19")];
+        tensor<int32, [?]> var_124 = range_1d(end = gather_5_cast_uint16_to_int32, start = const_1, step = const_2)[name = tensor<string, []>("op_124")];
+        tensor<int32, [1]> var_126_axes_0 = const()[name = tensor<string, []>("op_126_axes_0"), val = tensor<int32, [1]>([0])];
+        tensor<int32, [1, ?]> var_126 = expand_dims(axes = var_126_axes_0, x = var_124)[name = tensor<string, []>("op_126")];
+        tensor<int32, []> concat_2_axis_0 = const()[name = tensor<string, []>("concat_2_axis_0"), val = tensor<int32, []>(0)];
+        tensor<bool, []> concat_2_interleave_0 = const()[name = tensor<string, []>("concat_2_interleave_0"), val = tensor<bool, []>(false)];
+        tensor<int32, [2]> concat_2 = concat(axis = concat_2_axis_0, interleave = concat_2_interleave_0, values = (var_114, gather_5_cast_uint16_to_int32))[name = tensor<string, []>("concat_2")];
+        tensor<int32, [2]> shape_6 = shape(x = var_126)[name = tensor<string, []>("shape_6")];
+        tensor<int32, [2]> real_div_0 = real_div(x = concat_2, y = shape_6)[name = tensor<string, []>("real_div_0")];
+        tensor<int32, [?, ?]> time_steps = tile(reps = real_div_0, x = var_126)[name = tensor<string, []>("time_steps")];
+        tensor<int32, [1]> var_131_axes_0 = const()[name = tensor<string, []>("op_131_axes_0"), val = tensor<int32, [1]>([1])];
+        tensor<int32, [1]> melspectrogram_length = cast(dtype = cast_0_dtype_0, x = seq_len_1_cast_fp16)[name = tensor<string, []>("cast_22")];
+        tensor<int32, [1, 1]> var_131 = expand_dims(axes = var_131_axes_0, x = melspectrogram_length)[name = tensor<string, []>("op_131")];
+        tensor<bool, [?, ?]> valid_mask = less(x = time_steps, y = var_131)[name = tensor<string, []>("valid_mask")];
+        tensor<int32, [1]> var_134_axes_0 = const()[name = tensor<string, []>("op_134_axes_0"), val = tensor<int32, [1]>([1])];
+        tensor<bool, [?, 1, ?]> var_134 = expand_dims(axes = var_134_axes_0, x = valid_mask)[name = tensor<string, []>("op_134")];
+        tensor<fp16, []> var_135_to_fp16 = const()[name = tensor<string, []>("op_135_to_fp16"), val = tensor<fp16, []>(0x0p+0)];
+        tensor<fp16, [1, 128, ?]> var_136_cast_fp16 = select(a = x_11_cast_fp16, b = var_135_to_fp16, cond = var_134)[name = tensor<string, []>("op_136_cast_fp16")];
+        tensor<int32, [1]> x_mean_numerator_axes_0 = const()[name = tensor<string, []>("x_mean_numerator_axes_0"), val = tensor<int32, [1]>([2])];
+        tensor<bool, []> x_mean_numerator_keep_dims_0 = const()[name = tensor<string, []>("x_mean_numerator_keep_dims_0"), val = tensor<bool, []>(false)];
+        tensor<fp16, [1, 128]> x_mean_numerator_cast_fp16 = reduce_sum(axes = x_mean_numerator_axes_0, keep_dims = x_mean_numerator_keep_dims_0, x = var_136_cast_fp16)[name = tensor<string, []>("x_mean_numerator_cast_fp16")];
+        tensor<int32, [1]> var_146_axes_0 = const()[name = tensor<string, []>("op_146_axes_0"), val = tensor<int32, [1]>([1])];
+        tensor<bool, []> var_146_keep_dims_0 = const()[name = tensor<string, []>("op_146_keep_dims_0"), val = tensor<bool, []>(false)];
+        tensor<string, []> cast_4_to_fp16_dtype_0 = const()[name = tensor<string, []>("cast_4_to_fp16_dtype_0"), val = tensor<string, []>("fp16")];
+        tensor<fp16, [?, ?]> valid_mask_to_fp16 = cast(dtype = cast_4_to_fp16_dtype_0, x = valid_mask)[name = tensor<string, []>("cast_18")];
+        tensor<fp16, [?]> var_146_cast_fp16 = reduce_sum(axes = var_146_axes_0, keep_dims = var_146_keep_dims_0, x = valid_mask_to_fp16)[name = tensor<string, []>("op_146_cast_fp16")];
+        tensor<fp16, []> var_147_promoted_to_fp16 = const()[name = tensor<string, []>("op_147_promoted_to_fp16"), val = tensor<fp16, []>(0x1p+0)];
+        tensor<fp16, []> const_3_to_fp16 = const()[name = tensor<string, []>("const_3_to_fp16"), val = tensor<fp16, []>(inf)];
+        tensor<fp16, [?]> clip_0_cast_fp16 = clip(alpha = var_147_promoted_to_fp16, beta = const_3_to_fp16, x = var_146_cast_fp16)[name = tensor<string, []>("clip_0_cast_fp16")];
+        tensor<int32, [1]> var_151_axes_0 = const()[name = tensor<string, []>("op_151_axes_0"), val = tensor<int32, [1]>([1])];
+        tensor<fp16, [?, 1]> var_151_cast_fp16 = expand_dims(axes = var_151_axes_0, x = clip_0_cast_fp16)[name = tensor<string, []>("op_151_cast_fp16")];
+        tensor<fp16, [?, 128]> x_mean_cast_fp16 = real_div(x = x_mean_numerator_cast_fp16, y = var_151_cast_fp16)[name = tensor<string, []>("x_mean_cast_fp16")];
+        tensor<fp16, []> var_156_to_fp16 = const()[name = tensor<string, []>("op_156_to_fp16"), val = tensor<fp16, []>(0x1p+0)];
+        tensor<fp16, [?, 1]> var_157_cast_fp16 = sub(x = var_151_cast_fp16, y = var_156_to_fp16)[name = tensor<string, []>("op_157_cast_fp16")];
+        tensor<fp16, []> var_158_to_fp16 = const()[name = tensor<string, []>("op_158_to_fp16"), val = tensor<fp16, []>(0x1p+0)];
+        tensor<fp16, []> const_4_to_fp16 = const()[name = tensor<string, []>("const_4_to_fp16"), val = tensor<fp16, []>(inf)];
+        tensor<fp16, [?, 1]> clip_1_cast_fp16 = clip(alpha = var_158_to_fp16, beta = const_4_to_fp16, x = var_157_cast_fp16)[name = tensor<string, []>("clip_1_cast_fp16")];
+        tensor<int32, [1]> var_164_axes_0 = const()[name = tensor<string, []>("op_164_axes_0"), val = tensor<int32, [1]>([2])];
+        tensor<fp16, [?, 128, 1]> var_164_cast_fp16 = expand_dims(axes = var_164_axes_0, x = x_mean_cast_fp16)[name = tensor<string, []>("op_164_cast_fp16")];
+        tensor<fp16, [?, 128, ?]> var_166_cast_fp16 = sub(x = x_11_cast_fp16, y = var_164_cast_fp16)[name = tensor<string, []>("op_166_cast_fp16")];
+        tensor<fp16, []> var_167_to_fp16 = const()[name = tensor<string, []>("op_167_to_fp16"), val = tensor<fp16, []>(0x0p+0)];
+        tensor<fp16, [?, 128, ?]> var_168_cast_fp16 = select(a = var_166_cast_fp16, b = var_167_to_fp16, cond = var_134)[name = tensor<string, []>("op_168_cast_fp16")];
+        tensor<fp16, []> var_169_promoted_to_fp16 = const()[name = tensor<string, []>("op_169_promoted_to_fp16"), val = tensor<fp16, []>(0x1p+1)];
+        tensor<fp16, [?, 128, ?]> var_170_cast_fp16 = pow(x = var_168_cast_fp16, y = var_169_promoted_to_fp16)[name = tensor<string, []>("op_170_cast_fp16")];
+        tensor<int32, [1]> var_175_axes_0 = const()[name = tensor<string, []>("op_175_axes_0"), val = tensor<int32, [1]>([2])];
+        tensor<bool, []> var_175_keep_dims_0 = const()[name = tensor<string, []>("op_175_keep_dims_0"), val = tensor<bool, []>(false)];
+        tensor<fp16, [?, 128]> var_175_cast_fp16 = reduce_sum(axes = var_175_axes_0, keep_dims = var_175_keep_dims_0, x = var_170_cast_fp16)[name = tensor<string, []>("op_175_cast_fp16")];
+        tensor<fp16, [?, 128]> var_176_cast_fp16 = real_div(x = var_175_cast_fp16, y = clip_1_cast_fp16)[name = tensor<string, []>("op_176_cast_fp16")];
+        tensor<fp16, [?, 128]> x_std_1_cast_fp16 = sqrt(x = var_176_cast_fp16)[name = tensor<string, []>("x_std_1_cast_fp16")];
+        tensor<fp16, []> var_178_to_fp16 = const()[name = tensor<string, []>("op_178_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
+        tensor<fp16, [?, 128]> x_std_cast_fp16 = add(x = x_std_1_cast_fp16, y = var_178_to_fp16)[name = tensor<string, []>("x_std_cast_fp16")];
+        tensor<int32, [1]> var_186_axes_0 = const()[name = tensor<string, []>("op_186_axes_0"), val = tensor<int32, [1]>([2])];
+        tensor<fp16, [?, 128, 1]> var_186_cast_fp16 = expand_dims(axes = var_186_axes_0, x = x_std_cast_fp16)[name = tensor<string, []>("op_186_cast_fp16")];
+        tensor<fp16, [?, 128, ?]> x_cast_fp16 = real_div(x = var_166_cast_fp16, y = var_186_cast_fp16)[name = tensor<string, []>("x_cast_fp16")];
+        tensor<int32, [3]> var_189_shape_cast_fp16 = shape(x = x_cast_fp16)[name = tensor<string, []>("op_189_shape_cast_fp16")];
+        tensor<int32, []> gather_6_axis_0 = const()[name = tensor<string, []>("gather_6_axis_0"), val = tensor<int32, []>(0)];
+        tensor<int32, []> gather_6_batch_dims_0 = const()[name = tensor<string, []>("gather_6_batch_dims_0"), val = tensor<int32, []>(0)];
+        tensor<bool, []> gather_6_validate_indices_0 = const()[name = tensor<string, []>("gather_6_validate_indices_0"), val = tensor<bool, []>(false)];
+        tensor<string, []> var_189_shape_cast_fp16_to_uint16_dtype_0 = const()[name = tensor<string, []>("op_189_shape_cast_fp16_to_uint16_dtype_0"), val = tensor<string, []>("uint16")];
+        tensor<uint16, []> select_6_to_uint16 = const()[name = tensor<string, []>("select_6_to_uint16"), val = tensor<uint16, []>(2)];
+        tensor<uint16, [3]> var_189_shape_cast_fp16_to_uint16 = cast(dtype = var_189_shape_cast_fp16_to_uint16_dtype_0, x = var_189_shape_cast_fp16)[name = tensor<string, []>("cast_17")];
+        tensor<uint16, []> gather_6_cast_uint16 = gather(axis = gather_6_axis_0, batch_dims = gather_6_batch_dims_0, indices = select_6_to_uint16, validate_indices = gather_6_validate_indices_0, x = var_189_shape_cast_fp16_to_uint16)[name = tensor<string, []>("gather_6_cast_uint16")];
+        tensor<string, []> gather_6_cast_uint16_to_int32_dtype_0 = const()[name = tensor<string, []>("gather_6_cast_uint16_to_int32_dtype_0"), val = tensor<string, []>("int32")];
+        tensor<int32, []> const_5 = const()[name = tensor<string, []>("const_5"), val = tensor<int32, []>(0)];
+        tensor<int32, []> const_6 = const()[name = tensor<string, []>("const_6"), val = tensor<int32, []>(1)];
+        tensor<int32, []> gather_6_cast_uint16_to_int32 = cast(dtype = gather_6_cast_uint16_to_int32_dtype_0, x = gather_6_cast_uint16)[name = tensor<string, []>("cast_16")];
+        tensor<int32, [?]> mask_1 = range_1d(end = gather_6_cast_uint16_to_int32, start = const_5, step = const_6)[name = tensor<string, []>("mask_1")];
+        tensor<int32, []> gather_7_axis_0 = const()[name = tensor<string, []>("gather_7_axis_0"), val = tensor<int32, []>(0)];
+        tensor<int32, []> gather_7_batch_dims_0 = const()[name = tensor<string, []>("gather_7_batch_dims_0"), val = tensor<int32, []>(0)];
+        tensor<bool, []> gather_7_validate_indices_0 = const()[name = tensor<string, []>("gather_7_validate_indices_0"), val = tensor<bool, []>(false)];
+        tensor<uint16, []> select_7_to_uint16 = const()[name = tensor<string, []>("select_7_to_uint16"), val = tensor<uint16, []>(0)];
+        tensor<uint16, []> gather_7_cast_uint16 = gather(axis = gather_7_axis_0, batch_dims = gather_7_batch_dims_0, indices = select_7_to_uint16, validate_indices = gather_7_validate_indices_0, x = var_189_shape_cast_fp16_to_uint16)[name = tensor<string, []>("gather_7_cast_uint16")];
+        tensor<string, []> gather_7_cast_uint16_to_int32_dtype_0 = const()[name = tensor<string, []>("gather_7_cast_uint16_to_int32_dtype_0"), val = tensor<string, []>("int32")];
+        tensor<int32, []> var_201 = const()[name = tensor<string, []>("op_201"), val = tensor<int32, []>(1)];
+        tensor<int32, []> concat_3_axis_0 = const()[name = tensor<string, []>("concat_3_axis_0"), val = tensor<int32, []>(0)];
+        tensor<bool, []> concat_3_interleave_0 = const()[name = tensor<string, []>("concat_3_interleave_0"), val = tensor<bool, []>(false)];
+        tensor<int32, []> gather_7_cast_uint16_to_int32 = cast(dtype = gather_7_cast_uint16_to_int32_dtype_0, x = gather_7_cast_uint16)[name = tensor<string, []>("cast_15")];
+        tensor<int32, [2]> concat_3 = concat(axis = concat_3_axis_0, interleave = concat_3_interleave_0, values = (gather_7_cast_uint16_to_int32, var_201))[name = tensor<string, []>("concat_3")];
+        tensor<int32, [1]> expand_dims_0_axes_0 = const()[name = tensor<string, []>("expand_dims_0_axes_0"), val = tensor<int32, [1]>([0])];
+        tensor<int32, [1, ?]> expand_dims_0 = expand_dims(axes = expand_dims_0_axes_0, x = mask_1)[name = tensor<string, []>("expand_dims_0")];
+        tensor<int32, [?, ?]> var_203 = tile(reps = concat_3, x = expand_dims_0)[name = tensor<string, []>("op_203")];
+        tensor<bool, [?, ?]> mask = greater_equal(x = var_203, y = var_131)[name = tensor<string, []>("mask")];
+        tensor<int32, [1]> var_208_axes_0 = const()[name = tensor<string, []>("op_208_axes_0"), val = tensor<int32, [1]>([1])];
+        tensor<bool, [?, 1, ?]> var_208 = expand_dims(axes = var_208_axes_0, x = mask)[name = tensor<string, []>("op_208")];
+        tensor<fp16, []> var_222_to_fp16 = const()[name = tensor<string, []>("op_222_to_fp16"), val = tensor<fp16, []>(0x0p+0)];
+        tensor<fp16, [?, 128, ?]> melspectrogram = select(a = var_222_to_fp16, b = x_cast_fp16, cond = var_208)[name = tensor<string, []>("op_223_cast_fp16")];
+    } -> (melspectrogram, melspectrogram_length);
+}
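
For orientation (not part of the commit): in the graph above, `melspectrogram_length` reduces to floor(audio_length / 160) + 1, since the added and subtracted 512 cancel and the floor-divide uses the 160-sample hop. A small sketch of that relationship:

```python
def expected_mel_frames(audio_length: int, hop_length: int = 160) -> int:
    # Mirrors ops op_7 / op_10 / floor_div_0 / seq_len_1: (audio_length + 512 - 512) // 160 + 1
    return audio_length // hop_length + 1

assert expected_mel_frames(160) == 2        # shortest supported input
assert expected_mel_frames(480000) == 3001  # longest supported input
```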
Melspectrogram_v2.mlmodelc/weights/weight.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:023c2303b7c3a1fafed92fc6ec46c1d43a48c0bbcdf33d6441d383a61747734c
+size 592384