Upload folder using huggingface_hub
This view is limited to 50 files because it contains too many changes. See the raw diff for the full change set.
- openai_whisper-large-v3/AudioEncoder.mlmodelc/weights/weight.bin +3 -0
- openai_whisper-large-v3_turbo/AudioEncoder.mlmodelc/weights/weight.bin +3 -0
- openai_whisper-large-v3_turbo/TextDecoder.mlmodelc/weights/weight.bin +3 -0
- openai_whisper-small.en/AudioEncoder.mlmodelc/weights/weight.bin +3 -0
- openai_whisper-small.en/TextDecoder.mlmodelc/weights/weight.bin +3 -0
- openai_whisper-small/AudioEncoder.mlmodelc/coremldata.bin +3 -0
- openai_whisper-small/AudioEncoder.mlmodelc/metadata.json +69 -0
- openai_whisper-small/AudioEncoder.mlmodelc/model.mil +0 -0
- openai_whisper-small/AudioEncoder.mlmodelc/model.mlmodel +3 -0
- openai_whisper-small/AudioEncoder.mlmodelc/weights/weight.bin +3 -0
- openai_whisper-small/MelSpectrogram.mlmodelc/analytics/coremldata.bin +3 -0
- openai_whisper-small/MelSpectrogram.mlmodelc/coremldata.bin +3 -0
- openai_whisper-small/MelSpectrogram.mlmodelc/metadata.json +71 -0
- openai_whisper-small/MelSpectrogram.mlmodelc/model.mil +66 -0
- openai_whisper-small/MelSpectrogram.mlmodelc/weights/weight.bin +3 -0
- openai_whisper-small/TextDecoder.mlmodelc/analytics/coremldata.bin +3 -0
- openai_whisper-small/TextDecoder.mlmodelc/coremldata.bin +3 -0
- openai_whisper-small/TextDecoder.mlmodelc/metadata.json +165 -0
- openai_whisper-small/TextDecoder.mlmodelc/model.mil +0 -0
- openai_whisper-small/TextDecoder.mlmodelc/model.mlmodel +3 -0
- openai_whisper-small/TextDecoder.mlmodelc/weights/weight.bin +3 -0
- openai_whisper-small/config.json +1 -0
- openai_whisper-small/generation_config.json +1 -0
- openai_whisper-tiny.en/AudioEncoder.mlmodelc/analytics/coremldata.bin +3 -0
- openai_whisper-tiny.en/AudioEncoder.mlmodelc/coremldata.bin +3 -0
- openai_whisper-tiny.en/AudioEncoder.mlmodelc/metadata.json +67 -0
- openai_whisper-tiny.en/AudioEncoder.mlmodelc/model.mil +0 -0
- openai_whisper-tiny.en/AudioEncoder.mlmodelc/model.mlmodel +3 -0
- openai_whisper-tiny.en/AudioEncoder.mlmodelc/weights/weight.bin +3 -0
- openai_whisper-tiny.en/AudioEncoder.mlpackage/Data/com.apple.CoreML/model.mlmodel +3 -0
- openai_whisper-tiny.en/AudioEncoder.mlpackage/Data/com.apple.CoreML/weights/weight.bin +3 -0
- openai_whisper-tiny.en/AudioEncoder.mlpackage/Manifest.json +18 -0
- openai_whisper-tiny.en/MelSpectrogram.mlmodelc/analytics/coremldata.bin +3 -0
- openai_whisper-tiny.en/MelSpectrogram.mlmodelc/coremldata.bin +3 -0
- openai_whisper-tiny.en/MelSpectrogram.mlmodelc/metadata.json +71 -0
- openai_whisper-tiny.en/MelSpectrogram.mlmodelc/model.mil +66 -0
- openai_whisper-tiny.en/MelSpectrogram.mlmodelc/weights/weight.bin +3 -0
- openai_whisper-tiny.en/MelSpectrogram.mlpackage/Data/com.apple.CoreML/model.mlmodel +3 -0
- openai_whisper-tiny.en/MelSpectrogram.mlpackage/Data/com.apple.CoreML/weights/weight.bin +3 -0
- openai_whisper-tiny.en/MelSpectrogram.mlpackage/Manifest.json +18 -0
- openai_whisper-tiny.en/TextDecoder.mlmodelc/analytics/coremldata.bin +3 -0
- openai_whisper-tiny.en/TextDecoder.mlmodelc/coremldata.bin +3 -0
- openai_whisper-tiny.en/TextDecoder.mlmodelc/metadata.json +165 -0
- openai_whisper-tiny.en/TextDecoder.mlmodelc/model.mil +0 -0
- openai_whisper-tiny.en/TextDecoder.mlmodelc/model.mlmodel +3 -0
- openai_whisper-tiny.en/TextDecoder.mlmodelc/weights/weight.bin +3 -0
- openai_whisper-tiny.en/TextDecoder.mlpackage/Data/com.apple.CoreML/model.mlmodel +3 -0
- openai_whisper-tiny.en/TextDecoder.mlpackage/Data/com.apple.CoreML/weights/weight.bin +3 -0
- openai_whisper-tiny.en/TextDecoder.mlpackage/Manifest.json +18 -0
- openai_whisper-tiny.en/config.json +1 -0
openai_whisper-large-v3/AudioEncoder.mlmodelc/weights/weight.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:eb07bab32dcd62ce653b5b288bd6c27bdc5a538be309f242e33ed05e1cb53457
+size 1273974400
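Each weight.bin, model.mlmodel, and coremldata.bin entry in this commit is a Git LFS pointer file rather than the binary itself: three "key value" lines giving the pointer spec version, the SHA-256 digest of the real blob, and its size in bytes. A minimal sketch of reading one of these pointers (the parse_lfs_pointer helper is hypothetical, for illustration only; real workflows should let `git lfs` resolve the blobs):

```python
from pathlib import Path

def parse_lfs_pointer(path: str) -> dict:
    """Parse a Git LFS pointer file into its version/oid/size fields.

    Hypothetical helper: a pointer is three "key value" lines as shown
    in the diff above.
    """
    fields = {}
    for line in Path(path).read_text().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return {
        "version": fields["version"],
        "oid": fields["oid"],         # e.g. "sha256:eb07bab3..."
        "size": int(fields["size"]),  # byte size of the real blob
    }

# Example: the large-v3 encoder weights above point at a ~1.27 GB blob.
# parse_lfs_pointer("openai_whisper-large-v3/AudioEncoder.mlmodelc/weights/weight.bin")
```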
openai_whisper-large-v3_turbo/AudioEncoder.mlmodelc/weights/weight.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6f46870171555792f9e98d5266d2c7d885a18962093b3a9544fffa54dbe8df16
+size 1273974400

openai_whisper-large-v3_turbo/TextDecoder.mlmodelc/weights/weight.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:745c0c3896c41cd6ad01b6c3ed852e0bb1cb2fd1ef579017c5cc9a8aff1d3c66
+size 1813201716

openai_whisper-small.en/AudioEncoder.mlmodelc/weights/weight.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f92860042703b3679071e7eeb03c861e52bf0e1da38943cf7c37eb5fecfb3abe
+size 176323456

openai_whisper-small.en/TextDecoder.mlmodelc/weights/weight.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a71a58c723a8c379fbc0ba666d6a3d3dd85d84d34ee8665697d2edab52f2f6b1
+size 307285808

openai_whisper-small/AudioEncoder.mlmodelc/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d68f152b6573ac55203a3dc8383730e6ecde685c7d2a88815b89820c88e35371
+size 347
openai_whisper-small/AudioEncoder.mlmodelc/metadata.json
ADDED
@@ -0,0 +1,69 @@
+[
+  {
+    "metadataOutputVersion" : "3.0",
+    "storagePrecision" : "Float16",
+    "outputSchema" : [
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 768 × 1 × 1500)",
+        "shortDescription" : "",
+        "shape" : "[1, 768, 1, 1500]",
+        "name" : "encoder_output_embeds",
+        "type" : "MultiArray"
+      }
+    ],
+    "modelParameters" : [
+
+    ],
+    "specificationVersion" : 7,
+    "mlProgramOperationTypeHistogram" : {
+      "Concat" : 156,
+      "Ios16.rsqrt" : 25,
+      "Ios16.mul" : 626,
+      "SliceByIndex" : 1008,
+      "Ios16.sub" : 25,
+      "Transpose" : 12,
+      "Ios16.einsum" : 1152,
+      "Ios16.conv" : 74,
+      "Ios16.add" : 50,
+      "Ios16.reduceMean" : 50,
+      "Ios16.softmax" : 576,
+      "Ios16.gelu" : 14,
+      "Ios16.batchNorm" : 25
+    },
+    "computePrecision" : "Mixed (Float16, Int32)",
+    "isUpdatable" : "0",
+    "availability" : {
+      "macOS" : "13.0",
+      "tvOS" : "16.0",
+      "visionOS" : "1.0",
+      "watchOS" : "9.0",
+      "iOS" : "16.0",
+      "macCatalyst" : "16.0"
+    },
+    "modelType" : {
+      "name" : "MLModelType_mlProgram"
+    },
+    "userDefinedMetadata" : {
+      "com.github.apple.coremltools.source_dialect" : "TorchScript",
+      "com.github.apple.coremltools.source" : "torch==2.2.1",
+      "com.github.apple.coremltools.version" : "7.1"
+    },
+    "inputSchema" : [
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 80 × 1 × 3000)",
+        "shortDescription" : "",
+        "shape" : "[1, 80, 1, 3000]",
+        "name" : "melspectrogram_features",
+        "type" : "MultiArray"
+      }
+    ],
+    "generatedClassName" : "AudioEncoder",
+    "method" : "predict"
+  }
+]
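The metadata above pins down the encoder's I/O contract: one Float16 input melspectrogram_features of shape [1, 80, 1, 3000] and one Float16 output encoder_output_embeds of shape [1, 768, 1, 1500]. A hedged sketch of exercising the compiled .mlmodelc with coremltools (paths are placeholders from this commit; CompiledMLModel loads compiled bundles directly and requires macOS):

```python
import numpy as np
import coremltools as ct

# Load the compiled model bundle directly (macOS only); path is a placeholder.
encoder = ct.models.CompiledMLModel("openai_whisper-small/AudioEncoder.mlmodelc")

# Shape and dtype follow the inputSchema above: Float16 [1, 80, 1, 3000].
mel = np.zeros((1, 80, 1, 3000), dtype=np.float16)

out = encoder.predict({"melspectrogram_features": mel})
print(out["encoder_output_embeds"].shape)  # expected (1, 768, 1, 1500)
```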
openai_whisper-small/AudioEncoder.mlmodelc/model.mil
ADDED
The diff for this file is too large to render. See the raw diff.
openai_whisper-small/AudioEncoder.mlmodelc/model.mlmodel
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:68ca04660b8b050c68ca54c27d97c47e4133bc591422cb7009de8922d56fb8c9
+size 155271

openai_whisper-small/AudioEncoder.mlmodelc/weights/weight.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fe35cef2c9406993a635639b16f373f6debb0215ac115b7bf93fa03c8e10310b
+size 176323456

openai_whisper-small/MelSpectrogram.mlmodelc/analytics/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7f77e6457285248f99cd7aa3fd4cc2efbb17733e63e7023ac53abe1f95785d07
+size 243

openai_whisper-small/MelSpectrogram.mlmodelc/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dabdc5aa69f6ef4d97dc9499f5c30514e00e96b53b750b33a5a6471363c71662
+size 328
openai_whisper-small/MelSpectrogram.mlmodelc/metadata.json
ADDED
@@ -0,0 +1,71 @@
+[
+  {
+    "metadataOutputVersion" : "3.0",
+    "storagePrecision" : "Float16",
+    "outputSchema" : [
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 80 × 1 × 3000)",
+        "shortDescription" : "",
+        "shape" : "[1, 80, 1, 3000]",
+        "name" : "melspectrogram_features",
+        "type" : "MultiArray"
+      }
+    ],
+    "modelParameters" : [
+
+    ],
+    "specificationVersion" : 7,
+    "mlProgramOperationTypeHistogram" : {
+      "Pad" : 1,
+      "Ios16.mul" : 2,
+      "SliceByIndex" : 1,
+      "Ios16.sub" : 1,
+      "Ios16.log" : 1,
+      "Ios16.conv" : 2,
+      "Ios16.add" : 3,
+      "Ios16.square" : 2,
+      "Ios16.matmul" : 1,
+      "Squeeze" : 2,
+      "Ios16.maximum" : 1,
+      "ExpandDims" : 4,
+      "Ios16.reduceMax" : 1,
+      "Identity" : 1,
+      "Ios16.reshape" : 2
+    },
+    "computePrecision" : "Mixed (Float16, Int32)",
+    "isUpdatable" : "0",
+    "availability" : {
+      "macOS" : "13.0",
+      "tvOS" : "16.0",
+      "visionOS" : "1.0",
+      "watchOS" : "9.0",
+      "iOS" : "16.0",
+      "macCatalyst" : "16.0"
+    },
+    "modelType" : {
+      "name" : "MLModelType_mlProgram"
+    },
+    "userDefinedMetadata" : {
+      "com.github.apple.coremltools.source_dialect" : "TorchScript",
+      "com.github.apple.coremltools.source" : "torch==2.2.1",
+      "com.github.apple.coremltools.version" : "7.1"
+    },
+    "inputSchema" : [
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 480000)",
+        "shortDescription" : "",
+        "shape" : "[480000]",
+        "name" : "audio",
+        "type" : "MultiArray"
+      }
+    ],
+    "generatedClassName" : "MelSpectrogram",
+    "method" : "predict"
+  }
+]
openai_whisper-small/MelSpectrogram.mlmodelc/model.mil
ADDED
@@ -0,0 +1,66 @@
+program(1.0)
+[buildInfo = dict<tensor<string, []>, tensor<string, []>>({{"coremlc-component-MIL", "5.33.5"}, {"coremlc-version", "1877.40.3"}, {"coremltools-component-torch", "2.2.1"}, {"coremltools-source-dialect", "TorchScript"}, {"coremltools-version", "7.1"}})]
+{
+    func main<ios16>(tensor<fp16, [480000]> audio) {
+        tensor<int32, [3]> var_10 = const()[name = tensor<string, []>("op_10"), val = tensor<int32, [3]>([1, 1, 480000])];
+        tensor<fp16, [1, 1, 480000]> input_1_cast_fp16 = reshape(shape = var_10, x = audio)[name = tensor<string, []>("input_1_cast_fp16")];
+        tensor<int32, [6]> input_3_pad_0 = const()[name = tensor<string, []>("input_3_pad_0"), val = tensor<int32, [6]>([0, 0, 0, 0, 200, 200])];
+        tensor<string, []> input_3_mode_0 = const()[name = tensor<string, []>("input_3_mode_0"), val = tensor<string, []>("reflect")];
+        tensor<fp16, []> input_3_constant_val_0_to_fp16 = const()[name = tensor<string, []>("input_3_constant_val_0_to_fp16"), val = tensor<fp16, []>(0x0p+0)];
+        tensor<fp16, [1, 1, 480400]> input_3_cast_fp16 = pad(constant_val = input_3_constant_val_0_to_fp16, mode = input_3_mode_0, pad = input_3_pad_0, x = input_1_cast_fp16)[name = tensor<string, []>("input_3_cast_fp16")];
+        tensor<int32, [1]> var_22 = const()[name = tensor<string, []>("op_22"), val = tensor<int32, [1]>([480400])];
+        tensor<fp16, [480400]> input_cast_fp16 = reshape(shape = var_22, x = input_3_cast_fp16)[name = tensor<string, []>("input_cast_fp16")];
+        tensor<int32, [1]> expand_dims_0_axes_0 = const()[name = tensor<string, []>("expand_dims_0_axes_0"), val = tensor<int32, [1]>([0])];
+        tensor<fp16, [1, 480400]> expand_dims_0_cast_fp16 = expand_dims(axes = expand_dims_0_axes_0, x = input_cast_fp16)[name = tensor<string, []>("expand_dims_0_cast_fp16")];
+        tensor<int32, [1]> expand_dims_3 = const()[name = tensor<string, []>("expand_dims_3"), val = tensor<int32, [1]>([160])];
+        tensor<int32, [1]> expand_dims_4_axes_0 = const()[name = tensor<string, []>("expand_dims_4_axes_0"), val = tensor<int32, [1]>([1])];
+        tensor<fp16, [1, 1, 480400]> expand_dims_4_cast_fp16 = expand_dims(axes = expand_dims_4_axes_0, x = expand_dims_0_cast_fp16)[name = tensor<string, []>("expand_dims_4_cast_fp16")];
+        tensor<string, []> conv_0_pad_type_0 = const()[name = tensor<string, []>("conv_0_pad_type_0"), val = tensor<string, []>("valid")];
+        tensor<int32, [2]> conv_0_pad_0 = const()[name = tensor<string, []>("conv_0_pad_0"), val = tensor<int32, [2]>([0, 0])];
+        tensor<int32, [1]> conv_0_dilations_0 = const()[name = tensor<string, []>("conv_0_dilations_0"), val = tensor<int32, [1]>([1])];
+        tensor<int32, []> conv_0_groups_0 = const()[name = tensor<string, []>("conv_0_groups_0"), val = tensor<int32, []>(1)];
+        tensor<fp16, [201, 1, 400]> expand_dims_1_to_fp16 = const()[name = tensor<string, []>("expand_dims_1_to_fp16"), val = tensor<fp16, [201, 1, 400]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(64)))];
+        tensor<fp16, [1, 201, 3001]> conv_0_cast_fp16 = conv(dilations = conv_0_dilations_0, groups = conv_0_groups_0, pad = conv_0_pad_0, pad_type = conv_0_pad_type_0, strides = expand_dims_3, weight = expand_dims_1_to_fp16, x = expand_dims_4_cast_fp16)[name = tensor<string, []>("conv_0_cast_fp16")];
+        tensor<string, []> conv_1_pad_type_0 = const()[name = tensor<string, []>("conv_1_pad_type_0"), val = tensor<string, []>("valid")];
+        tensor<int32, [2]> conv_1_pad_0 = const()[name = tensor<string, []>("conv_1_pad_0"), val = tensor<int32, [2]>([0, 0])];
+        tensor<int32, [1]> conv_1_dilations_0 = const()[name = tensor<string, []>("conv_1_dilations_0"), val = tensor<int32, [1]>([1])];
+        tensor<int32, []> conv_1_groups_0 = const()[name = tensor<string, []>("conv_1_groups_0"), val = tensor<int32, []>(1)];
+        tensor<fp16, [201, 1, 400]> expand_dims_2_to_fp16 = const()[name = tensor<string, []>("expand_dims_2_to_fp16"), val = tensor<fp16, [201, 1, 400]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(160960)))];
+        tensor<fp16, [1, 201, 3001]> conv_1_cast_fp16 = conv(dilations = conv_1_dilations_0, groups = conv_1_groups_0, pad = conv_1_pad_0, pad_type = conv_1_pad_type_0, strides = expand_dims_3, weight = expand_dims_2_to_fp16, x = expand_dims_4_cast_fp16)[name = tensor<string, []>("conv_1_cast_fp16")];
+        tensor<int32, [1]> squeeze_0_axes_0 = const()[name = tensor<string, []>("squeeze_0_axes_0"), val = tensor<int32, [1]>([0])];
+        tensor<fp16, [201, 3001]> squeeze_0_cast_fp16 = squeeze(axes = squeeze_0_axes_0, x = conv_0_cast_fp16)[name = tensor<string, []>("squeeze_0_cast_fp16")];
+        tensor<int32, [1]> squeeze_1_axes_0 = const()[name = tensor<string, []>("squeeze_1_axes_0"), val = tensor<int32, [1]>([0])];
+        tensor<fp16, [201, 3001]> squeeze_1_cast_fp16 = squeeze(axes = squeeze_1_axes_0, x = conv_1_cast_fp16)[name = tensor<string, []>("squeeze_1_cast_fp16")];
+        tensor<fp16, [201, 3001]> square_0_cast_fp16 = square(x = squeeze_0_cast_fp16)[name = tensor<string, []>("square_0_cast_fp16")];
+        tensor<fp16, [201, 3001]> square_1_cast_fp16 = square(x = squeeze_1_cast_fp16)[name = tensor<string, []>("square_1_cast_fp16")];
+        tensor<fp16, [201, 3001]> add_1_cast_fp16 = add(x = square_0_cast_fp16, y = square_1_cast_fp16)[name = tensor<string, []>("add_1_cast_fp16")];
+        tensor<fp16, [201, 3001]> magnitudes_1_cast_fp16 = identity(x = add_1_cast_fp16)[name = tensor<string, []>("magnitudes_1_cast_fp16")];
+        tensor<int32, [2]> magnitudes_begin_0 = const()[name = tensor<string, []>("magnitudes_begin_0"), val = tensor<int32, [2]>([0, 0])];
+        tensor<int32, [2]> magnitudes_end_0 = const()[name = tensor<string, []>("magnitudes_end_0"), val = tensor<int32, [2]>([201, 3000])];
+        tensor<bool, [2]> magnitudes_end_mask_0 = const()[name = tensor<string, []>("magnitudes_end_mask_0"), val = tensor<bool, [2]>([true, false])];
+        tensor<fp16, [201, 3000]> magnitudes_cast_fp16 = slice_by_index(begin = magnitudes_begin_0, end = magnitudes_end_0, end_mask = magnitudes_end_mask_0, x = magnitudes_1_cast_fp16)[name = tensor<string, []>("magnitudes_cast_fp16")];
+        tensor<bool, []> mel_spec_1_transpose_x_0 = const()[name = tensor<string, []>("mel_spec_1_transpose_x_0"), val = tensor<bool, []>(false)];
+        tensor<bool, []> mel_spec_1_transpose_y_0 = const()[name = tensor<string, []>("mel_spec_1_transpose_y_0"), val = tensor<bool, []>(false)];
+        tensor<fp16, [80, 201]> mel_filters_to_fp16 = const()[name = tensor<string, []>("mel_filters_to_fp16"), val = tensor<fp16, [80, 201]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(321856)))];
+        tensor<fp16, [80, 3000]> mel_spec_1_cast_fp16 = matmul(transpose_x = mel_spec_1_transpose_x_0, transpose_y = mel_spec_1_transpose_y_0, x = mel_filters_to_fp16, y = magnitudes_cast_fp16)[name = tensor<string, []>("mel_spec_1_cast_fp16")];
+        tensor<fp16, []> var_41_to_fp16 = const()[name = tensor<string, []>("op_41_to_fp16"), val = tensor<fp16, []>(0x1p-24)];
+        tensor<fp16, [80, 3000]> mel_spec_cast_fp16 = add(x = mel_spec_1_cast_fp16, y = var_41_to_fp16)[name = tensor<string, []>("mel_spec_cast_fp16")];
+        tensor<fp16, []> log_0_epsilon_0_to_fp16 = const()[name = tensor<string, []>("log_0_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x0p+0)];
+        tensor<fp16, [80, 3000]> log_0_cast_fp16 = log(epsilon = log_0_epsilon_0_to_fp16, x = mel_spec_cast_fp16)[name = tensor<string, []>("log_0_cast_fp16")];
+        tensor<fp16, []> mul_0_y_0_to_fp16 = const()[name = tensor<string, []>("mul_0_y_0_to_fp16"), val = tensor<fp16, []>(0x1.bccp-2)];
+        tensor<fp16, [80, 3000]> mul_0_cast_fp16 = mul(x = log_0_cast_fp16, y = mul_0_y_0_to_fp16)[name = tensor<string, []>("mul_0_cast_fp16")];
+        tensor<bool, []> var_44_keep_dims_0 = const()[name = tensor<string, []>("op_44_keep_dims_0"), val = tensor<bool, []>(false)];
+        tensor<fp16, []> var_44_cast_fp16 = reduce_max(keep_dims = var_44_keep_dims_0, x = mul_0_cast_fp16)[name = tensor<string, []>("op_44_cast_fp16")];
+        tensor<fp16, []> var_46_to_fp16 = const()[name = tensor<string, []>("op_46_to_fp16"), val = tensor<fp16, []>(0x1p+3)];
+        tensor<fp16, []> var_47_cast_fp16 = sub(x = var_44_cast_fp16, y = var_46_to_fp16)[name = tensor<string, []>("op_47_cast_fp16")];
+        tensor<fp16, [80, 3000]> log_spec_3_cast_fp16 = maximum(x = mul_0_cast_fp16, y = var_47_cast_fp16)[name = tensor<string, []>("log_spec_3_cast_fp16")];
+        tensor<fp16, []> var_50_to_fp16 = const()[name = tensor<string, []>("op_50_to_fp16"), val = tensor<fp16, []>(0x1p+2)];
+        tensor<fp16, [80, 3000]> var_51_cast_fp16 = add(x = log_spec_3_cast_fp16, y = var_50_to_fp16)[name = tensor<string, []>("op_51_cast_fp16")];
+        tensor<fp16, []> _inversed_log_spec_y_0_to_fp16 = const()[name = tensor<string, []>("_inversed_log_spec_y_0_to_fp16"), val = tensor<fp16, []>(0x1p-2)];
+        tensor<fp16, [80, 3000]> _inversed_log_spec_cast_fp16 = mul(x = var_51_cast_fp16, y = _inversed_log_spec_y_0_to_fp16)[name = tensor<string, []>("_inversed_log_spec_cast_fp16")];
+        tensor<int32, [1]> var_55_axes_0 = const()[name = tensor<string, []>("op_55_axes_0"), val = tensor<int32, [1]>([0])];
+        tensor<fp16, [1, 80, 3000]> var_55_cast_fp16 = expand_dims(axes = var_55_axes_0, x = _inversed_log_spec_cast_fp16)[name = tensor<string, []>("op_55_cast_fp16")];
+        tensor<int32, [1]> var_62_axes_0 = const()[name = tensor<string, []>("op_62_axes_0"), val = tensor<int32, [1]>([2])];
+        tensor<fp16, [1, 80, 1, 3000]> melspectrogram_features = expand_dims(axes = var_62_axes_0, x = var_55_cast_fp16)[name = tensor<string, []>("op_62_cast_fp16")];
+    } -> (melspectrogram_features);
+}
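The MIL program above is Whisper's 30-second log-mel frontend: the audio is reflect-padded by 200 samples, the STFT is computed as two stride-160 convolutions against fixed 400-tap bases stored in weight.bin (real and imaginary DFT atoms, 201 one-sided bins), the squared magnitudes are summed and projected through an 80 × 201 mel filterbank, then log10-scaled (0x1.bccp-2 ≈ 0.4343 = 1/ln 10), clamped to max − 8 (0x1p+3), and rescaled as (x + 4) / 4. A NumPy sketch of the same pipeline, under the assumption that stft_real/stft_imag and mel_filters match the blobs referenced at offsets 64, 160960, and 321856 above:

```python
import numpy as np

def log_mel(audio, stft_real, stft_imag, mel_filters):
    """Mirror of the MIL graph above. Assumed inputs: audio [480000],
    stft_real/stft_imag [201, 400] DFT bases, mel_filters [80, 201]."""
    x = np.pad(audio, 200, mode="reflect")                 # [480400]
    # Stride-160 framing + matmul against the bases == the two convs.
    frames = np.stack([x[i * 160 : i * 160 + 400] for i in range(3001)])
    real = frames @ stft_real.T                            # [3001, 201]
    imag = frames @ stft_imag.T
    magnitudes = (real**2 + imag**2).T[:, :3000]           # slice drops frame 3001
    mel = mel_filters @ magnitudes + 2.0**-24              # epsilon from op_41
    log_spec = np.log10(mel)                               # ln * 0.4343 == log10
    log_spec = np.maximum(log_spec, log_spec.max() - 8.0)  # dynamic-range clamp
    return ((log_spec + 4.0) / 4.0)[None, :, None, :]      # [1, 80, 1, 3000]
```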
openai_whisper-small/MelSpectrogram.mlmodelc/weights/weight.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:267017e533b5f542d195fd9a775f2ba649075128283ce8e86c63a2ec20de5b07
+size 354080

openai_whisper-small/TextDecoder.mlmodelc/analytics/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:39c0d6d55353bc61ef8071081bb958dd1ab7b0b7f2a3338a797f1a64211e084c
+size 243

openai_whisper-small/TextDecoder.mlmodelc/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b2ccd0b8920701386ab9554f7db47b43e55ee07863280ee5d829d5272839adc2
+size 633
openai_whisper-small/TextDecoder.mlmodelc/metadata.json
ADDED
@@ -0,0 +1,165 @@
+[
+  {
+    "metadataOutputVersion" : "3.0",
+    "storagePrecision" : "Float16",
+    "outputSchema" : [
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 1 × 51865)",
+        "shortDescription" : "",
+        "shape" : "[1, 1, 51865]",
+        "name" : "logits",
+        "type" : "MultiArray"
+      },
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 9216 × 1 × 1)",
+        "shortDescription" : "",
+        "shape" : "[1, 9216, 1, 1]",
+        "name" : "key_cache_updates",
+        "type" : "MultiArray"
+      },
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 9216 × 1 × 1)",
+        "shortDescription" : "",
+        "shape" : "[1, 9216, 1, 1]",
+        "name" : "value_cache_updates",
+        "type" : "MultiArray"
+      },
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 1500)",
+        "shortDescription" : "",
+        "shape" : "[1, 1500]",
+        "name" : "alignment_heads_weights",
+        "type" : "MultiArray"
+      }
+    ],
+    "modelParameters" : [
+
+    ],
+    "specificationVersion" : 7,
+    "mlProgramOperationTypeHistogram" : {
+      "Split" : 2,
+      "Concat" : 3,
+      "Ios16.rsqrt" : 37,
+      "Ios16.mul" : 146,
+      "Squeeze" : 1,
+      "SliceByIndex" : 20,
+      "Ios16.sub" : 38,
+      "Transpose" : 1,
+      "Ios16.conv" : 120,
+      "Ios16.add" : 110,
+      "Ios16.linear" : 1,
+      "Ios16.matmul" : 48,
+      "Ios16.gelu" : 12,
+      "Ios16.reduceMean" : 75,
+      "ExpandDims" : 6,
+      "Ios16.batchNorm" : 37,
+      "Ios16.gather" : 2,
+      "Ios16.reshape" : 96,
+      "Ios16.softmax" : 24
+    },
+    "computePrecision" : "Mixed (Float16, Int32)",
+    "isUpdatable" : "0",
+    "availability" : {
+      "macOS" : "13.0",
+      "tvOS" : "16.0",
+      "visionOS" : "1.0",
+      "watchOS" : "9.0",
+      "iOS" : "16.0",
+      "macCatalyst" : "16.0"
+    },
+    "modelType" : {
+      "name" : "MLModelType_mlProgram"
+    },
+    "userDefinedMetadata" : {
+      "com.github.apple.coremltools.source_dialect" : "TorchScript",
+      "com.github.apple.coremltools.source" : "torch==2.2.1",
+      "com.github.apple.coremltools.version" : "7.1"
+    },
+    "inputSchema" : [
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Int32",
+        "formattedType" : "MultiArray (Int32 1)",
+        "shortDescription" : "",
+        "shape" : "[1]",
+        "name" : "input_ids",
+        "type" : "MultiArray"
+      },
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Int32",
+        "formattedType" : "MultiArray (Int32 1)",
+        "shortDescription" : "",
+        "shape" : "[1]",
+        "name" : "cache_length",
+        "type" : "MultiArray"
+      },
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 9216 × 1 × 224)",
+        "shortDescription" : "",
+        "shape" : "[1, 9216, 1, 224]",
+        "name" : "key_cache",
+        "type" : "MultiArray"
+      },
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 9216 × 1 × 224)",
+        "shortDescription" : "",
+        "shape" : "[1, 9216, 1, 224]",
+        "name" : "value_cache",
+        "type" : "MultiArray"
+      },
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 224)",
+        "shortDescription" : "",
+        "shape" : "[1, 224]",
+        "name" : "kv_cache_update_mask",
+        "type" : "MultiArray"
+      },
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 768 × 1 × 1500)",
+        "shortDescription" : "",
+        "shape" : "[1, 768, 1, 1500]",
+        "name" : "encoder_output_embeds",
+        "type" : "MultiArray"
+      },
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 224)",
+        "shortDescription" : "",
+        "shape" : "[1, 224]",
+        "name" : "decoder_key_padding_mask",
+        "type" : "MultiArray"
+      }
+    ],
+    "generatedClassName" : "TextDecoder",
+    "method" : "predict"
+  }
+]
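This schema describes a single-token decoding step with an externally managed KV cache: input_ids carries one token, key_cache/value_cache hold [1, 9216, 1, 224] Float16 state (9216 = 12 decoder layers × 768 channels stacked channel-wise, over 224 decoder positions), and each call returns logits over the 51865-token vocabulary plus [1, 9216, 1, 1] cache updates to write back. A hedged greedy-decoding driver sketch; the mask semantics (one-hot write mask, 0/−inf additive padding mask) and the cache_length convention are assumptions inferred from the tensor names and shapes, not documented in this commit:

```python
import numpy as np
import coremltools as ct

decoder = ct.models.CompiledMLModel("openai_whisper-small/TextDecoder.mlmodelc")

MAX_LEN = 224
key_cache = np.zeros((1, 9216, 1, MAX_LEN), dtype=np.float16)
value_cache = np.zeros((1, 9216, 1, MAX_LEN), dtype=np.float16)

# Placeholder; real embeds come from the AudioEncoder sketch earlier.
encoder_embeds = np.zeros((1, 768, 1, 1500), dtype=np.float16)
tokens = [50258]  # <|startoftranscript|> per config.json below

for step in range(MAX_LEN - 1):
    update_mask = np.zeros((1, MAX_LEN), dtype=np.float16)
    update_mask[0, step] = 1.0                    # assumed: slot to write this step
    padding_mask = np.full((1, MAX_LEN), -np.inf, dtype=np.float16)
    padding_mask[0, : step + 1] = 0.0             # assumed: attend to filled slots

    out = decoder.predict({
        "input_ids": np.array([tokens[-1]], dtype=np.int32),
        "cache_length": np.array([step], dtype=np.int32),
        "key_cache": key_cache,
        "value_cache": value_cache,
        "kv_cache_update_mask": update_mask,
        "encoder_output_embeds": encoder_embeds,
        "decoder_key_padding_mask": padding_mask,
    })
    # Scatter the per-step updates back into the external cache.
    key_cache[..., step : step + 1] = out["key_cache_updates"]
    value_cache[..., step : step + 1] = out["value_cache_updates"]
    next_token = int(out["logits"][0, 0].argmax())
    if next_token == 50257:                       # <|endoftext|>
        break
    tokens.append(next_token)
```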
openai_whisper-small/TextDecoder.mlmodelc/model.mil
ADDED
The diff for this file is too large to render. See the raw diff.
openai_whisper-small/TextDecoder.mlmodelc/model.mlmodel
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7ea861c6dfdd866ed0f2e7fe0c3df7459daa44481cb25236e03698dd6d259391
+size 313629

openai_whisper-small/TextDecoder.mlmodelc/weights/weight.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bfea8044a8f38e8d33f56585b1e75ce023d3845e2a945e20480bd7e16558016e
+size 307287346
openai_whisper-small/config.json
ADDED
@@ -0,0 +1 @@
+{"_name_or_path": "openai/whisper-small", "activation_dropout": 0.0, "activation_function": "gelu", "architectures": ["WhisperForConditionalGeneration"], "attention_dropout": 0.0, "begin_suppress_tokens": [220, 50257], "bos_token_id": 50257, "d_model": 768, "decoder_attention_heads": 12, "decoder_ffn_dim": 3072, "decoder_layerdrop": 0.0, "decoder_layers": 12, "decoder_start_token_id": 50258, "dropout": 0.0, "encoder_attention_heads": 12, "encoder_ffn_dim": 3072, "encoder_layerdrop": 0.0, "encoder_layers": 12, "eos_token_id": 50257, "forced_decoder_ids": [[1, 50259], [2, 50359], [3, 50363]], "init_std": 0.02, "is_encoder_decoder": true, "max_length": 448, "max_source_positions": 1500, "max_target_positions": 448, "model_type": "whisper", "num_hidden_layers": 12, "num_mel_bins": 80, "pad_token_id": 50257, "scale_embedding": false, "suppress_tokens": [1, 2, 7, 8, 9, 10, 14, 25, 26, 27, 28, 29, 31, 58, 59, 60, 61, 62, 63, 90, 91, 92, 93, 359, 503, 522, 542, 873, 893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627, 3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647, 7273, 9061, 9383, 10428, 10929, 11938, 12033, 12331, 12562, 13793, 14157, 14635, 15265, 15618, 16553, 16604, 18362, 18956, 20075, 21675, 22520, 26130, 26161, 26435, 28279, 29464, 31650, 32302, 32470, 36865, 42863, 47425, 49870, 50254, 50258, 50360, 50361, 50362], "torch_dtype": "float32", "transformers_version": "4.27.0.dev0", "use_cache": true, "vocab_size": 51865}
openai_whisper-small/generation_config.json
ADDED
@@ -0,0 +1 @@
+{"alignment_heads": [[5, 3], [5, 9], [8, 0], [8, 4], [8, 7], [8, 8], [9, 0], [9, 7], [9, 9], [10, 5]], "begin_suppress_tokens": [220, 50257], "bos_token_id": 50257, "decoder_start_token_id": 50258, "eos_token_id": 50257, "forced_decoder_ids": [[1, null], [2, 50359]], "is_multilingual": true, "lang_to_id": {"<|af|>": 50327, "<|am|>": 50334, "<|ar|>": 50272, "<|as|>": 50350, "<|az|>": 50304, "<|ba|>": 50355, "<|be|>": 50330, "<|bg|>": 50292, "<|bn|>": 50302, "<|bo|>": 50347, "<|br|>": 50309, "<|bs|>": 50315, "<|ca|>": 50270, "<|cs|>": 50283, "<|cy|>": 50297, "<|da|>": 50285, "<|de|>": 50261, "<|el|>": 50281, "<|en|>": 50259, "<|es|>": 50262, "<|et|>": 50307, "<|eu|>": 50310, "<|fa|>": 50300, "<|fi|>": 50277, "<|fo|>": 50338, "<|fr|>": 50265, "<|gl|>": 50319, "<|gu|>": 50333, "<|haw|>": 50352, "<|ha|>": 50354, "<|he|>": 50279, "<|hi|>": 50276, "<|hr|>": 50291, "<|ht|>": 50339, "<|hu|>": 50286, "<|hy|>": 50312, "<|id|>": 50275, "<|is|>": 50311, "<|it|>": 50274, "<|ja|>": 50266, "<|jw|>": 50356, "<|ka|>": 50329, "<|kk|>": 50316, "<|km|>": 50323, "<|kn|>": 50306, "<|ko|>": 50264, "<|la|>": 50294, "<|lb|>": 50345, "<|ln|>": 50353, "<|lo|>": 50336, "<|lt|>": 50293, "<|lv|>": 50301, "<|mg|>": 50349, "<|mi|>": 50295, "<|mk|>": 50308, "<|ml|>": 50296, "<|mn|>": 50314, "<|mr|>": 50320, "<|ms|>": 50282, "<|mt|>": 50343, "<|my|>": 50346, "<|ne|>": 50313, "<|nl|>": 50271, "<|nn|>": 50342, "<|no|>": 50288, "<|oc|>": 50328, "<|pa|>": 50321, "<|pl|>": 50269, "<|ps|>": 50340, "<|pt|>": 50267, "<|ro|>": 50284, "<|ru|>": 50263, "<|sa|>": 50344, "<|sd|>": 50332, "<|si|>": 50322, "<|sk|>": 50298, "<|sl|>": 50305, "<|sn|>": 50324, "<|so|>": 50326, "<|sq|>": 50317, "<|sr|>": 50303, "<|su|>": 50357, "<|sv|>": 50273, "<|sw|>": 50318, "<|ta|>": 50287, "<|te|>": 50299, "<|tg|>": 50331, "<|th|>": 50289, "<|tk|>": 50341, "<|tl|>": 50348, "<|tr|>": 50268, "<|tt|>": 50351, "<|uk|>": 50280, "<|ur|>": 50290, "<|uz|>": 50337, "<|vi|>": 50278, "<|yi|>": 50335, "<|yo|>": 50325, "<|zh|>": 50260}, "max_initial_timestamp_index": 50, "max_length": 448, "no_timestamps_token_id": 50363, "pad_token_id": 50257, "prev_sot_token_id": 50361, "return_timestamps": false, "suppress_tokens": [1, 2, 7, 8, 9, 10, 14, 25, 26, 27, 28, 29, 31, 58, 59, 60, 61, 62, 63, 90, 91, 92, 93, 359, 503, 522, 542, 873, 893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627, 3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647, 7273, 9061, 9383, 10428, 10929, 11938, 12033, 12331, 12562, 13793, 14157, 14635, 15265, 15618, 16553, 16604, 18362, 18956, 20075, 21675, 22520, 26130, 26161, 26435, 28279, 29464, 31650, 32302, 32470, 36865, 42863, 47425, 49870, 50254, 50258, 50358, 50359, 50360, 50361, 50362], "task_to_id": {"transcribe": 50359, "translate": 50358}, "transformers_version": "4.31.0.dev0"}
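generation_config.json carries the special-token maps a decoding driver needs: lang_to_id and task_to_id give the language and task tokens, forced_decoder_ids pins positions 1–2 ([[1, null], [2, 50359]] means position 1 is the chosen language token and position 2 defaults to transcribe), and suppress_tokens lists IDs to mask at every step. A hedged sketch of assembling the initial decoder prompt from these fields (build_prompt is a hypothetical helper, not part of this repo):

```python
import json

def build_prompt(gen_cfg_path, language="en", task="transcribe", timestamps=False):
    """Assemble Whisper's initial decoder tokens from generation_config.json:
    <|startoftranscript|>, then language, task, and optionally <|notimestamps|>."""
    with open(gen_cfg_path) as f:
        cfg = json.load(f)
    prompt = [cfg["decoder_start_token_id"]]              # 50258
    prompt.append(cfg["lang_to_id"][f"<|{language}|>"])   # e.g. 50259 for en
    prompt.append(cfg["task_to_id"][task])                # 50359 = transcribe
    if not timestamps:
        prompt.append(cfg["no_timestamps_token_id"])      # 50363
    return prompt

# build_prompt("openai_whisper-small/generation_config.json")
# -> [50258, 50259, 50359, 50363]
```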
openai_whisper-tiny.en/AudioEncoder.mlmodelc/analytics/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:eaaaa6671a96a359a0bbd5e97885246dcc17f7435b6ffad8d871bb940964500b
+size 243

openai_whisper-tiny.en/AudioEncoder.mlmodelc/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:325b182d0a4266730a81795ae6b7a787b5111dd091500fc0c04dedf610015d46
+size 347
openai_whisper-tiny.en/AudioEncoder.mlmodelc/metadata.json
ADDED
@@ -0,0 +1,67 @@
+[
+  {
+    "metadataOutputVersion" : "3.0",
+    "storagePrecision" : "Float16",
+    "outputSchema" : [
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 384 × 1 × 1500)",
+        "shortDescription" : "",
+        "shape" : "[1, 384, 1, 1500]",
+        "name" : "encoder_output_embeds",
+        "type" : "MultiArray"
+      }
+    ],
+    "modelParameters" : [
+
+    ],
+    "specificationVersion" : 7,
+    "mlProgramOperationTypeHistogram" : {
+      "Concat" : 28,
+      "Ios16.add" : 9,
+      "Ios16.mul" : 96,
+      "SliceByIndex" : 168,
+      "Transpose" : 4,
+      "Ios16.batchNorm" : 9,
+      "Ios16.einsum" : 192,
+      "Ios16.gelu" : 6,
+      "Ios16.softmax" : 96,
+      "Ios16.layerNorm" : 9,
+      "Ios16.conv" : 26
+    },
+    "computePrecision" : "Mixed (Float16, Int32)",
+    "isUpdatable" : "0",
+    "availability" : {
+      "macOS" : "13.0",
+      "tvOS" : "16.0",
+      "visionOS" : "1.0",
+      "watchOS" : "9.0",
+      "iOS" : "16.0",
+      "macCatalyst" : "16.0"
+    },
+    "modelType" : {
+      "name" : "MLModelType_mlProgram"
+    },
+    "userDefinedMetadata" : {
+      "com.github.apple.coremltools.source_dialect" : "TorchScript",
+      "com.github.apple.coremltools.version" : "8.0",
+      "com.github.apple.coremltools.source" : "torch==2.4.1"
+    },
+    "inputSchema" : [
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 80 × 1 × 3000)",
+        "shortDescription" : "",
+        "shape" : "[1, 80, 1, 3000]",
+        "name" : "melspectrogram_features",
+        "type" : "MultiArray"
+      }
+    ],
+    "generatedClassName" : "AudioEncoder",
+    "method" : "predict"
+  }
+]
openai_whisper-tiny.en/AudioEncoder.mlmodelc/model.mil
ADDED
The diff for this file is too large to render. See the raw diff.
openai_whisper-tiny.en/AudioEncoder.mlmodelc/model.mlmodel
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:030d64a3ddd296d6f709691a66a870aab7ee9f19e5fe07e8086245fb85302802
+size 54965

openai_whisper-tiny.en/AudioEncoder.mlmodelc/weights/weight.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f3706dac8d9d4bec269d3cee10fa4eda39b4240a46091c8323c1731a8c6d59c2
+size 16422784

openai_whisper-tiny.en/AudioEncoder.mlpackage/Data/com.apple.CoreML/model.mlmodel
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d344dcf1192f28f38afc259079c9c0a2d26bf4c22e5066c35a7d05eed81f17c3
+size 257776

openai_whisper-tiny.en/AudioEncoder.mlpackage/Data/com.apple.CoreML/weights/weight.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3860a1f422710e98f060b7414c0c7034f4a1b6a819eec0530e2e57e30d891e72
+size 16422784
openai_whisper-tiny.en/AudioEncoder.mlpackage/Manifest.json
ADDED
@@ -0,0 +1,18 @@
+{
+  "fileFormatVersion": "1.0.0",
+  "itemInfoEntries": {
+    "743BE91C-5205-432D-80DC-67CC4DB2D65A": {
+      "author": "com.apple.CoreML",
+      "description": "CoreML Model Specification",
+      "name": "model.mlmodel",
+      "path": "com.apple.CoreML/model.mlmodel"
+    },
+    "96C95FCF-711E-4118-AD97-5B6E8A74B2BC": {
+      "author": "com.apple.CoreML",
+      "description": "CoreML Model Weights",
+      "name": "weights",
+      "path": "com.apple.CoreML/weights"
+    }
+  },
+  "rootModelIdentifier": "743BE91C-5205-432D-80DC-67CC4DB2D65A"
+}
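This Manifest.json is the index of an .mlpackage: the source-format bundle (a model.mlmodel spec plus a weights directory) that gets compiled into the .mlmodelc folders seen elsewhere in this commit. A hedged sketch of loading the package form with coremltools, which reads the manifest to locate the spec and weights (path is a placeholder from this commit):

```python
import coremltools as ct

# Load the source-format package; unlike .mlmodelc this also works for
# inspection without running prediction.
model = ct.models.MLModel("openai_whisper-tiny.en/AudioEncoder.mlpackage")

# The spec's I/O description mirrors the metadata.json schemas above.
print(model.get_spec().description.input)
```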
openai_whisper-tiny.en/MelSpectrogram.mlmodelc/analytics/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:160d9737169d22dc01a899e1c6a0a9c44d0637d41f0dedb2a0b7c1422c4035d2
+size 243

openai_whisper-tiny.en/MelSpectrogram.mlmodelc/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cb3b3f51b080f58b12a6888a5e8ad57419be9e4c6843b96a7577f171b300e660
+size 328
openai_whisper-tiny.en/MelSpectrogram.mlmodelc/metadata.json
ADDED
@@ -0,0 +1,71 @@
+[
+  {
+    "metadataOutputVersion" : "3.0",
+    "storagePrecision" : "Float16",
+    "outputSchema" : [
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 80 × 1 × 3000)",
+        "shortDescription" : "",
+        "shape" : "[1, 80, 1, 3000]",
+        "name" : "melspectrogram_features",
+        "type" : "MultiArray"
+      }
+    ],
+    "modelParameters" : [
+
+    ],
+    "specificationVersion" : 7,
+    "mlProgramOperationTypeHistogram" : {
+      "Pad" : 1,
+      "Ios16.mul" : 2,
+      "SliceByIndex" : 1,
+      "Ios16.sub" : 1,
+      "Ios16.log" : 1,
+      "Ios16.conv" : 2,
+      "Ios16.add" : 3,
+      "Ios16.square" : 2,
+      "Ios16.matmul" : 1,
+      "Squeeze" : 2,
+      "Ios16.maximum" : 1,
+      "ExpandDims" : 4,
+      "Ios16.reduceMax" : 1,
+      "Identity" : 1,
+      "Ios16.reshape" : 2
+    },
+    "computePrecision" : "Mixed (Float16, Int32)",
+    "isUpdatable" : "0",
+    "availability" : {
+      "macOS" : "13.0",
+      "tvOS" : "16.0",
+      "visionOS" : "1.0",
+      "watchOS" : "9.0",
+      "iOS" : "16.0",
+      "macCatalyst" : "16.0"
+    },
+    "modelType" : {
+      "name" : "MLModelType_mlProgram"
+    },
+    "userDefinedMetadata" : {
+      "com.github.apple.coremltools.source_dialect" : "TorchScript",
+      "com.github.apple.coremltools.source" : "torch==2.4.1",
+      "com.github.apple.coremltools.version" : "8.0"
+    },
+    "inputSchema" : [
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 480000)",
+        "shortDescription" : "",
+        "shape" : "[480000]",
+        "name" : "audio",
+        "type" : "MultiArray"
+      }
+    ],
+    "generatedClassName" : "MelSpectrogram",
+    "method" : "predict"
+  }
+]
openai_whisper-tiny.en/MelSpectrogram.mlmodelc/model.mil
ADDED
@@ -0,0 +1,66 @@
+program(1.0)
+[buildInfo = dict<tensor<string, []>, tensor<string, []>>({{"coremlc-component-MIL", "3304.5.2"}, {"coremlc-version", "3304.6.2"}, {"coremltools-component-torch", "2.4.1"}, {"coremltools-source-dialect", "TorchScript"}, {"coremltools-version", "8.0"}})]
+{
+    func main<ios16>(tensor<fp16, [480000]> audio) {
+        tensor<int32, [3]> var_10 = const()[name = tensor<string, []>("op_10"), val = tensor<int32, [3]>([1, 1, 480000])];
+        tensor<fp16, [1, 1, 480000]> input_1_cast_fp16 = reshape(shape = var_10, x = audio)[name = tensor<string, []>("input_1_cast_fp16")];
+        tensor<int32, [6]> input_3_pad_0 = const()[name = tensor<string, []>("input_3_pad_0"), val = tensor<int32, [6]>([0, 0, 0, 0, 200, 200])];
+        tensor<string, []> input_3_mode_0 = const()[name = tensor<string, []>("input_3_mode_0"), val = tensor<string, []>("reflect")];
+        tensor<fp16, []> const_1_to_fp16 = const()[name = tensor<string, []>("const_1_to_fp16"), val = tensor<fp16, []>(0x0p+0)];
+        tensor<fp16, [1, 1, 480400]> input_3_cast_fp16 = pad(constant_val = const_1_to_fp16, mode = input_3_mode_0, pad = input_3_pad_0, x = input_1_cast_fp16)[name = tensor<string, []>("input_3_cast_fp16")];
+        tensor<int32, [1]> var_22 = const()[name = tensor<string, []>("op_22"), val = tensor<int32, [1]>([480400])];
+        tensor<fp16, [480400]> input_cast_fp16 = reshape(shape = var_22, x = input_3_cast_fp16)[name = tensor<string, []>("input_cast_fp16")];
+        tensor<int32, [1]> expand_dims_0_axes_0 = const()[name = tensor<string, []>("expand_dims_0_axes_0"), val = tensor<int32, [1]>([0])];
+        tensor<fp16, [1, 480400]> expand_dims_0_cast_fp16 = expand_dims(axes = expand_dims_0_axes_0, x = input_cast_fp16)[name = tensor<string, []>("expand_dims_0_cast_fp16")];
+        tensor<int32, [1]> expand_dims_3 = const()[name = tensor<string, []>("expand_dims_3"), val = tensor<int32, [1]>([160])];
+        tensor<int32, [1]> expand_dims_4_axes_0 = const()[name = tensor<string, []>("expand_dims_4_axes_0"), val = tensor<int32, [1]>([1])];
+        tensor<fp16, [1, 1, 480400]> expand_dims_4_cast_fp16 = expand_dims(axes = expand_dims_4_axes_0, x = expand_dims_0_cast_fp16)[name = tensor<string, []>("expand_dims_4_cast_fp16")];
+        tensor<string, []> conv_0_pad_type_0 = const()[name = tensor<string, []>("conv_0_pad_type_0"), val = tensor<string, []>("valid")];
+        tensor<int32, [2]> conv_0_pad_0 = const()[name = tensor<string, []>("conv_0_pad_0"), val = tensor<int32, [2]>([0, 0])];
+        tensor<int32, [1]> conv_0_dilations_0 = const()[name = tensor<string, []>("conv_0_dilations_0"), val = tensor<int32, [1]>([1])];
+        tensor<int32, []> conv_0_groups_0 = const()[name = tensor<string, []>("conv_0_groups_0"), val = tensor<int32, []>(1)];
+        tensor<fp16, [201, 1, 400]> expand_dims_1_to_fp16 = const()[name = tensor<string, []>("expand_dims_1_to_fp16"), val = tensor<fp16, [201, 1, 400]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(64)))];
+        tensor<fp16, [1, 201, 3001]> conv_0_cast_fp16 = conv(dilations = conv_0_dilations_0, groups = conv_0_groups_0, pad = conv_0_pad_0, pad_type = conv_0_pad_type_0, strides = expand_dims_3, weight = expand_dims_1_to_fp16, x = expand_dims_4_cast_fp16)[name = tensor<string, []>("conv_0_cast_fp16")];
+        tensor<string, []> conv_1_pad_type_0 = const()[name = tensor<string, []>("conv_1_pad_type_0"), val = tensor<string, []>("valid")];
+        tensor<int32, [2]> conv_1_pad_0 = const()[name = tensor<string, []>("conv_1_pad_0"), val = tensor<int32, [2]>([0, 0])];
+        tensor<int32, [1]> conv_1_dilations_0 = const()[name = tensor<string, []>("conv_1_dilations_0"), val = tensor<int32, [1]>([1])];
+        tensor<int32, []> conv_1_groups_0 = const()[name = tensor<string, []>("conv_1_groups_0"), val = tensor<int32, []>(1)];
+        tensor<fp16, [201, 1, 400]> expand_dims_2_to_fp16 = const()[name = tensor<string, []>("expand_dims_2_to_fp16"), val = tensor<fp16, [201, 1, 400]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(160960)))];
+        tensor<fp16, [1, 201, 3001]> conv_1_cast_fp16 = conv(dilations = conv_1_dilations_0, groups = conv_1_groups_0, pad = conv_1_pad_0, pad_type = conv_1_pad_type_0, strides = expand_dims_3, weight = expand_dims_2_to_fp16, x = expand_dims_4_cast_fp16)[name = tensor<string, []>("conv_1_cast_fp16")];
+        tensor<int32, [1]> squeeze_0_axes_0 = const()[name = tensor<string, []>("squeeze_0_axes_0"), val = tensor<int32, [1]>([0])];
+        tensor<fp16, [201, 3001]> squeeze_0_cast_fp16 = squeeze(axes = squeeze_0_axes_0, x = conv_0_cast_fp16)[name = tensor<string, []>("squeeze_0_cast_fp16")];
+        tensor<int32, [1]> squeeze_1_axes_0 = const()[name = tensor<string, []>("squeeze_1_axes_0"), val = tensor<int32, [1]>([0])];
+        tensor<fp16, [201, 3001]> squeeze_1_cast_fp16 = squeeze(axes = squeeze_1_axes_0, x = conv_1_cast_fp16)[name = tensor<string, []>("squeeze_1_cast_fp16")];
+        tensor<fp16, [201, 3001]> square_0_cast_fp16 = square(x = squeeze_0_cast_fp16)[name = tensor<string, []>("square_0_cast_fp16")];
+        tensor<fp16, [201, 3001]> square_1_cast_fp16 = square(x = squeeze_1_cast_fp16)[name = tensor<string, []>("square_1_cast_fp16")];
+        tensor<fp16, [201, 3001]> add_1_cast_fp16 = add(x = square_0_cast_fp16, y = square_1_cast_fp16)[name = tensor<string, []>("add_1_cast_fp16")];
+        tensor<fp16, [201, 3001]> magnitudes_1_cast_fp16 = identity(x = add_1_cast_fp16)[name = tensor<string, []>("magnitudes_1_cast_fp16")];
+        tensor<int32, [2]> magnitudes_begin_0 = const()[name = tensor<string, []>("magnitudes_begin_0"), val = tensor<int32, [2]>([0, 0])];
+        tensor<int32, [2]> magnitudes_end_0 = const()[name = tensor<string, []>("magnitudes_end_0"), val = tensor<int32, [2]>([201, 3000])];
+        tensor<bool, [2]> magnitudes_end_mask_0 = const()[name = tensor<string, []>("magnitudes_end_mask_0"), val = tensor<bool, [2]>([true, false])];
+        tensor<fp16, [201, 3000]> magnitudes_cast_fp16 = slice_by_index(begin = magnitudes_begin_0, end = magnitudes_end_0, end_mask = magnitudes_end_mask_0, x = magnitudes_1_cast_fp16)[name = tensor<string, []>("magnitudes_cast_fp16")];
+        tensor<bool, []> mel_spec_1_transpose_x_0 = const()[name = tensor<string, []>("mel_spec_1_transpose_x_0"), val = tensor<bool, []>(false)];
+        tensor<bool, []> mel_spec_1_transpose_y_0 = const()[name = tensor<string, []>("mel_spec_1_transpose_y_0"), val = tensor<bool, []>(false)];
+        tensor<fp16, [80, 201]> mel_filters_to_fp16 = const()[name = tensor<string, []>("mel_filters_to_fp16"), val = tensor<fp16, [80, 201]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(321856)))];
+        tensor<fp16, [80, 3000]> mel_spec_1_cast_fp16 = matmul(transpose_x = mel_spec_1_transpose_x_0, transpose_y = mel_spec_1_transpose_y_0, x = mel_filters_to_fp16, y = magnitudes_cast_fp16)[name = tensor<string, []>("mel_spec_1_cast_fp16")];
+        tensor<fp16, []> var_41_to_fp16 = const()[name = tensor<string, []>("op_41_to_fp16"), val = tensor<fp16, []>(0x1p-24)];
+        tensor<fp16, [80, 3000]> mel_spec_cast_fp16 = add(x = mel_spec_1_cast_fp16, y = var_41_to_fp16)[name = tensor<string, []>("mel_spec_cast_fp16")];
+        tensor<fp16, []> log_0_epsilon_0_to_fp16 = const()[name = tensor<string, []>("log_0_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x0p+0)];
+        tensor<fp16, [80, 3000]> log_0_cast_fp16 = log(epsilon = log_0_epsilon_0_to_fp16, x = mel_spec_cast_fp16)[name = tensor<string, []>("log_0_cast_fp16")];
+        tensor<fp16, []> mul_0_y_0_to_fp16 = const()[name = tensor<string, []>("mul_0_y_0_to_fp16"), val = tensor<fp16, []>(0x1.bccp-2)];
+        tensor<fp16, [80, 3000]> mul_0_cast_fp16 = mul(x = log_0_cast_fp16, y = mul_0_y_0_to_fp16)[name = tensor<string, []>("mul_0_cast_fp16")];
+        tensor<bool, []> var_44_keep_dims_0 = const()[name = tensor<string, []>("op_44_keep_dims_0"), val = tensor<bool, []>(false)];
+        tensor<fp16, []> var_44_cast_fp16 = reduce_max(keep_dims = var_44_keep_dims_0, x = mul_0_cast_fp16)[name = tensor<string, []>("op_44_cast_fp16")];
+        tensor<fp16, []> var_46_to_fp16 = const()[name = tensor<string, []>("op_46_to_fp16"), val = tensor<fp16, []>(0x1p+3)];
+        tensor<fp16, []> var_47_cast_fp16 = sub(x = var_44_cast_fp16, y = var_46_to_fp16)[name = tensor<string, []>("op_47_cast_fp16")];
+        tensor<fp16, [80, 3000]> log_spec_3_cast_fp16 = maximum(x = mul_0_cast_fp16, y = var_47_cast_fp16)[name = tensor<string, []>("log_spec_3_cast_fp16")];
+        tensor<fp16, []> var_50_to_fp16 = const()[name = tensor<string, []>("op_50_to_fp16"), val = tensor<fp16, []>(0x1p+2)];
+        tensor<fp16, [80, 3000]> var_51_cast_fp16 = add(x = log_spec_3_cast_fp16, y = var_50_to_fp16)[name = tensor<string, []>("op_51_cast_fp16")];
+        tensor<fp16, []> _inversed_log_spec_y_0_to_fp16 = const()[name = tensor<string, []>("_inversed_log_spec_y_0_to_fp16"), val = tensor<fp16, []>(0x1p-2)];
+        tensor<fp16, [80, 3000]> _inversed_log_spec_cast_fp16 = mul(x = var_51_cast_fp16, y = _inversed_log_spec_y_0_to_fp16)[name = tensor<string, []>("_inversed_log_spec_cast_fp16")];
+        tensor<int32, [1]> var_55_axes_0 = const()[name = tensor<string, []>("op_55_axes_0"), val = tensor<int32, [1]>([0])];
+        tensor<fp16, [1, 80, 3000]> var_55_cast_fp16 = expand_dims(axes = var_55_axes_0, x = _inversed_log_spec_cast_fp16)[name = tensor<string, []>("op_55_cast_fp16")];
+        tensor<int32, [1]> var_62_axes_0 = const()[name = tensor<string, []>("op_62_axes_0"), val = tensor<int32, [1]>([2])];
+        tensor<fp16, [1, 80, 1, 3000]> melspectrogram_features = expand_dims(axes = var_62_axes_0, x = var_55_cast_fp16)[name = tensor<string, []>("op_62_cast_fp16")];
+    } -> (melspectrogram_features);
+}
openai_whisper-tiny.en/MelSpectrogram.mlmodelc/weights/weight.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:801024dbc7a89c677be1f8b285de3409e35f7d1786c9c8d9d0d6842ac57a1c83
+size 354080

openai_whisper-tiny.en/MelSpectrogram.mlpackage/Data/com.apple.CoreML/model.mlmodel
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e90f6d8c7ccaa25cecefaee45e4ac31c2fc9f8e0b7c0f69c4a01d8646add5d7
+size 8950

openai_whisper-tiny.en/MelSpectrogram.mlpackage/Data/com.apple.CoreML/weights/weight.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6419aa141b1b0f06ec2de0074a65cd7a5e2eb59fe93d43554ec857067d444891
+size 354080
openai_whisper-tiny.en/MelSpectrogram.mlpackage/Manifest.json
ADDED
@@ -0,0 +1,18 @@
+{
+  "fileFormatVersion": "1.0.0",
+  "itemInfoEntries": {
+    "2048FA7C-387C-4B5B-8A5A-3D0743C785BF": {
+      "author": "com.apple.CoreML",
+      "description": "CoreML Model Specification",
+      "name": "model.mlmodel",
+      "path": "com.apple.CoreML/model.mlmodel"
+    },
+    "5E0F5547-1F38-4DA8-BB4F-FE149347BD45": {
+      "author": "com.apple.CoreML",
+      "description": "CoreML Model Weights",
+      "name": "weights",
+      "path": "com.apple.CoreML/weights"
+    }
+  },
+  "rootModelIdentifier": "2048FA7C-387C-4B5B-8A5A-3D0743C785BF"
+}
openai_whisper-tiny.en/TextDecoder.mlmodelc/analytics/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:edb99a30ccee8e157fbec80dc3dce49349ba0982391b327d753e10ccab0a01c3
+size 243

openai_whisper-tiny.en/TextDecoder.mlmodelc/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:65c043a081845d190918b4c7d244f94a55df1a15fae796abedc1f414995542c6
+size 633
openai_whisper-tiny.en/TextDecoder.mlmodelc/metadata.json
ADDED
@@ -0,0 +1,165 @@
+[
+  {
+    "metadataOutputVersion" : "3.0",
+    "storagePrecision" : "Float16",
+    "outputSchema" : [
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 1 × 51864)",
+        "shortDescription" : "",
+        "shape" : "[1, 1, 51864]",
+        "name" : "logits",
+        "type" : "MultiArray"
+      },
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 1536 × 1 × 1)",
+        "shortDescription" : "",
+        "shape" : "[1, 1536, 1, 1]",
+        "name" : "key_cache_updates",
+        "type" : "MultiArray"
+      },
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 1536 × 1 × 1)",
+        "shortDescription" : "",
+        "shape" : "[1, 1536, 1, 1]",
+        "name" : "value_cache_updates",
+        "type" : "MultiArray"
+      },
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 1500)",
+        "shortDescription" : "",
+        "shape" : "[1, 1500]",
+        "name" : "alignment_heads_weights",
+        "type" : "MultiArray"
+      }
+    ],
+    "modelParameters" : [
+
+    ],
+    "specificationVersion" : 7,
+    "mlProgramOperationTypeHistogram" : {
+      "Split" : 2,
+      "Concat" : 3,
+      "Squeeze" : 1,
+      "Ios16.mul" : 24,
+      "Ios16.layerNorm" : 13,
+      "SliceByIndex" : 16,
+      "Ios16.sub" : 1,
+      "Transpose" : 1,
+      "Ios16.conv" : 40,
+      "Ios16.add" : 25,
+      "Ios16.linear" : 1,
+      "Ios16.matmul" : 16,
+      "Ios16.gelu" : 4,
+      "Ios16.reduceMean" : 1,
+      "ExpandDims" : 6,
+      "Ios16.batchNorm" : 13,
+      "Ios16.gather" : 2,
+      "Ios16.reshape" : 32,
+      "Ios16.softmax" : 8
+    },
+    "computePrecision" : "Mixed (Float16, Int32)",
+    "isUpdatable" : "0",
+    "availability" : {
+      "macOS" : "13.0",
+      "tvOS" : "16.0",
+      "visionOS" : "1.0",
+      "watchOS" : "9.0",
+      "iOS" : "16.0",
+      "macCatalyst" : "16.0"
+    },
+    "modelType" : {
+      "name" : "MLModelType_mlProgram"
+    },
+    "userDefinedMetadata" : {
+      "com.github.apple.coremltools.source_dialect" : "TorchScript",
+      "com.github.apple.coremltools.source" : "torch==2.4.1",
+      "com.github.apple.coremltools.version" : "8.0"
+    },
+    "inputSchema" : [
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Int32",
+        "formattedType" : "MultiArray (Int32 1)",
+        "shortDescription" : "",
+        "shape" : "[1]",
+        "name" : "input_ids",
+        "type" : "MultiArray"
+      },
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Int32",
+        "formattedType" : "MultiArray (Int32 1)",
+        "shortDescription" : "",
+        "shape" : "[1]",
+        "name" : "cache_length",
+        "type" : "MultiArray"
+      },
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 1536 × 1 × 448)",
+        "shortDescription" : "",
+        "shape" : "[1, 1536, 1, 448]",
+        "name" : "key_cache",
+        "type" : "MultiArray"
+      },
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 1536 × 1 × 448)",
+        "shortDescription" : "",
+        "shape" : "[1, 1536, 1, 448]",
+        "name" : "value_cache",
+        "type" : "MultiArray"
+      },
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 448)",
+        "shortDescription" : "",
+        "shape" : "[1, 448]",
+        "name" : "kv_cache_update_mask",
+        "type" : "MultiArray"
+      },
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 384 × 1 × 1500)",
+        "shortDescription" : "",
+        "shape" : "[1, 384, 1, 1500]",
+        "name" : "encoder_output_embeds",
+        "type" : "MultiArray"
+      },
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 448)",
+        "shortDescription" : "",
+        "shape" : "[1, 448]",
+        "name" : "decoder_key_padding_mask",
+        "type" : "MultiArray"
+      }
+    ],
+    "generatedClassName" : "TextDecoder",
+    "method" : "predict"
+  }
+]
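Taken together, this schema describes a single-token, stateless decoding step: the caller owns the KV cache (`key_cache`/`value_cache`, whose 1536 channels are the 4 decoder layers × `d_model` 384 from the config below, over 448 = `max_target_positions` slots) and scatters each step's `key_cache_updates`/`value_cache_updates` back in at the position flagged by `kv_cache_update_mask`. A hedged sketch of one step via coremltools, requiring macOS; the token value comes from the config in this diff, while the 0/-inf mask conventions are inferred from the input names rather than stated anywhere in the repo:

```python
import numpy as np
import coremltools as ct

decoder = ct.models.MLModel("openai_whisper-tiny.en/TextDecoder.mlpackage")

pos = 0  # decoding position of the current token
kv_update_mask = np.zeros((1, 448), dtype=np.float16)
kv_update_mask[0, pos] = 1.0            # write this step's K/V into slot `pos` (assumed)
padding_mask = np.full((1, 448), -np.inf, dtype=np.float16)
padding_mask[0, : pos + 1] = 0.0        # attend only to decoded positions (assumed)

encoder_out = np.zeros((1, 384, 1, 1500), dtype=np.float16)  # placeholder for AudioEncoder output

out = decoder.predict({
    "input_ids": np.array([50257], dtype=np.int32),  # decoder_start_token_id from config.json
    "cache_length": np.array([pos], dtype=np.int32),
    "key_cache": np.zeros((1, 1536, 1, 448), dtype=np.float16),
    "value_cache": np.zeros((1, 1536, 1, 448), dtype=np.float16),
    "kv_cache_update_mask": kv_update_mask,
    "encoder_output_embeds": encoder_out,
    "decoder_key_padding_mask": padding_mask,
})
next_token = int(out["logits"][0, 0].argmax())  # greedy pick over the 51864-way vocab
```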
openai_whisper-tiny.en/TextDecoder.mlmodelc/model.mil
ADDED
The diff for this file is too large to render. See raw diff
openai_whisper-tiny.en/TextDecoder.mlmodelc/model.mlmodel
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5c3e91bc036014426708e2ceb0e35cb1bbbf34e8121d2070d2b174a7957581d0
+size 108558
openai_whisper-tiny.en/TextDecoder.mlmodelc/weights/weight.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:763f915f0126093fc2c506572b3ab0fad134c04cfc2221333ccc7d73552c9252
+size 59215664
openai_whisper-tiny.en/TextDecoder.mlpackage/Data/com.apple.CoreML/model.mlmodel
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5c3e91bc036014426708e2ceb0e35cb1bbbf34e8121d2070d2b174a7957581d0
+size 108558
openai_whisper-tiny.en/TextDecoder.mlpackage/Data/com.apple.CoreML/weights/weight.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:aa0a312346dc9aa97fab50e99d820c997968dfddd62695878086ba87795ca126
+size 59215664
openai_whisper-tiny.en/TextDecoder.mlpackage/Manifest.json
ADDED
@@ -0,0 +1,18 @@
+{
+    "fileFormatVersion": "1.0.0",
+    "itemInfoEntries": {
+        "4F20E9B0-3984-4E42-84AA-8BBF92A2B7B2": {
+            "author": "com.apple.CoreML",
+            "description": "CoreML Model Specification",
+            "name": "model.mlmodel",
+            "path": "com.apple.CoreML/model.mlmodel"
+        },
+        "B01071E1-B67E-4E85-830E-5EDD7EE041A2": {
+            "author": "com.apple.CoreML",
+            "description": "CoreML Model Weights",
+            "name": "weights",
+            "path": "com.apple.CoreML/weights"
+        }
+    },
+    "rootModelIdentifier": "4F20E9B0-3984-4E42-84AA-8BBF92A2B7B2"
+}
openai_whisper-tiny.en/config.json
ADDED
@@ -0,0 +1 @@
+{"_name_or_path": "openai/whisper-tiny.en", "activation_dropout": 0.0, "activation_function": "gelu", "architectures": ["WhisperForConditionalGeneration"], "attention_dropout": 0.0, "begin_suppress_tokens": [220, 50256], "bos_token_id": 50257, "d_model": 384, "decoder_attention_heads": 6, "decoder_ffn_dim": 1536, "decoder_layerdrop": 0.0, "decoder_layers": 4, "decoder_start_token_id": 50257, "dropout": 0.0, "encoder_attention_heads": 6, "encoder_ffn_dim": 1536, "encoder_layerdrop": 0.0, "encoder_layers": 4, "eos_token_id": 50256, "forced_decoder_ids": [[1, 50362]], "init_std": 0.02, "is_encoder_decoder": true, "max_length": 448, "max_source_positions": 1500, "max_target_positions": 448, "model_type": "whisper", "num_hidden_layers": 4, "num_mel_bins": 80, "pad_token_id": 50256, "scale_embedding": false, "suppress_tokens": [1, 2, 7, 8, 9, 10, 14, 25, 26, 27, 28, 29, 31, 58, 59, 60, 61, 62, 63, 90, 91, 92, 93, 357, 366, 438, 532, 685, 705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377, 1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211, 4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 10563, 10786, 11420, 11709, 11907, 13163, 13697, 13700, 14808, 15306, 16410, 16791, 17992, 19203, 19510, 20724, 22305, 22935, 27007, 30109, 30420, 33409, 34949, 40283, 40493, 40549, 47282, 49146, 50257, 50357, 50358, 50359, 50360, 50361], "torch_dtype": "float32", "transformers_version": "4.27.0.dev0", "use_cache": true, "vocab_size": 51864}
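This Hugging Face config explains the tensor shapes in the Core ML schemas above. A quick consistency check, reading the config added in this commit:

```python
import json

with open("openai_whisper-tiny.en/config.json") as f:
    cfg = json.load(f)

# KV-cache channel count stacks all decoder layers along one axis.
assert cfg["decoder_layers"] * cfg["d_model"] == 1536  # key_cache: [1, 1536, 1, 448]
assert cfg["max_target_positions"] == 448              # cache / mask length
assert cfg["max_source_positions"] == 1500             # encoder_output_embeds width
assert cfg["d_model"] == 384                           # encoder_output_embeds channels
assert cfg["vocab_size"] == 51864                      # logits: [1, 1, 51864]
```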