Tim77777767 committed on
Commit 288ccaa · 1 Parent(s): 66c5431

Adjustments to the configs

Files changed (2):
  1. config.json +4 -1
  2. mix_vision_transformer_config.py +12 -5
config.json CHANGED
@@ -1,17 +1,20 @@
 {
+  "model_type": "my_segformer",
   "in_channels": 3,
   "embed_dims": [32, 64, 160, 256],
   "num_stages": 4,
   "num_layers": [2, 2, 2, 2],
   "num_heads": [1, 2, 5, 8],
   "patch_sizes": [7, 3, 3, 3],
+  "strides": [4, 2, 2, 2],
   "sr_ratios": [8, 4, 2, 1],
-  "out_indices": [0, 1, 2, 3],
   "mlp_ratio": 4,
   "qkv_bias": true,
   "drop_rate": 0.0,
   "attn_drop_rate": 0.0,
   "drop_path_rate": 0.1,
+  "out_indices": [0, 1, 2, 3],
+  "num_classes": 19,
   "decode_head": {
     "in_channels": [32, 64, 160, 256],
     "in_index": [0, 1, 2, 3],
mix_vision_transformer_config.py CHANGED
@@ -5,10 +5,11 @@ class MySegformerConfig(PretrainedConfig):
 
     def __init__(
         self,
-        embed_dims=[64, 128, 320, 512],
+        in_channels=3,
+        embed_dims=[32, 64, 160, 256],
         num_stages=4,
-        num_layers=[3, 4, 6, 3],
-        num_heads=[1, 2, 4, 8],
+        num_layers=[2, 2, 2, 2],
+        num_heads=[1, 2, 5, 8],
         patch_sizes=[7, 3, 3, 3],
         strides=[4, 2, 2, 2],
         sr_ratios=[8, 4, 2, 1],
@@ -16,16 +17,19 @@ class MySegformerConfig(PretrainedConfig):
         qkv_bias=True,
         drop_rate=0.0,
         attn_drop_rate=0.0,
-        drop_path_rate=0.0,
+        drop_path_rate=0.1,
         out_indices=(0, 1, 2, 3),
+        num_classes=19,
+        decode_head=None,
         **kwargs
     ):
         super().__init__(**kwargs)
 
-        # Safeguard in case embed_dims is passed as an int
+        # If embed_dims is given as an int, force it into a list
         if isinstance(embed_dims, int):
            embed_dims = [embed_dims]
 
+        self.in_channels = in_channels
         self.embed_dims = embed_dims
         self.num_stages = num_stages
         self.num_layers = num_layers
@@ -39,4 +43,7 @@ class MySegformerConfig(PretrainedConfig):
         self.attn_drop_rate = attn_drop_rate
         self.drop_path_rate = drop_path_rate
         self.out_indices = out_indices
+        self.num_classes = num_classes
 
+        # Optional block for the head config (if needed)
+        self.decode_head = decode_head
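For completeness, a small sketch of exercising the updated defaults, assuming the class above is importable from mix_vision_transformer_config.py (the output directory name is made up):

from mix_vision_transformer_config import MySegformerConfig

# The new defaults match the values committed in config.json.
config = MySegformerConfig()
assert config.num_layers == [2, 2, 2, 2]
assert config.drop_path_rate == 0.1
assert config.num_classes == 19

# Individual values can still be overridden through the named parameters.
tiny = MySegformerConfig(num_classes=2, embed_dims=[32, 64, 160, 256])

# save_pretrained (inherited from PretrainedConfig) serializes the config
# back to a config.json similar to the one in this commit.
tiny.save_pretrained("exported_config")  # hypothetical output directory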