-
Notifications
You must be signed in to change notification settings - Fork 24
Expand file tree
/
Copy path: models.py
More file actions
115 lines (90 loc) · 3.74 KB
/
models.py
File metadata and controls
115 lines (90 loc) · 3.74 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
import numpy as np
import tensorflow as tf
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Flatten, Conv3D, Conv3DTranspose, Dropout, ReLU, LeakyReLU, Concatenate, ZeroPadding3D
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.losses import MeanSquaredError
import tensorflow_addons as tfa
from tensorflow_addons.layers import InstanceNormalization
def Generator():
    '''
    Generator model: an encoder / bottleneck / decoder 3D convolutional
    network over (128, 128, 128, 4) volumes, with skip connections from
    each encoder level to the matching decoder level and a per-voxel
    4-channel softmax output.
    '''

    def down_block(inp, filters, ksize, norm=True):
        # Strided conv halves every spatial dimension.
        out = Conv3D(filters, kernel_size=ksize, strides=2,
                     kernel_initializer='he_normal', padding='same')(inp)
        if norm:
            out = InstanceNormalization()(out)
        out = LeakyReLU()(out)
        out = Dropout(0.2)(out)
        return out

    def bottleneck_block(inp, filters, ksize):
        # One strided conv, then four stride-1 convs whose outputs are
        # concatenated back onto the running feature map.
        out = Conv3D(filters, kernel_size=ksize, strides=2,
                     kernel_initializer='he_normal', padding='same')(inp)
        out = InstanceNormalization()(out)
        out = LeakyReLU()(out)
        for _ in range(4):
            conv = Conv3D(filters, kernel_size=ksize, strides=1,
                          kernel_initializer='he_normal', padding='same')(out)
            out = InstanceNormalization()(conv)
            out = LeakyReLU()(out)
            out = Concatenate()([out, conv])
        return out

    def up_block(inp, skip, filters, ksize):
        # Transposed conv doubles every spatial dimension, then the
        # matching encoder activation is concatenated in.
        out = Conv3DTranspose(filters, kernel_size=ksize, strides=2,
                              padding='same', kernel_initializer='he_normal')(inp)
        out = InstanceNormalization()(out)
        out = LeakyReLU()(out)
        out = Concatenate()([out, skip])
        out = Dropout(0.2)(out)
        return out

    inputs = Input((128, 128, 128, 4), name='input_image')
    Nfilter_start = 64
    depth = 4
    ks = 4

    skips = []
    x = inputs
    # Encoder: filter count doubles per level; no normalization on level 0.
    for level in range(depth - 1):
        x = down_block(x, Nfilter_start * np.power(2, level), ks,
                       norm=(level != 0))
        skips.append(x)
    # Bottleneck at the deepest resolution.
    x = bottleneck_block(x, Nfilter_start * np.power(2, depth - 1), ks)
    # Decoder: deepest skip connection is consumed first.
    for level in range(depth - 2, -1, -1):
        x = up_block(x, skips.pop(), Nfilter_start * np.power(2, level), ks)
    # Classifier: per-voxel softmax over 4 output channels.
    last = Conv3DTranspose(4, kernel_size=ks, strides=2, padding='same',
                           kernel_initializer='he_normal', activation='softmax',
                           name='output_generator')(x)
    return Model(inputs=inputs, outputs=last, name='Generator')
def Discriminator():
    '''
    Discriminator model: scores an (input, target) pair of
    (128, 128, 128, 4) volumes with a strided 3D conv stack and a
    single-channel convolutional output head.
    '''
    inputs = Input((128, 128, 128, 4), name='input_image')
    targets = Input((128, 128, 128, 4), name='target_image')
    Nfilter_start = 64
    depth = 3
    ks = 4

    def down_block(inp, filters, norm=True):
        # Strided conv halves every spatial dimension; `ks` is closed over.
        out = Conv3D(filters, kernel_size=ks, strides=2,
                     kernel_initializer='he_normal', padding='same')(inp)
        if norm:
            out = InstanceNormalization()(out)
        out = LeakyReLU()(out)
        out = Dropout(0.2)(out)
        return out

    # Condition on both volumes by stacking them along the channel axis.
    x = Concatenate()([inputs, targets])
    # Filter count doubles per level; no normalization on level 0.
    for level in range(depth):
        x = down_block(x, Nfilter_start * np.power(2, level),
                       norm=(level != 0))

    # Output head: pad -> valid conv -> norm/activation -> pad -> 1-channel conv.
    x = ZeroPadding3D()(x)
    x = Conv3D(Nfilter_start * (2 ** depth), ks, strides=1, padding='valid',
               kernel_initializer='he_normal')(x)
    x = InstanceNormalization()(x)
    x = LeakyReLU()(x)
    x = ZeroPadding3D()(x)
    last = Conv3D(1, ks, strides=1, padding='valid',
                  kernel_initializer='he_normal',
                  name='output_discriminator')(x)

    # NOTE: input order [targets, inputs] is kept as-is — callers feed
    # the model in this order.
    return Model(inputs=[targets, inputs], outputs=last, name='Discriminator')
def ensembler():
    '''
    Ensembler model: a single 3x3x3 conv that maps a 40-channel
    (128, 128, 128) volume to a per-voxel 4-channel softmax.
    '''
    # Presumably the 40 channels are several stacked model predictions —
    # TODO(review): confirm against the caller that builds this input.
    inp = Input((128, 128, 128, 40))
    out = Conv3D(4, kernel_size=3, kernel_initializer='he_normal',
                 padding='same', activation='softmax')(inp)
    return Model(inputs=inp, outputs=out, name='Ensembler')