
DropBlock1D #6

Open
skgone opened this issue Apr 25, 2020 · 1 comment
skgone commented Apr 25, 2020

Thank you for your great work!
I want to use DropBlock for time series data. How can I implement a DropBlock1D?


skgone commented Apr 28, 2020

I modified it to 1D, but the performance is worse than dropout; maybe something is wrong.

import tensorflow as tf
from tensorflow.keras import backend as K


def _bernoulli(shape, mean):
    # Assumed helper (the original post calls it without defining it):
    # samples a float mask whose entries are 1 with probability `mean`, else 0.
    return tf.nn.relu(tf.sign(mean - tf.random.uniform(shape, minval=0, maxval=1, dtype=tf.float32)))


class DropBlock1D(tf.keras.layers.Layer):

    def __init__(self, keep_prob, block_size, scale=True, **kwargs):
        super(DropBlock1D, self).__init__(**kwargs)
        self.keep_prob = float(keep_prob) if isinstance(keep_prob, int) else keep_prob
        self.block_size = int(block_size)
        self.scale = tf.constant(scale, dtype=tf.bool) if isinstance(scale, bool) else scale

    def compute_output_shape(self, input_shape):
        return input_shape

    def build(self, input_shape):
        assert len(input_shape) == 3
        _, self.h, self.channel = input_shape.as_list()
        # Padding that restores the sampled mask to the full sequence length.
        p1 = (self.block_size - 1) // 2
        p0 = (self.block_size - 1) - p1
        self.padding = [[0, 0], [p0, p1], [0, 0]]
        self.set_keep_prob()
        super(DropBlock1D, self).build(input_shape)

    def call(self, inputs, training=None, **kwargs):
        def drop():
            mask = self._create_mask(tf.shape(inputs))
            output = inputs * mask
            # Rescale so the expected sum of activations is unchanged.
            output = tf.cond(self.scale,
                             true_fn=lambda: output * tf.cast(tf.size(mask), tf.float32) / tf.reduce_sum(mask),
                             false_fn=lambda: output)
            return output

        if training is None:
            training = K.learning_phase()
        # Identity at inference time or when keep_prob == 1.
        output = tf.cond(tf.logical_or(tf.logical_not(tf.cast(training, tf.bool)),
                                       tf.equal(self.keep_prob, 1.0)),
                         true_fn=lambda: inputs,
                         false_fn=drop)
        return output

    def set_keep_prob(self, keep_prob=None):
        """This method only supports Eager Execution."""
        if keep_prob is not None:
            self.keep_prob = keep_prob
        h = tf.cast(self.h, tf.float32)
        # 1-D analogue of the DropBlock paper's gamma:
        #   gamma = ((1 - keep_prob) / block_size) * (h / (h - block_size + 1))
        # Note the parentheses: the original `(h/h - self.block_size + 1)`
        # evaluates to `2 - block_size`, giving a negative gamma.
        self.gamma = ((1. - self.keep_prob) / self.block_size) * (h / (h - self.block_size + 1))

    def _create_mask(self, input_shape):
        # Sample block centres only where a whole block fits, then pad out.
        sampling_mask_shape = tf.stack([input_shape[0],
                                        self.h - self.block_size + 1,
                                        self.channel])
        mask = _bernoulli(sampling_mask_shape, self.gamma)
        mask = tf.pad(mask, self.padding)
        # Max-pooling expands each sampled centre to a block of length block_size.
        mask = tf.nn.max_pool(mask, [1, self.block_size, 1], [1, 1, 1], 'SAME')
        mask = 1 - mask  # 1 = keep, 0 = drop
        return mask
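
For anyone who lands here: a minimal usage sketch with the layer above (the sequence length, keep_prob, and block_size are illustrative values, not tuned ones):

    inputs = tf.keras.Input(shape=(100, 8))  # (timesteps, features)
    x = tf.keras.layers.Conv1D(32, 3, padding='same', activation='relu')(inputs)
    x = DropBlock1D(keep_prob=0.9, block_size=7)(x)  # drops contiguous runs of 7 timesteps
    x = tf.keras.layers.GlobalAveragePooling1D()(x)
    outputs = tf.keras.layers.Dense(1)(x)
    model = tf.keras.Model(inputs, outputs)

As a sanity check on the corrected gamma: with keep_prob=0.9, block_size=7, and h=100, gamma = (0.1 / 7) * (100 / 94) ≈ 0.0152, so about 1.5% of valid positions seed a dropped block. The original unparenthesized expression gave (0.1 / 7) * (1 - 7 + 1) ≈ -0.0714; with a negative gamma the Bernoulli sampler never fires, the layer becomes a no-op during training, and that would explain results worse than plain dropout.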
