Dataset columns (three fields per record):
  seed (string, lengths 25 to 2.89k): the code snippet.
  seed_api (string, lengths 14 to 102): the fully-qualified TensorFlow API the snippet exercises.
  index (int64, 0 to 14.8k): the row index.
Each record below is printed as its seed snippet, then its seed_api, then its index.
import tensorflow as tf def fn_not_eos(): return tf.not_equal( # Check if the last predicted element is a EOS tf.squeeze(result[:, -1, :, :]), text_encoder.EOS_ID) not_eos = tf.cond( # We only check for early stoping if there is at least 1 element ( # otherwise not_eos will crash) tf.not_equal(length, 0), fn_not_eos, lambda: True, ) return tf.cond( tf.equal(batch_size, 1), # If batch_size == 1, we check EOS for early stoping lambda: tf.logical_and(not_overflow, not_eos), # Else, just wait for max length lambda: not_overflow) return not_overflow result, logits, loss = tf.while_loop( while_exit_cond, infer_step, [result, logits, loss], shape_invariants=[ tf.TensorShape([None, None, None, None]), tf.TensorShape([None, None, None, None, None]), tf.TensorShape([]),
tensorflow.equal
5,200
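The record above exercises tf.equal as the predicate of a tf.cond early-stopping check. A minimal sketch of that pattern, assuming TF 1.x graph mode; the placeholder and branch values are illustrative, not taken from the seed's source:

```python
import tensorflow as tf  # assumes TF 1.x graph mode, matching the seed

batch_size = tf.placeholder(tf.int32, shape=[], name="batch_size")
# tf.equal is elementwise; on scalars it yields a scalar bool, so it can
# drive tf.cond the way the seed's batch_size == 1 early-stopping check does.
is_single = tf.equal(batch_size, 1)
branch = tf.cond(is_single,
                 lambda: tf.constant("check EOS"),
                 lambda: tf.constant("wait for max length"))

with tf.Session() as sess:
    print(sess.run(branch, feed_dict={batch_size: 1}))  # b'check EOS'
```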
import tensorflow as tf encoder_input_length=None, feed_argmax=True, rewards=None, use_baseline=True, training=True, global_step=None, monotonicity_weight=None, monotonicity_dist=None, monotonicity_decay=None, **kwargs): decoder = decoders[0] targets = targets[0] # single decoder if encoder_input_length is None: encoder_input_length = [] for encoder_inputs_ in encoder_inputs: mask = get_weights(encoder_inputs_, utils.EOS_ID, include_first_eos=True) encoder_input_length.append(tf.to_int32(tf.reduce_sum(mask, axis=1))) parameters = dict(encoders=encoders, decoder=decoder, encoder_inputs=encoder_inputs, feed_argmax=feed_argmax, training=training) attention_states, encoder_state, encoder_input_length = multi_encoder( encoder_input_length=encoder_input_length, **parameters) outputs, attention_weights, _, _, samples, beam_fun, initial_data = attention_decoder( attention_states=attention_states, initial_state=encoder_state, feed_previous=feed_previous,
tensorflow.reduce_sum
5,201
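The seed above derives encoder_input_length by summing a 0/1 padding mask. A minimal sketch of that use of tf.reduce_sum, assuming TF 1.x; the mask values are made up for illustration:

```python
import tensorflow as tf  # TF 1.x assumed

# A 0/1 token mask such as the seed's get_weights(...) would produce.
mask = tf.constant([[1., 1., 1., 0., 0.],
                    [1., 1., 1., 1., 0.]])
# Summing along the time axis recovers each sequence's true length.
lengths = tf.to_int32(tf.reduce_sum(mask, axis=1))

with tf.Session() as sess:
    print(sess.run(lengths))  # [3 4]
```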
import tensorflow as tf n_actions = ac_space.nvec if isinstance(ac_space, MultiDiscrete) else ac_space.n random_actions = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=n_actions, dtype=tf.int64) chose_random = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=1, dtype=tf.float32) < eps perturbed_stochastic_actions = tf.where(chose_random, random_actions, perturbed_deterministic_actions) stochastic_actions = tf.where(chose_random, random_actions, deterministic_actions) perturbed_output_actions = tf.cond(stochastic_ph, lambda: perturbed_stochastic_actions, lambda: deterministic_actions) output_actions = tf.cond(stochastic_ph, lambda: stochastic_actions, lambda: deterministic_actions) update_eps_expr = eps.assign(tf.cond(update_eps_ph >= 0, lambda: update_eps_ph, lambda: eps)) updates = [ update_eps_expr, tf.cond(reset_ph, lambda: perturb_vars(original_scope="model", perturbed_scope="perturbed_model/model"), lambda: tf.group(*[])), tf.cond(update_param_noise_scale_ph, lambda: update_scale(), lambda: tf.Variable(0., trainable=False)), update_param_noise_thres_expr, ] _act = tf_util.function(inputs=[policy.obs_ph, stochastic_ph, update_eps_ph], outputs=output_actions, givens={update_eps_ph: -1.0, stochastic_ph: True}, updates=[update_eps_expr]) _perturbed_act = tf_util.function( inputs=[policy.obs_ph, stochastic_ph, update_eps_ph, reset_ph, update_param_noise_threshold_ph, update_param_noise_scale_ph],
tensorflow.group
5,202
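The seed above uses tf.group(*[]) as a no-op branch inside tf.cond. A minimal, hedged sketch of tf.group bundling updates (TF 1.x assumed; the variables are illustrative):

```python
import tensorflow as tf  # TF 1.x assumed

v = tf.Variable(0.0)
w = tf.Variable(1.0)
# tf.group bundles several ops into a single op with no output value;
# called with no arguments (as in the seed's tf.group(*[])) it is a no-op.
update_both = tf.group(tf.assign_add(v, 1.0), tf.assign_add(w, 2.0))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(update_both)
    print(sess.run([v, w]))  # [1.0, 3.0]
```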
import tensorflow as tf logging.info("DropconnectDenseLayer %s: %d %s" % (name, n_units, act.__name__)) if W_init_args is None: W_init_args = {} if b_init_args is None: b_init_args = {} self.inputs = prev_layer.outputs if self.inputs.get_shape().ndims != 2: raise Exception("The input dimension must be rank 2") n_in = int(self.inputs.get_shape()[-1]) self.n_units = n_units with tf.variable_scope(name): W = tf.get_variable(name='W', shape=(n_in, n_units), initializer=W_init, dtype=LayersConfig.tf_dtype, **W_init_args) b = tf.get_variable(name='b', shape=(n_units), initializer=b_init, dtype=LayersConfig.tf_dtype, **b_init_args) # self.outputs = act(tf.matmul(self.inputs, W) + b) LayersConfig.set_keep[name] = tf.placeholder(tf.float32) W_dropcon = tf.nn.dropout(W, LayersConfig.set_keep[name]) self.outputs = act(tf.matmul(self.inputs, W_dropcon) + b) # self.all_layers = list(layer.all_layers) # self.all_params = list(layer.all_params) # self.all_drop = dict(layer.all_drop) self.all_drop.update({LayersConfig.set_keep[name]: keep}) self.all_layers.append(self.outputs) self.all_params.extend([W, b])
tensorflow.get_variable
5,203
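The DropconnectDense seed above creates its W and b with tf.get_variable inside a variable scope. A minimal sketch of that pattern (TF 1.x assumed; shapes and scope name are illustrative):

```python
import tensorflow as tf  # TF 1.x assumed

with tf.variable_scope("dense", reuse=tf.AUTO_REUSE):
    # Created on first call, reused on later calls within the same scope.
    W = tf.get_variable("W", shape=(128, 64),
                        initializer=tf.glorot_uniform_initializer())
    b = tf.get_variable("b", shape=(64,), initializer=tf.zeros_initializer())

x = tf.placeholder(tf.float32, [None, 128])
y = tf.nn.relu(tf.matmul(x, W) + b)
```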
import tensorflow as tf horizon_pred = horizon_sumV1(pred, horizon) horizon_tgt = horizon_sumV1(tgt, horizon) pred_flat = tf.reshape(horizon_pred, [-1]) tgt_flat = tf.reshape(horizon_tgt, [-1]) batch = tf.stack([pred_flat, tgt_flat], 1) sample_func = sample_pair(batch)
tensorflow.reshape
5,204
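The seed above flattens predictions and targets with tf.reshape before pairing them. A minimal sketch (TF 1.x assumed; the constants are illustrative):

```python
import tensorflow as tf  # TF 1.x assumed

pred = tf.constant([[1., 2.], [3., 4.]])
tgt = tf.constant([[5., 6.], [7., 8.]])
# Flatten both tensors, then pair them column-wise as the seed does.
batch = tf.stack([tf.reshape(pred, [-1]), tf.reshape(tgt, [-1])], axis=1)

with tf.Session() as sess:
    print(sess.run(batch))  # [[1. 5.] [2. 6.] [3. 7.] [4. 8.]]
```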
from tensorflow.contrib.learn.python.learn.datasets import base def fake(): return DataSet([], [], fake_data=True, one_hot=one_hot, dtype=dtype) train = fake() validation = fake() test = fake() return base.Datasets(train=train, validation=validation, test=test) TRAIN_IMAGES = 'train-images-idx3-ubyte.gz' TRAIN_LABELS = 'train-labels-idx1-ubyte.gz' TEST_IMAGES = 't10k-images-idx3-ubyte.gz' TEST_LABELS = 't10k-labels-idx1-ubyte.gz' VALIDATION_SIZE = 5000 local_file = base.maybe_download(TRAIN_IMAGES, train_dir, SOURCE_URL + TRAIN_IMAGES) train_images = extract_images(local_file) local_file = base.maybe_download(TRAIN_LABELS, train_dir, SOURCE_URL + TRAIN_LABELS) train_labels = extract_labels(local_file, one_hot=one_hot) local_file = base.maybe_download(TEST_IMAGES, train_dir, SOURCE_URL + TEST_IMAGES) test_images = extract_images(local_file) local_file = base.maybe_download(TEST_LABELS, train_dir, SOURCE_URL + TEST_LABELS) test_labels = extract_labels(local_file, one_hot=one_hot)
tensorflow.contrib.learn.python.learn.datasets.base.maybe_download
5,205
import tensorflow as tf ): """Returns `model_fn` closure for TPUEstimator.""" def model_fn(features, labels, mode, params): # pylint: disable=unused-argument """The `model_fn` for TPUEstimator.""" tf.logging.info("*** Features ***") for name in sorted(features.keys()): tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape)) input_ids = features["input_ids"] input_mask = features["input_mask"] segment_ids = features["segment_ids"] masked_lm_positions = features["masked_lm_positions"] masked_lm_ids = features["masked_lm_ids"] masked_lm_weights = features["masked_lm_weights"]
tensorflow.logging.info
5,206
import tensorflow as tf train_op = ema.apply(tf.trainable_variables()) return tf.estimator.EstimatorSpec(mode, loss=total_loss, train_op=train_op) def add_weight_decay(weight_decay): """Add L2 regularization to all (or some) trainable kernel weights.""" weight_decay = tf.constant( weight_decay, tf.float32, [], 'weight_decay' ) trainable_vars = tf.trainable_variables() kernels = [ v for v in trainable_vars if ('weights' in v.name or 'kernel' in v.name) and 'depthwise_weights' not in v.name ] for K in kernels: x = tf.multiply(weight_decay, tf.nn.l2_loss(K)) tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, x) class RestoreMovingAverageHook(tf.train.SessionRunHook):
tensorflow.trainable_variables
5,207
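The add_weight_decay seed above filters tf.trainable_variables() for kernel weights and attaches an L2 penalty to each. A minimal sketch of the same idea (TF 1.x assumed; the layer and the 1e-4 decay are illustrative):

```python
import tensorflow as tf  # TF 1.x assumed

x = tf.placeholder(tf.float32, [None, 32])
_ = tf.layers.dense(x, 16, name="fc")  # creates fc/kernel and fc/bias

# Keep only kernel weights (skip biases) and register an L2 penalty for each.
kernels = [v for v in tf.trainable_variables() if "kernel" in v.name]
for k in kernels:
    tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES,
                         tf.multiply(1e-4, tf.nn.l2_loss(k)))
reg_loss = tf.reduce_sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
```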
import tensorflow as tf worker_device=worker, ps_device='/cpu:0', ps_tasks=1) with tf.name_scope('{}_{}'.format(mode, i)) as scope: with tf.device(device_setter): net_outputs = self._model(shards[i], mode, **self.config) if mode == Mode.TRAIN: loss = self._loss(net_outputs, shards[i], **self.config) loss += tf.reduce_sum( tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES, scope)) model_params = tf.trainable_variables() grad = tf.gradients(loss, model_params) tower_losses.append(loss) tower_gradvars.append(zip(grad, model_params)) if i == 0: update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, scope) elif mode == Mode.EVAL: tower_metrics.append(self._metrics( net_outputs, shards[i], **self.config)) else: tower_preds.append(net_outputs) if mode == Mode.TRAIN: return tower_losses, tower_gradvars, update_ops elif mode == Mode.EVAL: return tower_metrics else: return tower_preds
tensorflow.get_collection
5,208
import tensorflow as tf writer = tf.train.SummaryWriter('logs/', sess.graph) else: # tensorflow version >= 0.12 writer = tf.summary.FileWriter("logs/", sess.graph) if int((tf.__version__).split('.')[1]) < 12 and int((tf.__version__).split('.')[0]) < 1: init = tf.initialize_all_variables() else: init = tf.global_variables_initializer()
tensorflow.__version__.split
5,209
import tensorflow as tf output_1 = contrib.layers.fully_connected(dropout3_1, n_output_1, activation_fn=None, scope="output_1") output_2 = contrib.layers.fully_connected(dropout3_2, n_output_2, activation_fn=None, scope="output_2") with tf.variable_scope("loss"): loss_base_1 = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_1, logits=output_1)) loss_base_2 = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_2, logits=output_2)) reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES) loss_total = loss_base_1 + loss_base_2 + tf.reduce_sum(reg_losses) with tf.variable_scope("evaluation"): accuracy_1 = tf.reduce_mean(tf.cast(tf.equal( tf.argmax(output_1, axis=-1), tf.argmax(y_1, axis=-1)), tf.float32), name="accuracy_1") accuracy_2 = tf.reduce_mean(tf.cast(tf.equal( tf.argmax(output_2, axis=-1), tf.argmax(y_2, axis=-1)), tf.float32), name="accuracy_2") accuracy = tf.divide(accuracy_1 + accuracy_2, 2.0, name="accuracy") with tf.variable_scope("train"): global_step = tf.get_variable("global_step", shape=(), dtype=tf.int32, trainable=False) train_op = tf.train.AdamOptimizer(learning_rate=lr).minimize(loss_total, global_step=global_step) with tf.variable_scope("summary"): summary_loss_total = tf.summary.scalar("loss_total", loss_total) summary_accuracy_test = tf.summary.scalar("accuracy_test", accuracy)
tensorflow.argmax
5,210
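The seed above computes accuracy by comparing tf.argmax over logits and one-hot labels. A minimal sketch (TF 1.x assumed; the logits and labels are made up):

```python
import tensorflow as tf  # TF 1.x assumed

logits = tf.constant([[0.1, 2.0, 0.3], [1.5, 0.2, 0.1]])
labels = tf.constant([[0., 1., 0.], [0., 0., 1.]])  # one-hot
correct = tf.equal(tf.argmax(logits, axis=-1), tf.argmax(labels, axis=-1))
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))

with tf.Session() as sess:
    print(sess.run(accuracy))  # 0.5
```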
import tensorflow as tf int(s_h/2), int(s_h/4), int(s_h/8), int(s_h/16) s_w2, s_w4, s_w8, s_w16 = \ int(s_w/2), int(s_w/4), int(s_w/8), int(s_w/16) output_z_ = lrelu(linear(trans_z, self.gf_dim*8*s_h16*s_w16, 'd_h0_lin')) output_h0 = tf.reshape(output_z_, [-1, s_h16, s_w16, self.gf_dim * 8]) output_h1 = lrelu(deconv2d(tf.concat([output_h0, tgtctx_h3], 3), [self.batch_size, s_h8, s_w8, self.gf_dim*4], name='d_h1')) output_h2 = lrelu(deconv2d(tf.concat([output_h1, tgtctx_h2], 3), [self.batch_size, s_h4, s_w4, self.gf_dim*2], name='d_h2')) output_h3 = lrelu(deconv2d(tf.concat([output_h2, tgtctx_h1], 3), [self.batch_size, s_h2, s_w2, self.gf_dim*1], name='d_h3')) output_h4 = deconv2d(tf.concat([output_h3, tgtctx_h0], 3), [self.batch_size, s_h, s_w, self.c_dim], name='d_h4') scope.reuse_variables()
tensorflow.concat
5,211
import tensorflow as tf w_ctx2out = tf.get_variable('w_ctx2out', [self.D, self.M], initializer=self.weight_initializer) h_logits += tf.matmul(context, w_ctx2out) if self.prev2out: h_logits += x h_logits = tf.nn.tanh(h_logits) if dropout: h_logits = tf.nn.dropout(h_logits, 0.5) out_logits = tf.matmul(h_logits, w_out) + b_out
tensorflow.nn.tanh
5,212
from tensorflow.python.framework import ops start: A 0-D (scalar) of type `int32`. First entry in sequence. limit: A 0-D (scalar) of type `int32`. Upper limit of sequence, exclusive. delta: A 0-D `Tensor` (scalar) of type `int32`. Optional. Default is 1. Number that increments `start`. name: A name for the operation (optional). Returns: An 1-D `int32` `Tensor`. """ return gen_math_ops._range(start, limit, delta, name=name) @ops.RegisterShape("Range") def _RangeShape(op): start_value = tensor_util.ConstantValue(op.inputs[0]) limit_value = tensor_util.ConstantValue(op.inputs[1]) delta_value = tensor_util.ConstantValue(op.inputs[2]) if start_value is None or limit_value is None or delta_value is None: return [tensor_shape.vector(None)] else: return [tensor_shape.vector((limit_value - start_value + delta_value - 1) // delta_value)] # Reduction operations
tensorflow.python.framework.ops.RegisterShape
5,213
import tensorflow as tf global_step, decay_steps=lr_decay_params["lr_decay_steps"], decay_rate=lr_decay_params["lr_decay_rate"], staircase=lr_decay_params.get("staircase", True), ) optimizer_params["learning_rate"] = learning_rate var_list = None if self.params["warm_start_dir"]: output_vars1 = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="task_dependent") output_vars2 = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="task_independent/Variable_1") var_list = [output_vars1, output_vars2] train_op = tf.train.AdamOptimizer( # learning_rate=self.params["learning_rate"] # **self.params.get("optimizer_params", {}) # learning_rate=learning_rate **optimizer_params ).minimize(loss, global_step=global_step, var_list=var_list)
tensorflow.get_collection
5,214
import tensorflow as tf def compute_error_loss(pred1, pred2, tgt1, tgt2, hard_ratio=1.0): geq = tf.cast((tgt1 - tgt2) > 0, tf.bool) tgt_larg = tf.where(geq, tgt1, tgt2) tgt_small = tf.where(geq, tgt2, tgt1) pred_larg = tf.where(geq, pred1, pred2) pred_small = tf.where(geq, pred2, pred1) loss = tf.maximum(0., (tgt_larg - tgt_small) - (pred_larg - pred_small)) if hard_ratio < 1.0: hard_num = tf.cast(tools.shape(pred1)[0] * hard_ratio, tf.int32) loss = tf.reshape(loss, [-1]) hard_loss, _ = tf.math.top_k(loss, k=hard_num) return hard_loss return loss def sample_pair(batch): num_sam = tools.shape(batch)[0] index = tf.range(num_sam) tgt1 = tf.slice(batch, [0, 1], [num_sam, 1]) pred1 = tf.slice(batch, [0, 0], [num_sam, 1])
tensorflow.math.top_k
5,215
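The seed above keeps only the hardest fraction of pairwise losses with tf.math.top_k. A minimal sketch of that hard-example selection (assumes TF >= 1.13, where the tf.math namespace is available; the loss values and ratio are illustrative):

```python
import tensorflow as tf  # TF 1.x (>= 1.13 for tf.math.top_k) assumed

loss = tf.constant([0.1, 0.9, 0.4, 0.7, 0.2])
hard_ratio = 0.4
hard_num = tf.cast(tf.cast(tf.shape(loss)[0], tf.float32) * hard_ratio, tf.int32)
# top_k returns the k largest entries, i.e. the "hardest" losses here.
hard_loss, _ = tf.math.top_k(loss, k=hard_num)

with tf.Session() as sess:
    print(sess.run(hard_loss))  # [0.9 0.7]
```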
import tensorflow as tf padded1 = tf.pad(sampled1, [[0, 0], [HALF_DIFF, HALF_DIFF], [HALF_DIFF, HALF_DIFF], [0, 0]]) padded2 = tf.pad(sampled2, [[0, 0], [HALF_DIFF, HALF_DIFF], [HALF_DIFF, HALF_DIFF], [0, 0]]) img_orig = tf.concat([image[:, :, :, 0], image[:, :, :, 1]], 1) # b x 2h x w transform1 = tf.concat([padded1[:, :, :, 0], padded1[:, :, :, 1]], 1) transform2 = tf.concat([padded2[:, :, :, 0], padded2[:, :, :, 1]], 1) stacked = tf.concat([img_orig, transform1, transform2], 2, 'viz') tf.summary.image('visualize', tf.expand_dims(stacked, -1), max_outputs=30) sampled = tf.concat([sampled1, sampled2], 3, 'sampled_concat') logits = (LinearWrap(sampled) .FullyConnected('fc1', out_dim=256, nl=tf.nn.relu) .FullyConnected('fc2', out_dim=128, nl=tf.nn.relu) .FullyConnected('fct', out_dim=19, nl=tf.identity)()) tf.nn.softmax(logits, name='prob') cost = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=label) cost = tf.reduce_mean(cost, name='cross_entropy_loss')
tensorflow.concat
5,216
import tensorflow as tf "tpu_name", None, "The Cloud TPU to use for training. This should be either the name " "used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 " "url.") tf.flags.DEFINE_string( "tpu_zone", None, "[Optional] GCE zone where the Cloud TPU is located in. If not " "specified, we will attempt to automatically detect the GCE project from " "metadata.")
tensorflow.flags.DEFINE_string
5,217
import tensorflow as tf with tf.Session() as sess: sess.run(tf.global_variables_initializer()) for _ in range(1):
tensorflow.global_variables_initializer
5,218
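The seed above runs tf.global_variables_initializer() right after opening a session; that op must run before any variable is read. A minimal sketch (TF 1.x assumed):

```python
import tensorflow as tf  # TF 1.x assumed

v = tf.Variable(3.0)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())  # initialize all variables first
    print(sess.run(v))  # 3.0
```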
from tensorflow.python.ops import array_ops mask_logits(end_logits, mask=tf.reshape(self.c_mask, [N, -1]))] self.logits1, self.logits2 = [l for l in self.logits] outer = tf.matmul(tf.expand_dims(tf.nn.softmax(self.logits1), axis=2), tf.expand_dims(tf.nn.softmax(self.logits2), axis=1)) outer = tf.matrix_band_part(outer, 0, self.max_a_len) self.yp1 = tf.argmax(tf.reduce_max(outer, axis=2), axis=1) self.yp2 = tf.argmax(tf.reduce_max(outer, axis=1), axis=1) def _compute_loss(self): def focal_loss(logits, labels, weights=None, alpha=0.25, gamma=2): logits = tf.nn.sigmoid(logits) zeros = array_ops.zeros_like(logits, dtype=logits.dtype) pos_p_sub = array_ops.where(labels > zeros, labels - logits, zeros) neg_p_sub = array_ops.where(labels > zeros, zeros, logits) cross_ent = - alpha * (pos_p_sub ** gamma) * tf.log(tf.clip_by_value(logits, 1e-8, 1.0)) \ - (1 - alpha) * (neg_p_sub ** gamma) * tf.log(tf.clip_by_value(1.0 - logits, 1e-8, 1.0)) return tf.reduce_sum(cross_ent, 1) start_label = tf.one_hot(self.start_label, tf.shape(self.logits1)[1], axis=1) end_label = tf.one_hot(self.end_label, tf.shape(self.logits2)[1], axis=1) if self.config.loss_type == 'cross_entropy': start_loss = tf.nn.softmax_cross_entropy_with_logits( logits=self.logits1, labels=start_label) end_loss = tf.nn.softmax_cross_entropy_with_logits(
tensorflow.python.ops.array_ops.zeros_like
5,219
import tensorflow as tf biases = tf.get_variable("biases", [nOut], initializer=tf.constant_initializer(), dtype=inpOp.dtype) bias = tf.nn.bias_add(conv_bn, biases) conv1 = tf.nn.relu(bias) return conv1
tensorflow.nn.relu
5,220
from tensorflow.python.ops import variable_scope as vs return self._num_units def __call__(self, inputs, state, att_score): return self.call(inputs, state, att_score) def call(self, inputs, state, att_score=None): """Gated recurrent unit (GRU) with nunits cells.""" if self._gate_linear is None: bias_ones = self._bias_initializer if self._bias_initializer is None: bias_ones = init_ops.constant_initializer(1.0, dtype=inputs.dtype) with vs.variable_scope("gates"): # Reset gate and update gate. self._gate_linear = _Linear( [inputs, state], 2 * self._num_units, True, bias_initializer=bias_ones, kernel_initializer=self._kernel_initializer) value = math_ops.sigmoid(self._gate_linear([inputs, state]))
tensorflow.python.ops.variable_scope.variable_scope
5,221
import tensorflow as tf if pos is not None: pos = tf.reshape(pos, [-1, 1]) pos = tf.minimum(pos, encoder_input_length - 1) if pos is not None and encoder.attn_window_size > 0: # `pred_edits` scenario, where we know the aligned pos # when the windows size is non-zero, we concatenate consecutive encoder states # and map it to the right attention vector size. weights = tf.to_float(tf.one_hot(tf.to_int32(tf.squeeze(pos, axis=1)), depth=attn_length)) weighted_average = [] for offset in range(-encoder.attn_window_size, encoder.attn_window_size + 1): pos_ = pos + offset pos_ = tf.minimum(pos_, encoder_input_length - 1) pos_ = tf.maximum(pos_, 0) # TODO: when pos is < 0, use <S> or </S> weights_ = tf.to_float(tf.one_hot(tf.to_int32(tf.squeeze(pos_, axis=1)), depth=attn_length)) weighted_average_ = tf.reduce_sum(tf.expand_dims(weights_, axis=2) * hidden_states, axis=1) weighted_average.append(weighted_average_) weighted_average = tf.concat(weighted_average, axis=1) weighted_average = dense(weighted_average, encoder.attn_size) elif pos is not None: weights = tf.to_float(tf.one_hot(tf.to_int32(tf.squeeze(pos, axis=1)), depth=attn_length)) weighted_average = tf.reduce_sum(tf.expand_dims(weights, axis=2) * hidden_states, axis=1) else: # Local attention of Luong et al. (http://arxiv.org/abs/1508.04025) wp = get_variable('Wp', [state_size, state_size]) vp = get_variable('vp', [state_size, 1])
tensorflow.maximum
5,222
import tensorflow as tf def local_attention(state, hidden_states, encoder, encoder_input_length, pos=None, scope=None, context=None, **kwargs): batch_size = tf.shape(state)[0] attn_length = tf.shape(hidden_states)[1] if context is not None and encoder.use_context: state = tf.concat([state, context], axis=1) state_size = state.get_shape()[1].value with tf.variable_scope(scope or 'attention_{}'.format(encoder.name)): encoder_input_length = tf.to_float(tf.expand_dims(encoder_input_length, axis=1)) if pos is not None: pos = tf.reshape(pos, [-1, 1]) pos = tf.minimum(pos, encoder_input_length - 1) if pos is not None and encoder.attn_window_size > 0: # `pred_edits` scenario, where we know the aligned pos # when the windows size is non-zero, we concatenate consecutive encoder states # and map it to the right attention vector size. weights = tf.to_float(tf.one_hot(tf.to_int32(tf.squeeze(pos, axis=1)), depth=attn_length)) weighted_average = [] for offset in range(-encoder.attn_window_size, encoder.attn_window_size + 1): pos_ = pos + offset pos_ = tf.minimum(pos_, encoder_input_length - 1) pos_ = tf.maximum(pos_, 0) # TODO: when pos is < 0, use <S> or </S> weights_ = tf.to_float(tf.one_hot(tf.to_int32(tf.squeeze(pos_, axis=1)), depth=attn_length))
tensorflow.reshape
5,223
import tensorflow as tf blk_shape = tf.shape(blk_indices) ksize = tf.shape(w)
tensorflow.shape
5,224
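The seed above reads block and kernel shapes with tf.shape, which returns the dynamic shape at run time. A minimal sketch (TF 1.x assumed; the placeholder is illustrative):

```python
import tensorflow as tf  # TF 1.x assumed

x = tf.placeholder(tf.float32, [None, None, 3])
# tf.shape yields an int32 tensor with the runtime shape, even when the
# static shape is only partially known (as for blk_indices in the seed).
dyn_shape = tf.shape(x)

with tf.Session() as sess:
    print(sess.run(dyn_shape, feed_dict={x: [[[0., 0., 0.]]]}))  # [1 1 3]
```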
import tensorflow as tf gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction = FLAGS.gpu_memory_fraction) config = tf.ConfigProto(allow_soft_placement = True, log_device_placement = False, intra_op_parallelism_threads = FLAGS.num_cpu_threads, inter_op_parallelism_threads = FLAGS.num_cpu_threads, gpu_options = gpu_options)
tensorflow.ConfigProto
5,225
import tensorflow as tf reg += self.L1_out * tf.reduce_mean(tf.abs(self.W_out) * self.output_Connectivity) # L2 weight regularization reg += self.L2_in * tf.reduce_mean(tf.square(tf.abs(self.W_in) * self.input_Connectivity)) reg += self.L2_rec * tf.reduce_mean(tf.square(tf.abs(self.W_rec) * self.rec_Connectivity)) if self.dale_ratio: reg += self.L2_out * tf.reduce_mean(tf.square( tf.matmul(tf.abs(self.W_out) * self.output_Connectivity, self.Dale_out)))
tensorflow.abs
5,226
import tensorflow as tf with self.session(use_gpu=False) as sess: tf.set_random_seed(93820985)
tensorflow.set_random_seed
5,227
import tensorflow as tf strides = [1, 1, stride, stride] bshape = [1, nf, 1, 1] else: raise NotImplementedError bias_var_shape = [nf] if one_dim_bias else [1, nf, 1, 1] nin = x.get_shape()[channel_ax].value wshape = [rf, rf, nin, nf] with tf.variable_scope(scope): w = tf.get_variable("w", wshape, initializer=ortho_init(init_scale)) b = tf.get_variable("b", bias_var_shape, initializer=tf.constant_initializer(0.0)) if not one_dim_bias and data_format == 'NHWC': b = tf.reshape(b, bshape) return tf.nn.conv2d(x, w, strides=strides, padding=pad, data_format=data_format) + b def fc(x, scope, nh, *, init_scale=1.0, init_bias=0.0): with tf.variable_scope(scope): nin = x.get_shape()[1].value w = tf.get_variable("w", [nin, nh], initializer=ortho_init(init_scale)) print("w is "+str(w)) b = tf.get_variable("b", [nh], initializer=tf.constant_initializer(init_bias)) return tf.matmul(x, w)+b def batch_to_seq(h, nbatch, nsteps, flat=False): if flat:
tensorflow.nn.conv2d
5,228
import tensorflow as tf CHANGES - Added one more block consisting of 3 conv layers and 1 max pool layer - kernel size was changed (3,3) -> (8,8), strides from 1 to 4, to get 1 x 1 in last layer - removed policy layers ''' w_init = tf.contrib.layers.variance_scaling_initializer() # glimpse1 = tf.image.extract_glimpse(inputs, [glimpse_size1,glimpse_size1], self.prev_loc, centered=True, normalized=True) # glimpse2 = tf.image.extract_glimpse(inputs, [glimpse_size2,glimpse_size2], self.prev_loc, centered=True, normalized=True)
tensorflow.contrib.layers.variance_scaling_initializer
5,229
import tensorflow as tf dtype=input_data_type, stddev=1e-1, name='synthetic_images') images = tf.contrib.framework.local_variable( images, name='gpu_cached_images') labels = host_labels with tf.device(self.devices[device_num]): # Rescale to [0, 1) images *= 1. / 256 # Rescale to [-1,1] instead of [0, 1) images = tf.subtract(images, 0.5) images = tf.multiply(images, 2.0) if self.data_format == 'NCHW': images = tf.transpose(images, [0, 3, 1, 2]) if input_data_type != data_type: images = tf.cast(images, data_type) network = ConvNetBuilder( images, input_nchan, phase_train, self.data_format, data_type) self.model_conf.add_inference(network) # Add the final fully-connected class layer logits = network.affine(nclass, activation='linear') if not phase_train: top_1_op = tf.reduce_sum( tf.cast(tf.nn.in_top_k(logits, labels, 1), data_type)) top_5_op = tf.reduce_sum( tf.cast(tf.nn.in_top_k(logits, labels, 5), data_type)) return (logits, top_1_op, top_5_op) loss = loss_function(logits, labels)
tensorflow.transpose
5,230
import tensorflow as tf if "GuidedBackProp" not in ops._gradient_registry._registry: @ops.RegisterGradient("GuidedBackProp") def _GuidedBackProp(op, grad): dtype = op.inputs[0].dtype return grad * tf.cast(grad > 0., dtype) * \ tf.cast(op.inputs[0] > 0., dtype) def compile_saliency_function(model, activation_layer='block5_conv3'): #mixed10 'activation_49' add_16 add_32 activation_98
tensorflow.cast
5,231
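The guided-backprop seed above gates gradients with two boolean masks cast back to the input dtype. A minimal sketch of that gating (TF 1.x assumed; the constants are illustrative):

```python
import tensorflow as tf  # TF 1.x assumed

grad = tf.constant([-1.0, 0.5, 2.0])
inputs = tf.constant([3.0, -2.0, 1.0])
# Keep the gradient only where both the gradient and the forward
# activation are positive, as in the registered GuidedBackProp gradient.
gated = grad * tf.cast(grad > 0., tf.float32) * tf.cast(inputs > 0., tf.float32)

with tf.Session() as sess:
    print(sess.run(gated))  # [0. 0. 2.]
```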
from tensorflow.python.ops import math_ops if curve == 'ROC': fp_rate = math_ops.div(fp, fp + tn + epsilon) x = fp_rate y = recall else: # curve == 'PR'. precision = math_ops.div(tp + epsilon, tp + fp + epsilon) x = recall y = precision return math_ops.reduce_sum(math_ops.mul( x[:num_thresholds - 1] - x[1:], (y[:num_thresholds - 1] + y[1:]) / 2.), name=name) # sum up the areas of all the trapeziums auc = compute_auc(tp, fn, tn, fp, 'value') update_op = compute_auc( tp_update_op, fn_update_op, tn_update_op, fp_update_op, 'update_op')
tensorflow.python.ops.math_ops.mul
5,232
import tensorflow as tf self.optimizer_Adam = tf.compat.v1.train.AdamOptimizer()
tensorflow.compat.v1.train.AdamOptimizer
5,233
import tensorflow as tf logits = cifar10.inference(images) # Build the portion of the Graph calculating the losses. Note that we will # assemble the total_loss using a custom function below. _ = cifar10.loss(logits, labels) # Assemble all of the losses for the current tower only. losses = tf.get_collection('losses', scope) # Calculate the total loss for the current tower. total_loss = tf.add_n(losses, name='total_loss') # Attach a scalar summary to all individual losses and the total loss; do the # same for the averaged version of the losses.
tensorflow.get_collection
5,234
import tensorflow as tf y += dense(pos_feats, encoder.attn_size, use_bias=False, name='P_a') if encoder.attn_filters: filter_shape = [encoder.attn_filter_length * 2 + 1, 1, 1, encoder.attn_filters] filter_ = get_variable('filter', filter_shape) prev_weights = tf.reshape(prev_weights, tf.stack([batch_size, time_steps, 1, 1])) conv = tf.nn.conv2d(prev_weights, filter_, [1, 1, 1, 1], 'SAME') conv = tf.squeeze(conv, axis=2) y += dense(conv, encoder.attn_size, use_bias=False, name='C_a') v = get_variable('v_a', [encoder.attn_size]) return tf.reduce_sum(v * tf.tanh(y), axis=2)
tensorflow.squeeze
5,235
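The attention seed above squeezes away the size-1 axis left by its 2-D convolution. A minimal sketch (TF 1.x assumed; the placeholder shape is illustrative):

```python
import tensorflow as tf  # TF 1.x assumed

conv = tf.placeholder(tf.float32, [None, 50, 1, 8])
# Drop the size-1 dimension, turning [batch, time, 1, filters]
# into [batch, time, filters] as the seed does after tf.nn.conv2d.
squeezed = tf.squeeze(conv, axis=2)
```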
import tensorflow as tf mu,var = tf.nn.moments(t,axes=[0,1,2]) std = tf.sqrt(var+self.epsilon) return [tf.assign(self.g,1/std),tf.assign(self.b,-1.*mu/std)] require_init = tf.reduce_any(tf.is_nan(self.g)) init_ops = tf.cond(require_init,_init,lambda : [self.g,self.b]) with tf.control_dependencies(init_ops): w = tf.reshape(self.g,[1,1,1,tf.shape(self.v)[-1]]) * tf.nn.l2_normalize(self.v,axis=[0,1,2]) return tf.nn.bias_add( tf.nn.conv2d(input_var, w,data_format='NHWC', strides=self.strides, padding=self.padding), self.b,data_format='NHWC',name=name)
tensorflow.control_dependencies
5,236
import tensorflow as tf feature_emb_list.append(antecedent_distance_emb) feature_emb = tf.concat(feature_emb_list, 2) # [k, c, emb] feature_emb = tf.nn.dropout(feature_emb, self.dropout) # [k, c, emb] target_emb = tf.expand_dims(top_span_emb, 1) # [k, 1, emb]
tensorflow.nn.dropout
5,237
import tensorflow as tf batch_size = tf.shape(targets)[0] time_steps = tf.shape(targets)[1]
tensorflow.shape
5,238
from tensorflow.python.training import moving_averages second_moment: The second_moment value to update with. is_training: Boolean Tensor to indicate if we're currently in training mode. """ def build_update_ops(): """Builds the exponential moving average update ops.""" update_mean_op = moving_averages.assign_moving_average( variable=self._moving_mean, value=mean, decay=self._decay_rate, name="update_moving_mean").op update_second_moment_op = moving_averages.assign_moving_average( variable=self._moving_second_moment,
tensorflow.python.training.moving_averages.assign_moving_average
5,239
import tensorflow as tf class Model: def __init__(self, num_layers, size_layers, learning_rate=1e-3, dropout=1.0): self.X = tf.placeholder(tf.int32, (None, None)) self.training = tf.placeholder(tf.bool, None) lookup_table = tf.get_variable(
tensorflow.placeholder
5,240
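The Model.__init__ seed above declares its inputs as placeholders fed at run time. A minimal sketch (TF 1.x assumed; the dropout switch is illustrative):

```python
import tensorflow as tf  # TF 1.x assumed

X = tf.placeholder(tf.int32, [None, None], name="X")       # token ids
training = tf.placeholder(tf.bool, None, name="training")  # train/eval switch
# A boolean placeholder can gate training-only behaviour such as dropout.
keep_prob = tf.cond(training, lambda: tf.constant(0.9), lambda: tf.constant(1.0))
```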
import tensorflow as tf # Every new connection creates a new op which adds its contribution # to the running average when ran. tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_mean_op) tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_second_moment_op) def _build(self, input_batch, is_training=True, test_local_stats=True):
tensorflow.add_to_collection
5,241
import tensorflow as tf # Step-wise contrastive loss even = [2 * i for i in range(25)] odd = [2 * i + 1 for i in range(25)] pred1 = tf.gather(pred, even) pred2 = tf.gather(pred, odd) tgt1 = tf.gather(tgt, even) tgt2 = tf.gather(tgt, odd) geq = tf.cast((tgt1 - tgt2) > 0, tf.bool) tgt_larg = tf.where(geq, tgt1, tgt2) tgt_small = tf.where(geq, tgt2, tgt1) pred_larg = tf.where(geq, pred1, pred2) pred_small = tf.where(geq, pred2, pred1) loss = tf.maximum(0.0, (tgt_larg - tgt_small) - (pred_larg - pred_small)) # loss = tf.maximum(0.0, tf.math.abs(tgt_larg - pred_larg) - tf.math.abs(tgt_small - pred_small)) loss = tf.reduce_mean(loss) return loss
tensorflow.where
5,242
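The contrastive-loss seed above uses tf.where to pick the larger and smaller of two target tensors elementwise. A minimal sketch (TF 1.x assumed; the values are illustrative):

```python
import tensorflow as tf  # TF 1.x assumed

tgt1 = tf.constant([1.0, 0.2])
tgt2 = tf.constant([0.5, 0.8])
geq = tf.cast((tgt1 - tgt2) > 0, tf.bool)  # True where tgt1 > tgt2
tgt_larg = tf.where(geq, tgt1, tgt2)       # elementwise max
tgt_small = tf.where(geq, tgt2, tgt1)      # elementwise min

with tf.Session() as sess:
    print(sess.run([tgt_larg, tgt_small]))  # [1.0, 0.8], [0.5, 0.2]
```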
import tensorflow as tf if self.data_shape is None: self.data_shape = output_shapes # Handle for the feedable iterator self.handle = tf.placeholder(tf.string, shape=[]) iterator = tf.data.Iterator.from_string_handle( self.handle, output_types, output_shapes) data = iterator.get_next() # Build the actual training and evaluation models self._train_graph(data) self._eval_graph(data) self.summaries = tf.summary.merge_all() # Prediction network with feed_dict self.pred_in = {i: tf.placeholder(self.input_spec[i]['type'], shape=s, name=i) for i, s in self.data_shape.items()} self._pred_graph(self.pred_in) # Start session sess_config = tf.ConfigProto(device_count={'GPU': self.n_gpus}) sess_config.gpu_options.allow_growth = True self.sess = tf.Session(config=sess_config) # Register tf dataset handles if self.datasets: self.dataset_handles = {} for n, i in self.dataset_iterators.items(): self.dataset_handles[n] = self.sess.run(i.string_handle())
tensorflow.placeholder
5,243
from tensorflow.python.framework import ops if train: with tf.name_scope(name, "AssignMovingAvg", [mean, cur_mean, decay]): with ops.colocate_with(mean): new_mean = tf.assign_sub( mean, tf.check_numerics(decay * (mean - cur_mean), "NaN in moving mean.")) with tf.name_scope(name, "AssignMovingAvg", [var, cur_var, decay]): with ops.colocate_with(var): new_var = tf.assign_sub( var, tf.check_numerics(decay * (var - cur_var), "NaN in moving variance.")) with tf.name_scope(name, "IncrementTime", [step]): with ops.colocate_with(step):
tensorflow.python.framework.ops.colocate_with
5,244
import tensorflow as tf if not vars_to_load: raise ValueError('Variables to load is empty.') return tf.train.Scaffold() # Batch norm requires update_ops to be added as a train_op dependency. update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) grads_and_vars = optimizer.compute_gradients(total_loss, var_list) gradients, variables = zip(*grads_and_vars) grads_and_vars = [] # Special treatment for biases (beta is named as bias in reference model)
tensorflow.get_collection
5,245
import tensorflow as tf Aw = (self.r + self.alpha * self.ri) - self.worker_vf worker_vf_loss = .5 * tf.reduce_sum(tf.square(Aw))
tensorflow.square
5,246
import tensorflow as tf train_op=train_op, scaffold_fn=scaffold_fn) elif mode == tf.estimator.ModeKeys.EVAL: def metric_fn(per_example_loss, label_ids, logits, is_real_example): predictions = tf.argmax(logits, axis=-1, output_type=tf.int32) accuracy = tf.metrics.accuracy( labels=label_ids, predictions=predictions, weights=is_real_example) loss = tf.metrics.mean(values=per_example_loss, weights=is_real_example) return {
tensorflow.argmax
5,247
import tensorflow as tf def testParallelAssignWithoutLocking(self): with self.test_session() as sess: ones_t = tf.fill([1024, 1024], float(1)) p = tf.Variable(tf.zeros([1024, 1024])) assigns = [tf.assign(p, tf.mul(ones_t, float(i)), False) for i in range(1, 21)] tf.initialize_all_variables().run() def run_assign(assign_op): sess.run(assign_op) threads = [self.checkedThread(target=run_assign, args=(assign_op,)) for assign_op in assigns]
tensorflow.initialize_all_variables
5,248
import tensorflow as tf for hop_edge_types, count in zip(edge_types, counts): neighbors, weights, types = sample_neighbor( neighbors_list[-1], hop_edge_types, count, default_node=default_node) neighbors_list.append(tf.reshape(neighbors, [-1])) weights_list.append(tf.reshape(weights, [-1])) type_list.append(tf.reshape(types, [-1])) return neighbors_list, weights_list, type_list
tensorflow.reshape
5,249
import tensorflow as tf import dnnlib.tflib as tflib from functools import partial def create_stub(name, batch_size): return tf.constant(0, dtype='float32', shape=(batch_size, 0)) def create_variable_for_generator(name, batch_size): return tf.get_variable('learnable_dlatents',
tensorflow.constant
5,250
import tensorflow as tf class ParameterizedTruncatedNormalGpuTest(ParameterizedTruncatedNormalTest): _use_gpu = True # Benchmarking code def parameterized_vs_naive(shape, num_iters, use_gpu=False): np.random.seed(1618) # Make it reproducible. # No CSE/CF. optimizer_options = tf.OptimizerOptions(opt_level=tf.OptimizerOptions.L0) config = tf.ConfigProto( graph_options=tf.GraphOptions(optimizer_options=optimizer_options)) with tf.Session(config=config) as sess: with tf.device("/cpu:0" if not use_gpu else None): param_op = tf.group(random_ops.parameterized_truncated_normal(shape)) naive_op = tf.group(random_ops.truncated_normal(shape)) # Burn-in to avoid session setup costs in the timing. sess.run(param_op) sess.run(param_op) param_dt = timeit.timeit(lambda: sess.run(param_op), number=num_iters) sess.run(naive_op) sess.run(naive_op) naive_dt = timeit.timeit(lambda: sess.run(naive_op), number=num_iters) return param_dt, naive_dt
tensorflow.Session
5,251
import tensorflow as tf """ def mlp_actor_critic(x, a, hidden_sizes=(400,300), activation=tf.nn.relu, output_activation=tf.tanh, action_space=None, dropout_rate=0, nn_type='mlp_variational'): act_dim = a.shape.as_list()[-1] act_limit = action_space.high[0] if nn_type == 'mlp': with tf.variable_scope('pi'): pi = act_limit * mlp(x, list(hidden_sizes) + [act_dim], activation, output_activation) with tf.variable_scope('q1'): q1 = tf.squeeze(mlp(tf.concat([x, a], axis=-1), list(hidden_sizes) + [1], activation, None), axis=1) with tf.variable_scope('q2'): q2 = tf.squeeze(mlp(tf.concat([x, a], axis=-1), list(hidden_sizes) + [1], activation, None), axis=1) with tf.variable_scope('q1', reuse=True): q1_pi = tf.squeeze(mlp(tf.concat([x, pi], axis=-1), list(hidden_sizes) + [1], activation, None), axis=1) elif nn_type == 'mlp_dropout': with tf.variable_scope('pi'): pi = act_limit * mlp_dropout(x, list(hidden_sizes)+[act_dim], activation, output_activation) with tf.variable_scope('q'): q = tf.squeeze(mlp_dropout(tf.concat([x,a], axis=-1), list(hidden_sizes)+[1], activation, None, dropout_rate), axis=1)
tensorflow.concat
5,252
import tensorflow as tf if n_bits < 8: rgb = tf.floor(rgb/(2**(8-n_bits)))
tensorflow.floor
5,253
import tensorflow as tf initializer=initialization.xavier_initializer( shape=bias_shape, dtype=self.dtype, uniform=self.normal_initializer, mask=None))) elif self.lesion_mu: setattr( self, 'mu_%s' % layer, tf.constant(0.)) else: setattr( self, 'mu_%s' % layer, tf.constant(1.)) if self.gamma: setattr( self,
tensorflow.constant
5,254
import tensorflow as tf logits = tf.matmul(input_tensor, output_weights, transpose_b=True) logits = tf.nn.bias_add(logits, output_bias) log_probs = tf.nn.log_softmax(logits, axis=-1) labels = tf.reshape(labels, [-1]) one_hot_labels = tf.one_hot(labels, depth=2, dtype=tf.float32) per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1) loss = tf.reduce_mean(per_example_loss) return (loss, per_example_loss, log_probs) def gather_indexes(sequence_tensor, positions):
tensorflow.reduce_sum
5,255
import tensorflow as tf tf.nn.nce_loss( weights=nce_weights, biases=nce_biases, inputs=embed, labels=train_labels, num_sampled=num_sampled, num_classes=vocabulary_size, **nce_loss_args)) self.outputs = embed self.normalized_embeddings = tf.nn.l2_normalize(embeddings, 1) self.all_layers = [self.outputs] self.all_params = [embeddings, nce_weights, nce_biases] self.all_drop = {} class EmbeddingInputlayer(Layer): """ The :class:`EmbeddingInputlayer` class is a look-up table for word embedding.
tensorflow.nn.l2_normalize
5,256
import tensorflow as tf loss_summary = tf.summary.scalar('Loss', cross_entropy) acc_summary = tf.summary.scalar('Accuracy', accuracy) # summaries for TensorBoard visualisation validation_summary = tf.summary.merge([img_summary, acc_summary]) training_summary = tf.summary.merge([img_summary, loss_summary]) test_summary = tf.summary.merge([img_summary, acc_summary]) # saver for checkpoints saver = tf.train.Saver(tf.global_variables(), max_to_keep=1) with tf.Session() as sess: summary_writer = tf.summary.FileWriter(run_log_dir + '_train', sess.graph, flush_secs=5) summary_writer_validation = tf.summary.FileWriter(run_log_dir + '_validate', sess.graph, flush_secs=5) sess.run(tf.global_variables_initializer()) sess.run(tf.local_variables_initializer())
tensorflow.global_variables
5,257
import tensorflow as tf tf.add_n(sharded_num) / tf.maximum(1.0, tf.add_n(sharded_den))) else: mean_loss = tf.reduce_mean(all_shards)
tensorflow.reduce_mean
5,258
import tensorflow as tf ) print_obj(func_name, "squared_difference", squared_difference) # Get gradient penalty scalar. gradient_penalty = tf.reduce_mean( input_tensor=squared_difference, name="gradient_penalty" ) print_obj(func_name, "gradient_penalty", gradient_penalty)
tensorflow.reduce_mean
5,259
import tensorflow as tf init = tf.initialize_all_variables() config=tf.ConfigProto()
tensorflow.ConfigProto
5,260
from tensorflow.python.ops import nn """ in_top_k = math_ops.to_float(nn.in_top_k(predictions, labels, k))
tensorflow.python.ops.nn.in_top_k
5,261
import tensorflow as tf ... (?, 8) """ def __init__(self, inputs=None, depth=None, on_value=None, off_value=None, axis=None, dtype=None, name='input'): super(OneHotInputLayer, self).__init__(prev_layer=None, name=name) logging.info("OneHotInputLayer %s: %s" % (self.name, inputs.get_shape())) # assert depth != None, "depth is not given" if depth is None: logging.info(" [*] depth == None the number of output units is undefined") self.outputs = tf.one_hot(inputs, depth, on_value=on_value, off_value=off_value, axis=axis, dtype=dtype) self.all_layers = [] self.all_params = [] self.all_drop = {} class Word2vecEmbeddingInputlayer(Layer): """ The :class:`Word2vecEmbeddingInputlayer` class is a fully connected layer. For Word Embedding, words are input as integer index. The output is the embedded word vector.
tensorflow.one_hot
5,262
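The OneHotInputLayer seed above wraps tf.one_hot to turn integer ids into dense vectors. A minimal sketch (TF 1.x assumed; the ids and depth are illustrative):

```python
import tensorflow as tf  # TF 1.x assumed

labels = tf.constant([0, 2, 1])
encoded = tf.one_hot(labels, depth=3, on_value=1.0, off_value=0.0, axis=-1)

with tf.Session() as sess:
    print(sess.run(encoded))
    # [[1. 0. 0.]
    #  [0. 0. 1.]
    #  [0. 1. 0.]]
```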
from tensorflow.python.eager import context features["inputs"] = tf.expand_dims(features["inputs"], 2) if not self.has_input: features["partial_targets"] = tf.to_int64(features["inputs"]) # Save the targets in a var and reassign it after the tf.while loop to avoid # having targets being in a 'while' frame. This ensures targets when used # in metric functions stays in the same frame as other vars. targets_old = features.get("targets", None) target_modality = self._problem_hparams.target_modality def infer_step(recent_output, recent_logits, unused_loss): """Inference step.""" if not context.in_eager_mode(): recent_output.set_shape([None, None, None, 1]) padded = tf.pad(recent_output, [[0, 0], [0, 1], [0, 0], [0, 0]]) features["targets"] = padded # This is inefficient in that it generates samples at all timesteps, # not just the last one, except if target_modality is pointwise. samples, logits, losses = self.sample(features) # Concatenate the already-generated recent_output with last timestep # of the newly-generated samples. if target_modality.top_is_pointwise: cur_sample = samples[:, -1, :, :] else:
tensorflow.python.eager.context.in_eager_mode
5,263
import tensorflow as tf # self.simloss /= tf.reduce_mean(var) print(tgtimg_z.get_shape()) self.out = output_h4# + contextimg#tf.nn.tanh(h4) self.out2 = truthoutput_h4 self.recon1 = tf.nn.l2_loss(tgtimg - self.out) self.recon2 = tf.nn.l2_loss(tgtimg - self.out2) self.loss = self.recon1 + self.recon2 + self.simloss if ablation_type == "None": self.loss = self.recon1 + self.recon2 + self.simloss elif ablation_type == "L2":
tensorflow.nn.l2_loss
5,264
import tensorflow as tf from tensorflow.python.framework import errors from tensorflow.python.ops import script_ops class PyOpTest(tf.test.TestCase): def testBasic(self): def my_func(x, y): return np.sinh(x) + np.cosh(y) # scalar with self.test_session(): x = tf.constant(1.0, tf.float32) y = tf.constant(2.0, tf.float32) z = tf.py_func(my_func, [x, y], [tf.float32]) self.assertEqual(z[0].eval(), my_func(1.0, 2.0).astype(np.float32)) # array with self.test_session(): x = tf.constant([1.0, 2.0], tf.float64) y = tf.constant([2.0, 3.0], tf.float64) z = tf.py_func(my_func, [x, y], [tf.float64]) self.assertAllEqual( z[0].eval(), my_func([1.0, 2.0], [2.0, 3.0]).astype(np.float64)) # a bit exotic type (complex64) with self.test_session(): x = tf.constant(1+2j, tf.complex64)
tensorflow.py_func
5,265
import tensorflow as tf self.eval_loss = interpreter.l2_loss(target, eval_decode, name='predictive_reconstruction') self.eval_decode = self._tensor_to_image(eval_decode) self.loss_ae = interpreter.l2_loss(target, model.decode, name='reconstruction') self.decode = self._tensor_to_image(model.decode) self.losses = [self.loss_ae] def build_predictive_model(self): self.build_ae_model() # builds on top of AE model. Due to auxilary operations init self.inputs = tf.placeholder(tf.uint8, [3] + self.batch_shape, name='inputs') self.targets = tf.placeholder(tf.uint8, [3] + self.batch_shape, name='targets') # transform inputs self.raw_inputs = [self._image_to_tensor(self.inputs[i]) for i in range(3)] self.raw_targets = [self._image_to_tensor(self.targets[i]) for i in range(3)] # build AE objective for triplet config = self.model.config models = [interpreter.build_autoencoder(x, config) for x in self.raw_inputs] reco_losses = [1./3 * interpreter.l2_loss(models[i].decode, self.raw_targets[i]) for i in range(3)] # business as usual
tensorflow.placeholder
5,266
import tensorflow as tf block_conv_3 = self.conv_layer(block_relu_2, 1, channel_list[1], channel_list[2], 1, name + "_branch2c") block_res = tf.add(block_conv_input, block_conv_3) relu = tf.nn.relu(block_res) return relu def avg_pool(self, bottom, kernal_size = 2, stride = 2, name = "avg"): return tf.nn.avg_pool(bottom, ksize=[1, kernal_size, kernal_size, 1], strides=[1, stride, stride, 1], padding='VALID', name=name) def max_pool(self, bottom, kernal_size = 2, stride = 2, name = "max"): return tf.nn.max_pool(bottom, ksize=[1, kernal_size, kernal_size, 1], strides=[1, stride, stride, 1], padding='SAME', name=name) def conv_layer(self, bottom, kernal_size, in_channels, out_channels, stride, name): with tf.variable_scope(name): filt, conv_biases = self.get_conv_var(kernal_size, in_channels, out_channels, name) conv = tf.nn.conv2d(bottom, filt, [1,stride,stride,1], padding='SAME') bias = tf.nn.bias_add(conv, conv_biases) tf.summary.histogram('weight', filt) tf.summary.histogram('bias', conv_biases)
tensorflow.nn.max_pool
5,267
import tensorflow as tf loss_summary = tf.summary.scalar('Loss', cross_entropy) acc_summary = tf.summary.scalar('Accuracy', accuracy) # summaries for TensorBoard visualisation validation_summary = tf.summary.merge([img_summary, acc_summary]) training_summary = tf.summary.merge([img_summary, loss_summary]) test_summary = tf.summary.merge([img_summary, acc_summary]) # saver for checkpoints saver = tf.train.Saver(tf.global_variables(), max_to_keep=1) with tf.Session() as sess: summary_writer = tf.summary.FileWriter(run_log_dir + '_train', sess.graph, flush_secs=5) summary_writer_validation = tf.summary.FileWriter(run_log_dir + '_validate', sess.graph, flush_secs=5) sess.run(tf.global_variables_initializer()) sess.run(tf.local_variables_initializer()) # Training and validation for step in range(FLAGS.max_steps): # Training: Backpropagation using train set (trainImages, trainLabels) = cifar.getTrainBatch() (testImages, testLabels) = cifar.getTestBatch() _, summary_str = sess.run([optimiser, training_summary], feed_dict={x: trainImages, y_: trainLabels, train: True})
tensorflow.summary.FileWriter
5,268
import tensorflow as tf import tensorflow as tf import os #from PIL import Image import random import numpy as np from datetime import datetime from sklearn.metrics import confusion_matrix from sklearn.metrics import classification_report slim = tf.contrib.slim global first first = True classnum=12 testnum = tf.placeholder(tf.int32) trainnum = tf.placeholder(tf.int32) validnum = tf.placeholder(tf.int32) learnrate = tf.placeholder(tf.float32) def getinputs(path): filename_queue=tf.train.string_input_producer([path]) reader=tf.TFRecordReader() _,serialized_example=reader.read(filename_queue) features=tf.parse_single_example(serialized_example, features={ 'label':tf.FixedLenFeature([], tf.int64), 'img_raw' : tf.FixedLenFeature([], tf.string), }) image=tf.decode_raw(features['img_raw'],tf.uint8)
tensorflow.placeholder
5,269
import tensorflow as tf # logloss not being an upper bound on the indicator function. weighted_loss = weights * losses_utils.weighted_surrogate_loss( labels, logits, surrogate_type=surrogate_type, positive_weights=1.0, negative_weights=lambdas) maybe_log2 = tf.log(2.0) if surrogate_type == 'xent' else 1.0 maybe_log2 = tf.cast(maybe_log2, logits.dtype.base_dtype) lambda_term = lambdas * target_rate * (1.0 - label_priors) * maybe_log2 loss = tf.reshape(weighted_loss - lambda_term, original_shape) other_outputs = { 'lambdas': lambdas_variable,
tensorflow.log
5,270
from tensorflow.python.framework import ops noise_shape: A 1-D `Tensor` of type `int32`, representing the shape for randomly generated keep/drop flags. seed: A Python integer. Used to create random seeds. See [`set_random_seed`](../../api_docs/python/constant_op.md#set_random_seed) for behavior. name: A name for this operation (optional). Returns: A Tensor of the same shape of `x`. Raises: ValueError: If `keep_prob` is not in `(0, 1]`. """ with ops.op_scope([x], name, "dropout") as name: x = ops.convert_to_tensor(x, name="x") if isinstance(keep_prob, float) and not 0 < keep_prob <= 1: raise ValueError("keep_prob must be a scalar tensor or a float in the " "range (0, 1], got %g" % keep_prob) keep_prob = ops.convert_to_tensor( keep_prob, dtype=x.dtype, name="keep_prob") keep_prob.get_shape().assert_is_compatible_with(tensor_shape.scalar()) noise_shape = noise_shape or array_ops.shape(x) # uniform [keep_prob, 1.0 + keep_prob) random_tensor = keep_prob random_tensor += random_ops.random_uniform(
tensorflow.python.framework.ops.op_scope
5,271
import tensorflow as tf tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1]) flat_positions = tf.reshape(positions + flat_offsets, [-1]) flat_sequence_tensor = tf.reshape(sequence_tensor, [batch_size * seq_length, width])
tensorflow.reshape
5,272
from tensorflow.contrib.framework.python.ops import variables as contrib_variables self._monitors = monitors def begin(self): self._last_step = None self._global_step_tensor = contrib_variables.get_global_step() for m in self._monitors: m.begin(max_steps=None)
tensorflow.contrib.framework.python.ops.variables.get_global_step
5,273
import tensorflow as tf with tf.Session() as sess: summary_writer = tf.summary.FileWriter(run_log_dir + '_train', sess.graph, flush_secs=5) summary_writer_validation = tf.summary.FileWriter(run_log_dir + '_validate', sess.graph, flush_secs=5) sess.run(tf.global_variables_initializer()) sess.run(tf.local_variables_initializer()) # Training and validation for step in range(FLAGS.max_steps): # Training: Backpropagation using train set
tensorflow.local_variables_initializer
5,274
import tensorflow as tf layer_a1 = tf.layers.dense(state_in, 512, tf.nn.relu, kernel_regularizer=reg) layer_a2 = tf.layers.dense(layer_a1, 256, tf.nn.relu, kernel_regularizer=reg) lstm_a = tf.nn.rnn_cell.LSTMCell(num_units=256) lstm_a = tf.nn.rnn_cell.DropoutWrapper(lstm_a, output_keep_prob=self.keep_prob) state_init_a = lstm_a.zero_state(batch_size=batch_size, dtype=tf.float32) lstm_ain = tf.expand_dims(layer_a2, axis=1) out_a, state_final_a = tf.nn.dynamic_rnn(cell=lstm_a, inputs=lstm_ain, initial_state=state_init_a) cell_out_a = tf.reshape(out_a, [-1, 256]) mu = tf.layers.dense(cell_out_a, self.a_dim, tf.nn.tanh, kernel_regularizer=reg) sigma = tf.layers.dense(cell_out_a, self.a_dim, tf.nn.softplus, kernel_regularizer=reg)
tensorflow.expand_dims
5,275
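The actor-network seed above inserts a length-1 time axis with tf.expand_dims so a dense output can feed tf.nn.dynamic_rnn. A minimal sketch (TF 1.x assumed; the feature size is illustrative):

```python
import tensorflow as tf  # TF 1.x assumed

features = tf.placeholder(tf.float32, [None, 256])
# [batch, features] -> [batch, 1, features], the layout dynamic_rnn expects.
lstm_in = tf.expand_dims(features, axis=1)
```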
import tensorflow as tf tgt_small = tf.where(geq, tgt2, tgt1) pred_larg = tf.where(geq, pred1, pred2) pred_small = tf.where(geq, pred2, pred1)
tensorflow.where
5,276
import tensorflow as tf lm_emb = tf.stack([tf.concat([word_emb, word_emb], -1), lm_embeddings["lstm_outputs1"], lm_embeddings["lstm_outputs2"]], -1) # [num_sentences, max_sentence_length, 1024, 3] lm_emb_size = util.shape(lm_emb, 2) lm_num_layers = util.shape(lm_emb, 3) with tf.variable_scope("lm_aggregation"): self.lm_weights = tf.nn.softmax(tf.get_variable("lm_scores", [lm_num_layers], initializer=tf.constant_initializer(0.0))) self.lm_scaling = tf.get_variable("lm_scaling", [], initializer=tf.constant_initializer(1.0)) flattened_lm_emb = tf.reshape(lm_emb, [num_sentences * max_sentence_length * lm_emb_size, lm_num_layers]) flattened_aggregated_lm_emb = tf.matmul(flattened_lm_emb, tf.expand_dims(self.lm_weights, 1)) # [num_sentences * max_sentence_length * emb, 1] aggregated_lm_emb = tf.reshape(flattened_aggregated_lm_emb, [num_sentences, max_sentence_length, lm_emb_size]) aggregated_lm_emb *= self.lm_scaling context_emb_list.append(aggregated_lm_emb) context_emb = tf.concat(context_emb_list, 2) # [num_sentences, max_sentence_length, emb] head_emb = tf.concat(head_emb_list, 2) # [num_sentences, max_sentence_length, emb] context_emb = tf.nn.dropout(context_emb, self.lexical_dropout) # [num_sentences, max_sentence_length, emb] head_emb = tf.nn.dropout(head_emb, self.lexical_dropout) # [num_sentences, max_sentence_length, emb] text_len_mask = tf.sequence_mask(text_len, maxlen=max_sentence_length) # [num_sentence, max_sentence_length]
tensorflow.reshape
5,277
import tensorflow as tf # https://stackoverflow.com/questions/37063952/confused-by-the-behavior-of-tf-cond for # a more detailed discussion. def perturb_vars(original_scope, perturbed_scope): all_vars = U.scope_vars(U.absolute_scope_name("q_func")) all_perturbed_vars = U.scope_vars(U.absolute_scope_name("perturbed_q_func")) assert len(all_vars) == len(all_perturbed_vars) perturb_ops = [] for var, perturbed_var in zip(all_vars, all_perturbed_vars): if param_noise_filter_func(perturbed_var): # Perturb this variable. op = tf.assign(perturbed_var, var + tf.random_normal(shape=tf.shape(var), mean=0., stddev=param_noise_scale)) else: # Do not perturb, just assign. op = tf.assign(perturbed_var, var) perturb_ops.append(op) assert len(perturb_ops) == len(all_vars) return tf.group(*perturb_ops) # Set up functionality to re-compute `param_noise_scale`. This perturbs yet another copy # of the network and measures the effect of that perturbation in action space. If the perturbation # is too big, reduce scale of perturbation, otherwise increase. q_values_adaptive = q_func(observations_ph.get(), num_actions, scope="adaptive_q_func") perturb_for_adaption = perturb_vars(original_scope="q_func", perturbed_scope="adaptive_q_func") kl = tf.reduce_sum(tf.nn.softmax(q_values) * (tf.log(tf.nn.softmax(q_values)) - tf.log(tf.nn.softmax(q_values_adaptive))), axis=-1) mean_kl = tf.reduce_mean(kl) def update_scale():
tensorflow.assign
5,278
import tensorflow as tf bert_config, model.get_pooled_output(), next_sentence_labels) total_loss = masked_lm_loss + next_sentence_loss tvars = tf.trainable_variables() initialized_variable_names = {} scaffold_fn = None if init_checkpoint: (assignment_map, initialized_variable_names ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint) if use_tpu: def tpu_scaffold(): tf.train.init_from_checkpoint(init_checkpoint, assignment_map) return tf.train.Scaffold() scaffold_fn = tpu_scaffold else: tf.train.init_from_checkpoint(init_checkpoint, assignment_map) tf.logging.info("**** Trainable Variables ****") for var in tvars: init_string = "" if var.name in initialized_variable_names: init_string = ", *INIT_FROM_CKPT*" tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape, init_string) output_spec = None
tensorflow.train.Scaffold
5,279
import tensorflow as tf Args: var: tensor to regularize. weight_l1: an optional weight to modulate the l1 loss. weight_l2: an optional weight to modulate the l2 loss. name: Optional scope/name for op_scope. Returns: the l1+L2 loss op. """ with tf.name_scope(name): weight_l1_t = tf.convert_to_tensor(weight_l1, dtype=var.dtype.base_dtype, name='weight_l1') weight_l2_t = tf.convert_to_tensor(weight_l2, dtype=var.dtype.base_dtype, name='weight_l2') reg_l1 = tf.multiply(weight_l1_t, tf.reduce_sum(tf.abs(var)), name='value_l1') reg_l2 = tf.multiply(weight_l2_t, tf.nn.l2_loss(var), name='value_l2') return tf.add(reg_l1, reg_l2, name='value') def l1_regularizer(scale, name='l1_regularizer'): """Returns a function that can be used to apply L1 regularization to weights. L1 regularization encourages sparsity. Args: scale: A scalar multiplier `Tensor`. 0.0 disables the regularizer. name: An optional name/scope name. Returns:
tensorflow.nn.l2_loss
5,280
import tensorflow as tf self.assertEqual((2, 2), res[0][0].h.shape) self.assertEqual((2, 2), res[0][1].c.shape) self.assertEqual((2, 2), res[0][1].h.shape) # pylint: disable=unused-variable,invalid-name def testDynamicAttentionDecoderStateIsTuple(self): with self.test_session() as sess: with tf.variable_scope( "root", initializer=tf.constant_initializer(0.5)): cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=True) cell = tf.nn.rnn_cell.MultiRNNCell(cells=[cell] * 2, state_is_tuple=True) inp = tf.constant(0.5, shape=[2, 2, 2]) enc_outputs, enc_state = tf.nn.rnn(cell, inp, dtype=tf.float32) attn_states = tf.concat(1, [tf.reshape(e, [-1, 1, cell.output_size]) for e in enc_outputs]) dec_inp = [tf.constant(0.4, shape=[2, 2])] * 3 dec, mem = tf.nn.seq2seq.attention_decoder( dec_inp, enc_state, attn_states, cell, output_size=4) sess.run([tf.global_variables_initializer()]) res = sess.run(dec) self.assertEqual(3, len(res)) self.assertEqual((2, 4), res[0].shape) res = sess.run([mem]) self.assertEqual(2, len(res[0])) self.assertEqual((2, 2), res[0][0].c.shape)
tensorflow.reshape
5,281
import tensorflow as tf if FLAGS.max_seq_length > bert_config.max_position_embeddings: raise ValueError( "Cannot use sequence length %d because the BERT model " "was only trained up to sequence length %d" % (FLAGS.max_seq_length, bert_config.max_position_embeddings)) tf.gfile.MakeDirs(FLAGS.output_dir) task_name = FLAGS.task_name.lower() if task_name not in processors: raise ValueError("Task not found: %s" % (task_name))
tensorflow.gfile.MakeDirs
5,282
import tensorflow as tf if not full_cov: fvar = tf.transpose(fvar) # N x R
tensorflow.transpose
5,283
import tensorflow as tf # ddi ddi_dataset = dataset.batch(n_ddi_batch) ddi_batch = ddi_dataset.make_one_shot_iterator().get_next() # post processing im = self.post_process(training_batch) ddi_im = self.post_process(ddi_batch) self.im = im self.ddi_im = ddi_im def data_map(self, img_path): n_bits = config.model.data.n_bits n_bins = 2**n_bits rgb = tf.image.decode_png(tf.read_file(img_path), channels=3, dtype=tf.uint8) h = config.model.data.dimensions.h w = config.model.data.dimensions.w c = config.model.data.dimensions.c # rgb.set_shape([h,w,c]) # don't set because going to crop anyway # crop for lsun 96, see realnvp and glow for specifics rgb = tf.image.random_crop(rgb,size=[h,w,c]) # crop for patch training crop_h = h//self.crop_factor crop_w = w//self.crop_factor rgb = tf.image.random_crop(rgb,size=[crop_h,crop_w,c])
tensorflow.read_file
5,284
import tensorflow as tf # Run the decoder RNN cell. cell_output = decoder state cell_output, state_t = self.cell(x, state_t_1) context_t, attn_dist, coverage_t = self.attention(state_t, options.attention_vec_size, encoder_states, encoder_features, passage_mask, v, w_c=w_c, use_coverage=options.use_coverage, coverage=coverage_t_1) # Calculate p_gen, Equation (8) if options.pointer_gen: with tf.variable_scope('calculate_pgen'): p_gen = linear([context_t, state_t.c, state_t.h, x], 1, True) # [batch_size, 1] p_gen = tf.sigmoid(p_gen) # Concatenate the cell_output (= decoder state) and the context vector, and pass them through a linear layer # This is V[s_t, h*_t] + b in the paper with variable_scope.variable_scope("AttnOutputProjection"): output_t = linear([cell_output] + [context_t], options.gen_hidden_size, True)
tensorflow.variable_scope
5,285
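tf.variable_scope, indexed by this row, namespaces variables and controls reuse in TF 1.x-style graph code (tf.compat.v1.variable_scope in TF 2.x); a minimal sketch:

import tensorflow as tf

with tf.variable_scope('proj'):
    w = tf.get_variable('w', shape=[8, 4])         # creates the variable proj/w
with tf.variable_scope('proj', reuse=True):
    w_again = tf.get_variable('w', shape=[8, 4])   # fetches the existing proj/w instead of failing
assert w.name == w_again.name                      # both are 'proj/w:0'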
import tensorflow as tf attn_states = tf.concat(1, [tf.reshape(e, [-1, 1, cell.output_size]) for e in enc_outputs]) dec_inp = [tf.constant(0.4, shape=[2, 2])] * 3 dec, mem = tf.nn.seq2seq.attention_decoder( dec_inp, enc_state, attn_states, cell, output_size=4, num_heads=2) sess.run([tf.global_variables_initializer()]) res = sess.run(dec) self.assertEqual(3, len(res)) self.assertEqual((2, 4), res[0].shape) res = sess.run([mem]) self.assertEqual((2, 2), res[0].shape)
tensorflow.global_variables_initializer
5,286
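tf.global_variables_initializer builds a single op that initializes every global variable; in TF 1.x session code like the test above it has to run before any variable is read. A minimal sketch:

import tensorflow as tf

v = tf.Variable([1.0, 2.0], name='v')
init_op = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init_op)          # initialize before use
    print(sess.run(v))         # [1. 2.]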
import tensorflow as tf c = c*(1-m) h = h*(1-m) z = _ln(tf.matmul(x, wx), gx, bx) + _ln(tf.matmul(h, wh), gh, bh) + b i, f, o, u = tf.split(axis=1, num_or_size_splits=4, value=z) i = tf.nn.sigmoid(i) f = tf.nn.sigmoid(f) o = tf.nn.sigmoid(o) u = tf.tanh(u)
tensorflow.nn.sigmoid
5,287
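tf.nn.sigmoid squashes each element into (0, 1), which is why the LSTM gates i, f, and o above pass through it; a quick sketch with illustrative values:

import tensorflow as tf

z = tf.constant([-2.0, 0.0, 2.0])
g = tf.nn.sigmoid(z)   # 1 / (1 + exp(-z)) ~= [0.119, 0.5, 0.881]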
import tensorflow as tf ATTENTION_SIZE, mask, softmax_stag=1, stag=stag, mode='LIST') self_attention_tmp = tf.reduce_sum(self_attention_tmp, 1) output = output.write(i, self_attention_tmp)
tensorflow.reduce_sum
5,288
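tf.reduce_sum collapses the given axis, exactly as the snippet does with axis 1 on the self-attention output; a sketch with made-up values:

import tensorflow as tf

x = tf.constant([[1., 2., 3.],
                 [4., 5., 6.]])
total = tf.reduce_sum(x)             # 21.0 -- sums every element
per_row = tf.reduce_sum(x, axis=1)   # [6., 15.] -- axis 1 is collapsed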
import tensorflow as tf mask_c = tf.expand_dims(self.c_mask, 2) S_T = tf.transpose(tf.nn.softmax(mask_logits(S, mask=mask_c), dim=1), (0, 2, 1)) self.c2q = tf.matmul(S_, self.q_embed_encoding) self.q2c = tf.matmul(tf.matmul(S_, S_T), self.c_embed_encoding) self.attention_outputs = [self.c_embed_encoding, self.c2q, self.c_embed_encoding * self.c2q, self.c_embed_encoding * self.q2c] N, PL, QL, CL, d, dc, nh = self._params() if self.config.fix_pretrained_vector: dc = self.char_mat.get_shape()[-1] with tf.variable_scope("Model_Encoder_Layer"): inputs = tf.concat(self.attention_outputs, axis=-1) self.enc = [conv(inputs, d, name="input_projection")] for i in range(3): if i % 2 == 0: self.enc[i] = tf.nn.dropout(self.enc[i], 1.0 - self.dropout) self.enc.append( residual_block(self.enc[i], num_blocks=1, num_conv_layers=2, kernel_size=5, mask=self.c_mask, num_filters=d, num_heads=nh, seq_len=self.c_len, scope="Model_Encoder", bias=False, reuse=True if i > 0 else None, dropout=self.dropout) )
tensorflow.nn.dropout
5,289
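In the TF 1.x signature used above, the second argument of tf.nn.dropout is the keep probability (hence the 1.0 - self.dropout), and kept units are rescaled by 1/keep_prob; a small sketch:

import tensorflow as tf

h = tf.ones([2, 4])
h_drop = tf.nn.dropout(h, 0.75)   # keep ~75% of units, scale survivors by 1/0.75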
import tensorflow as tf cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits, onehot_labels, name="xentropy") loss = tf.reduce_mean(cross_entropy, name="xentropy_mean") tf.scalar_summary(loss.op.name, loss)
tensorflow.reduce_mean
5,290
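tf.reduce_mean with no axis argument averages everything into a scalar, which is the usual way to turn per-example cross-entropy into a single batch loss, as above; illustrative values:

import tensorflow as tf

per_example = tf.constant([0.2, 0.8, 1.4])
loss = tf.reduce_mean(per_example)   # 0.8 -- scalar mean over the batch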
import tensorflow as tf # ------------------------------------------------ # Recurrent Dale's law weight matrix: self.Dale_rec = tf.get_variable('Dale_rec', [N_rec, N_rec], initializer=tf.constant_initializer(self.dale_rec), trainable=False) # Output Dale's law weight matrix: self.Dale_out = tf.get_variable('Dale_out', [N_rec, N_rec], initializer=tf.constant_initializer(self.dale_out), trainable=False) # Connectivity weight matrices: self.input_Connectivity = tf.get_variable('input_Connectivity', [N_rec, N_in], initializer=tf.constant_initializer( self.input_connectivity_mask), trainable=False)
tensorflow.constant_initializer
5,291
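tf.constant_initializer accepts a scalar or a full numpy array, which is how the snippet loads its fixed Dale / connectivity masks into non-trainable variables; a sketch with hypothetical names and a TF 1.x-style tf.get_variable:

import numpy as np
import tensorflow as tf

mask = np.eye(3, dtype=np.float32)                              # hypothetical fixed mask
m = tf.get_variable('mask', shape=[3, 3],
                    initializer=tf.constant_initializer(mask),
                    trainable=False)                            # filled from the array, never updated
b = tf.get_variable('bias', shape=[4],
                    initializer=tf.constant_initializer(0.0))   # filled with a scalar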
import tensorflow as tf if len(input_dims) == 4: _, input_h, input_w, num_channels = input_dims in_dim = input_h * input_w * num_channels flat_input = tf.reshape(input_data, [-1, in_dim]) else: in_dim = input_dims[-1] flat_input = input_data if initial_value is None: fc_weight = tf.get_variable("weights", shape=[in_dim, out_dim], initializer=tf.random_normal_initializer(mean=0., stddev=0.01)) fc_bias = tf.get_variable("bias", shape=[out_dim], initializer=tf.constant_initializer(0.0)) else: fc_weight = tf.get_variable("weights", initializer=initial_value[0]) fc_bias = tf.get_variable("bias", shape=[out_dim], initializer=initial_value[1]) if use_bias: output = tf.add(tf.matmul(flat_input, fc_weight), fc_bias) else: output = tf.matmul(flat_input, fc_weight) if non_linear_fn is None: return output else: activation = non_linear_fn(output)
tensorflow.get_variable
5,292
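TF 1.x tf.get_variable accepts either a shape plus an initializer object, or a concrete tensor as the initializer, in which case the shape is inferred and should not be passed. A sketch of the two modes, mirroring the snippet:

import tensorflow as tf

w = tf.get_variable('weights', shape=[16, 8],
                    initializer=tf.random_normal_initializer(mean=0.0, stddev=0.01))
b = tf.get_variable('bias_from_value',
                    initializer=tf.zeros([8]))   # shape [8] inferred from the tensor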
import tensorflow as tf def contra_traj_lossV6(pred, tgt, horizon=12): horizon_pred, horizon_tgt = horizon_sumV1(pred, horizon), horizon_sumV1(tgt, horizon) # horizon_pred, horizon_tgt = horizon_sumV2(pred, tgt, horizon) pred_flat1, pred_flat2 = tf.reshape(horizon_pred, [-1, 1]), tf.reshape(horizon_pred, [1, -1]) tgt_flat1, tgt_flat2 = tf.reshape(horizon_tgt, [-1, 1]), tf.reshape(horizon_tgt, [1, -1]) tgt_dif = tgt_flat1 - tgt_flat2 pred_dif = pred_flat1 - pred_flat2 geq = tf.cast(tgt_dif > 0, tf.bool)
tensorflow.reshape
5,293
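The [-1, 1] and [1, -1] reshapes above are the standard trick for building an all-pairs difference matrix by broadcasting; a sketch with a tiny vector:

import tensorflow as tf

v = tf.constant([1., 2., 4.])
col = tf.reshape(v, [-1, 1])   # shape [3, 1]
row = tf.reshape(v, [1, -1])   # shape [1, 3]
pairwise = col - row           # broadcasts to [3, 3]; entry (i, j) = v[i] - v[j]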
import tensorflow as tf print(x_prob.get_shape(), y.get_shape(), tf.reduce_sum(x_prob * y, axis=-1).get_shape()) return -tf.reduce_sum(x_prob * y, axis=-1) # higher the better def mse(x, y): x = x - tf.reduce_mean(x, axis=-1, keepdims=True) y = y - tf.reduce_mean(y, axis=-1, keepdims=True) return tf.reduce_sum((x-y)**2, axis=-1) # lower the better
tensorflow.reduce_mean
5,294
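Here tf.reduce_mean is called with keepdims=True so the per-row mean keeps a trailing axis of 1 and broadcasts cleanly when subtracted back, as in the mse helper above; a sketch:

import tensorflow as tf

x = tf.constant([[1., 2., 3.],
                 [4., 6., 8.]])
mean = tf.reduce_mean(x, axis=-1, keepdims=True)   # shape [2, 1]: [[2.], [6.]]
centered = x - mean                                # [[-1., 0., 1.], [-2., 0., 2.]]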
import tensorflow as tf # p = tf.print('cur_loss', [final_loss, avg_loss]) # with tf.control_dependencies([p]): # avg_loss = tf.identity(avg_loss) # print(final_loss, avg_loss) # p = tf.print('debug loss ', [final_loss, avg_loss]) # with tf.control_dependencies([p]): # avg_loss = 1. * avg_loss # print(avg_loss) # exit() return avg_loss def compute_contra_loss(pred1, pred2, tgt1, tgt2, hard_ratio=1.0): geq = tf.cast((tgt1 - tgt2) > 0, tf.bool) tgt_larg = tf.where(geq, tgt1, tgt2) tgt_small = tf.where(geq, tgt2, tgt1) pred_larg = tf.where(geq, pred1, pred2) pred_small = tf.where(geq, pred2, pred1) loss = tf.maximum(0., (tgt_larg - tgt_small) - (pred_larg - pred_small)) if hard_ratio < 1.0: hard_num = tf.cast(tools.shape(pred1)[0] * hard_ratio, tf.int32) loss = tf.reshape(loss, [-1]) hard_loss, _ = tf.math.top_k(loss, k=hard_num) return hard_loss return loss def compute_error_loss(pred1, pred2, tgt1, tgt2, hard_ratio=1.0):
tensorflow.where
5,295
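The three-argument tf.where used above is an elementwise select: where the condition is True it takes from the first tensor, otherwise from the second; a sketch with illustrative values:

import tensorflow as tf

cond = tf.constant([True, False, True])
a = tf.constant([1., 2., 3.])
b = tf.constant([10., 20., 30.])
picked = tf.where(cond, a, b)   # [1., 20., 3.]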
import tensorflow as tf checkpoints""") tf.flags.DEFINE_string('train_dir', None, """Path to session checkpoints.""") tf.flags.DEFINE_string('eval_dir', '/tmp/tf_cnn_benchmarks/eval', """Directory where to write eval event logs.""") tf.flags.DEFINE_string('pretrain_dir', None,
tensorflow.flags.DEFINE_string
5,296
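tf.flags.DEFINE_string registers a command-line flag with a name, default, and help string; in TF 1.x, tf.flags wraps the absl/app flags machinery and parsed values appear on tf.flags.FLAGS. A sketch with a hypothetical flag name:

import tensorflow as tf

tf.flags.DEFINE_string('summary_dir', '/tmp/summaries', 'Directory to write summaries.')  # hypothetical flag
FLAGS = tf.flags.FLAGS

def main(_):
    print(FLAGS.summary_dir)   # available once the command line has been parsed

if __name__ == '__main__':
    tf.app.run()               # parses sys.argv and calls main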
import tensorflow as tf if self.config["coarse_to_fine"]: top_antecedents, top_antecedents_mask, top_fast_antecedent_scores, top_antecedent_offsets = self.coarse_to_fine_pruning(top_span_emb, top_span_mention_scores, c) else: top_antecedents, top_antecedents_mask, top_fast_antecedent_scores, top_antecedent_offsets = self.distance_pruning(top_span_emb, top_span_mention_scores, c) dummy_scores = tf.zeros([k, 1]) # [k, 1] for i in range(self.config["coref_depth"]): with tf.variable_scope("coref_layer", reuse=(i > 0)): top_antecedent_emb = tf.gather(top_span_emb, top_antecedents) # [k, c, emb] top_antecedent_scores = top_fast_antecedent_scores + self.get_slow_antecedent_scores(top_span_emb, top_antecedents, top_antecedent_emb, top_antecedent_offsets, top_span_speaker_ids, genre_emb) # [k, c] top_antecedent_weights = tf.nn.softmax(tf.concat([dummy_scores, top_antecedent_scores], 1)) # [k, c + 1]
tensorflow.zeros
5,297
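tf.zeros builds a tensor of zeros with the requested shape (float32 unless a dtype is given), which is how the dummy antecedent scores above are created; a sketch:

import tensorflow as tf

dummy_scores = tf.zeros([5, 1])              # float32 zeros, shape [5, 1]
counts = tf.zeros([3, 3], dtype=tf.int32)    # dtype can be overridden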
import tensorflow as tf step = tf.to_float(global_step) warmup_steps = tf.to_float(params.warmup_steps) multiplier = params.hidden_size ** -0.5 decay = params.r0 * multiplier * tf.minimum((step + 1) * (warmup_steps ** -1.0) * (warmup_steps ** -0.5), (step + 1) ** -0.5) return learning_rate * decay elif params.learning_rate_decay == "rnnplus_warmup_decay": step = tf.to_float(global_step) n = float(len(params.device_list)) warmup_steps = tf.to_float(params.warmup_steps) decay = tf.minimum(1 + step * (n - 1) / (n * warmup_steps), tf.minimum(n, n * ((2*n) ** ((params.s - n * step) / (params.e - params.s))))) return tf.maximum(learning_rate * decay, 5e-6) elif params.learning_rate_decay == "piecewise_constant": return tf.train.piecewise_constant(tf.to_int32(global_step), params.learning_rate_boundaries, params.learning_rate_values) elif params.learning_rate_decay == "none": return learning_rate
tensorflow.to_float
5,298
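tf.to_float is the TF 1.x shorthand for casting to float32 (removed in TF 2.x in favour of tf.cast), which is why the schedule code above casts the integer global step with it; a sketch:

import tensorflow as tf

step = tf.constant(7)          # int32
step_f = tf.to_float(step)     # 7.0, the same as tf.cast(step, tf.float32)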
import tensorflow as tf def gradient_difference_loss(x, y): x_h_diff = x[:, 1:] - x[:, :-1] x_w_diff = x[:, :, 1:] - x[:, :, :-1] y_h_diff = y[:, 1:] - y[:, :-1] y_w_diff = y[:, :, 1:] - y[:, :, :-1] h_diff = tf.abs(tf.abs(x_h_diff) - tf.abs(y_h_diff)) w_diff = tf.abs(tf.abs(x_w_diff) - tf.abs(y_w_diff)) return h_diff + tf.transpose(w_diff) def leaky_relu(x, leak=0.2, name='leaky_relu'): with tf.variable_scope(name):
tensorflow.abs
5,299
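tf.abs is an elementwise absolute value, applied above to the pixel-gradient differences; a sketch:

import tensorflow as tf

d = tf.constant([[-1.5, 2.0],
                 [0.0, -3.0]])
a = tf.abs(d)   # [[1.5, 2.0], [0.0, 3.0]]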