Columns:
seed: string (lengths 25 to 2.89k)
seed_api: string (lengths 14 to 102)
index: int64 (range 0 to 14.8k)
import tensorflow as tf

stride = strides[i]
anchors = anchorlist[mask[i]]
x_shape = tf.shape(logits)
logits = tf.reshape(logits, (x_shape[0], x_shape[1], x_shape[2], len(anchors), num_classes + 5))
box_xy, box_wh, obj, cls = tf.split(logits, (2, 2, 1, num_classes), axis=-1)
box_xy = tf.sigmoid(box_xy)
obj = tf.sigmoid(obj)
cls = tf.sigmoid(cls)
anchors = anchors.astype(np.float32)
grid_shape = x_shape[1:3]
# print(grid_shape)
grid_h, grid_w = grid_shape[0], grid_shape[1]
tensorflow.sigmoid
4,600
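For quick reference, a minimal standalone sketch of the labeled API, tf.sigmoid, which maps values elementwise to (0, 1); illustrative values only, not part of the dataset:

import tensorflow as tf

x = tf.constant([-2.0, 0.0, 2.0])
y = tf.sigmoid(x)  # elementwise 1 / (1 + exp(-x)) -> approx [0.119, 0.5, 0.881]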
import tensorflow as tf

tt_rank = np.ones(shape[0].size + 1)
with tf.name_scope(name):
    tt_cores = [None] * num_dims
tensorflow.name_scope
4,601
import tensorflow as tf

new_shape = [1, 1, 1, channnel]
if ndims == 2:
    new_shape = [1, channnel]
if use_bias:
    beta = tf.get_variable('beta', [channnel], initializer=tf.constant_initializer())
    beta = tf.reshape(beta, new_shape)
else:
    beta = tf.zeros([1] * ndims, name='beta')
if use_scale:
    gamma = tf.get_variable('gamma', [channnel], initializer=tf.constant_initializer(1.0))
    gamma = tf.reshape(gamma, new_shape)
tensorflow.reshape
4,602
import tensorflow as tf

        padding="same", stride=1
    )
    x = self.__batch_norm("{}2b".format(bn_name_base), x)
    x = tf.nn.relu(x)
    x = self.__conv2d(
tensorflow.nn.relu
4,603
import tensorflow as tf

        shared_samples: a tensor of shape [num_samples, num_features].
        weight: the weight of the incoherence loss.
        name: the name of the tf summary.
    """
    with tf.name_scope(name):
        private_samples -= tf.reduce_mean(private_samples, 0)
        shared_samples -= tf.reduce_mean(shared_samples, 0)
        private_samples = tf.nn.l2_normalize(private_samples, 1)
        shared_samples = tf.nn.l2_normalize(shared_samples, 1)
        correlation_matrix = tf.matmul(private_samples, shared_samples, transpose_a=True)
        cost = tf.reduce_mean(tf.square(correlation_matrix)) * weight
        cost = tf.where(cost > 0, cost, 0, name='value')
        assert_op = tf.Assert(tf.is_finite(cost), [cost])
        with tf.control_dependencies([assert_op]):
            barrier = tf.no_op(name)
            return cost
tensorflow.matmul
4,604
from tensorflow.python.ops import parsing_ops

    'image/encoded': parsing_ops.FixedLenFeature(
tensorflow.python.ops.parsing_ops.FixedLenFeature
4,605
import tensorflow as tf

my_tensor = tf.zeros([1, 20])
sess.run(my_tensor)
my_var = tf.Variable(tf.zeros([1, 20]))
sess.run(my_var.initializer)
sess.run(my_var)
row_dim = 2
col_dim = 3
zero_var = tf.Variable(tf.zeros([row_dim, col_dim]))
ones_var = tf.Variable(tf.ones([row_dim, col_dim]))
sess.run(zero_var.initializer)
sess.run(ones_var.initializer)
print(sess.run(zero_var))
print(sess.run(ones_var))
zero_similar = tf.Variable(tf.zeros_like(zero_var))
ones_similar = tf.Variable(tf.ones_like(ones_var))
sess.run(ones_similar.initializer)
sess.run(zero_similar.initializer)
print(sess.run(ones_similar))
print(sess.run(zero_similar))
tensorflow.ones
4,606
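For quick reference, a minimal standalone sketch of the labeled API, tf.ones (and its companion tf.ones_like); illustrative only, not part of the dataset:

import tensorflow as tf

ones = tf.ones([2, 3])          # 2x3 tensor filled with 1.0
same = tf.ones_like(ones)       # new tensor with the same shape and dtype, also all 1.0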
import tensorflow as tf

        # record pruning masks & operations
        masks += [mask]
        prune_ops += [prune_op]

    return masks, tf.group(prune_ops)

def __calc_prune_ratio_dyn(self, prune_ratio_fnl):
    """Calculate the dynamic pruning ratio.
tensorflow.group
4,607
import tensorflow as tf

# Semi-Supervised Classification Phase
with tf.variable_scope(tf.get_variable_scope()):
    encoder_output_label_, _ = encoder(x_input_l, reuse=True, supervised=True)

# Generate output images
with tf.variable_scope(tf.get_variable_scope()):
    decoder_image = decoder(manual_decoder_input, reuse=True)

# Classification accuracy of encoder
correct_pred = tf.equal(tf.argmax(encoder_output_label_, 1), tf.argmax(y_input, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

# Autoencoder loss
autoencoder_loss = tf.reduce_mean(tf.square(x_target - decoder_output))

# Gaussian Discriminator Loss
dc_g_loss_real = tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(d_g_real), logits=d_g_real))
dc_g_loss_fake = tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.zeros_like(d_g_fake), logits=d_g_fake))
dc_g_loss = dc_g_loss_fake + dc_g_loss_real

# Categorical Discriminator Loss
dc_c_loss_real = tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(d_c_real), logits=d_c_real))
dc_c_loss_fake = tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.zeros_like(d_c_fake), logits=d_c_fake))
tensorflow.square
4,608
import tensorflow as tf

if mode == tf.estimator.ModeKeys.TRAIN:
    tf_update_ops = [lowering.lowered_operation(op) for op in update_ops]
    tf_update_ops.append(tf.assign_add(global_step, 1))
    # tf.logging.info("tf_update_ops: {}".format(tf_update_ops))
    train_op = tf.group(tf_update_ops)

with mtf.utils.outside_all_rewrites():
    # Copy master variables to slices. Must be called first.
    restore_hook = mtf.MtfRestoreHook(lowering)
    saver = tf.train.Saver(
        tf.global_variables(),
        sharded=True,
        max_to_keep=10,
        keep_checkpoint_every_n_hours=2,
        defer_build=False,
        save_relative_paths=True)
    tf.add_to_collection(tf.GraphKeys.SAVERS, saver)
    saver_listener = mtf.MtfCheckpointSaverListener(lowering)
    saver_hook = tf.train.CheckpointSaverHook(
        hparams.model_dir,
tensorflow.global_variables
4,609
import tensorflow as tf

    'mask_loss', tf.reduce_mean(mask_loss), step=global_step)
tf.contrib.summary.scalar(
    'learning_rate', tf.reduce_mean(learning_rate), step=global_step)
tensorflow.reduce_mean
4,610
import tensorflow as tf

if time_major:
    # (T,B,D) => (B,T,D)
    facts = tf.array_ops.transpose(facts, [1, 0, 2])
mask = tf.equal(mask, tf.ones_like(mask))
hidden_size = facts.get_shape().as_list()[-1]  # D value - hidden size of the RNN layer
input_size = query.get_shape().as_list()[-1]

# Trainable parameters
w1 = tf.Variable(tf.random_normal([hidden_size, attention_size], stddev=0.1))
w2 = tf.Variable(tf.random_normal([input_size, attention_size], stddev=0.1))
b = tf.Variable(tf.random_normal([attention_size], stddev=0.1))
v = tf.Variable(tf.random_normal([attention_size], stddev=0.1))

with tf.name_scope('v'):
    # Applying fully connected layer with non-linear activation to each of the B*T timestamps;
    # the shape of `tmp` is (B,T,D)*(D,A)=(B,T,A), where A=attention_size
    tmp1 = tf.tensordot(facts, w1, axes=1)
    tmp2 = tf.tensordot(query, w2, axes=1)
    tmp2 = tf.reshape(tmp2, [-1, 1, tf.shape(tmp2)[-1]])
tensorflow.random_normal
4,611
import tensorflow as tf

        update_slot_op = tf.group(update_counter, *ops, name='update_slot')

        def update_grad():
            update_op = self._opt.apply_gradients(slots_and_vars)
            with tf.control_dependencies([update_op]):
                clear_ops = [tf.assign(s, tf.zeros_like(s)) for s in slots]
            return tf.group(*clear_ops, name='update_grad')

        pred = tf.equal(tf.mod(counter, self._niter), 0)
        with tf.control_dependencies([update_slot_op]):
            if name is None:
                name = 'cond_update_grad'
            op = tf.cond(pred, update_grad, tf.no_op, name=name).op
        return op

if __name__ == '__main__':
    # run it with "python -m tensorpack.tfutils.optimizer"
    x = tf.get_variable('x', shape=[6])
    cost = tf.reduce_sum(tf.abs(x), name='cost')
    opt = tf.train.GradientDescentOptimizer(0.01)
    opt = AccumGradOptimizer(opt, 5)
    min_op = opt.minimize(cost)
tensorflow.cond
4,612
import tensorflow as tf

crit_dis = tf.reduce_mean(tf.square(crit_real - tf.ones_like(crit_real))) + \
           tf.reduce_mean(tf.square(crit_fake - tf.zeros_like(crit_fake)))
crit_gen = tf.reduce_mean(tf.square(crit_fake - tf.ones_like(crit_fake)))
rep_loss = tf.reduce_mean(tf.square(pred - x2d))
KK = tf.matmul(K, K, transpose_b=True)
tensorflow.square
4,613
import tensorflow as tf

                      [num_batch * num_prior, 1])

# 3. classification loss including positive and negative examples
loss_class_mask = tf.logical_or(mask_pos, mask_hard_neg)
loss_class_mask_b = tf.broadcast_to(loss_class_mask, tf.shape(class_pred))
filter_class_true = tf.boolean_mask(tf.cast(mask_pos, tf.float32), loss_class_mask)
filter_class_pred = tf.boolean_mask(class_pred, loss_class_mask_b)
filter_class_pred = tf.reshape(filter_class_pred, [-1, num_class])
loss_class = tf.keras.losses.sparse_categorical_crossentropy(
    y_true=filter_class_true, y_pred=filter_class_pred)
tensorflow.cast
4,614
import tensorflow as tf

self.weights = tf.constant(weights, dtype=tf.float32, name='class_weights')

hidden_size = model_params['model_hidden_size']
proj_size = model_params['model_proj_size']  # optional, can be None

def GetCell():
    """Creates an LSTM cell with dropout."""
    c = tf.nn.rnn_cell.LSTMCell(hidden_size,
                                use_peepholes=model_params['peepholes'],
                                num_proj=proj_size)
    if dropout_keep_prob is not None:
        c = tf.nn.rnn_cell.DropoutWrapper(c, input_keep_prob=dropout_keep_prob)
    return c

# Create the bi-directional LSTM
with tf.variable_scope('wordrnn'):
    with tf.variable_scope('fw'):
        cell_fw = GetCell()
    with tf.variable_scope('bw'):
        cell_bw = GetCell()
    rnnout, _, _ = tf.nn.bidirectional_rnn(cell_fw, cell_bw, self._inputs, dtype=tf.float32,
tensorflow.nn.rnn_cell.DropoutWrapper
4,615
import tensorflow as tf """ reg_l2 = tf.keras.regularizers.l2(5e-7) if padding == 'SYMMETRIC' or padding == 'REFLECT': p = (kernel_size - 1) // 2 x = tf.pad(x, [[0,0],[p,p],[p,p], [p,p],[0,0]], padding) x = tf.keras.layers.Conv3D(filters, kernel_size, activation=activation, kernel_initializer=initialization, use_bias=use_bias, kernel_regularizer=reg_l2)(x) else: assert padding in ['SAME', 'VALID'] x = tf.keras.layers.Conv3D(filters, kernel_size, activation=activation, kernel_initializer=initialization, use_bias=use_bias, kernel_regularizer=reg_l2)(x) return x
tensorflow.keras.layers.Conv3D
4,616
import tensorflow as tf

masks = tensor_dict[fields.InputDataFields.groundtruth_instance_masks]
_, resized_masks, _ = image_resizer_fn(image, masks)
if use_bfloat16:
    resized_masks = tf.cast(resized_masks, tf.bfloat16)
tensor_dict[fields.InputDataFields.groundtruth_instance_masks] = resized_masks

# Transform groundtruth classes to one hot encodings.
label_offset = 1
zero_indexed_groundtruth_classes = tensor_dict[
    fields.InputDataFields.groundtruth_classes] - label_offset
tensor_dict[fields.InputDataFields.groundtruth_classes] = tf.one_hot(
    zero_indexed_groundtruth_classes, num_classes)

if use_multiclass_scores:
    tensor_dict[fields.InputDataFields.groundtruth_classes] = tensor_dict[
        fields.InputDataFields.multiclass_scores]
tensor_dict.pop(fields.InputDataFields.multiclass_scores, None)

if fields.InputDataFields.groundtruth_confidences in tensor_dict:
    groundtruth_confidences = tensor_dict[
        fields.InputDataFields.groundtruth_confidences]
tensorflow.one_hot
4,617
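For quick reference, a minimal standalone sketch of the labeled API, tf.one_hot, which expands integer class ids into one-hot vectors; illustrative only, not part of the dataset:

import tensorflow as tf

labels = tf.constant([0, 2, 1])
one_hot = tf.one_hot(labels, depth=3)  # [[1,0,0], [0,0,1], [0,1,0]]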
import tensorflow as tf

with tf.variable_scope('C_train'):
    self.train_op = tf.train.AdamOptimizer(self.lr).minimize(self.loss, global_step=GLOBAL_STEP)

with tf.variable_scope('a_grad'):
    self.a_grads = tf.gradients(self.q, a)[0]  # tensor of gradients of each sample (None, a_dim)

def _build_net(self, s, a, scope, trainable):
    with tf.variable_scope(scope):
        init_w = tf.random_normal_initializer(0., 0.01)
        init_b = tf.constant_initializer(0.01)
        with tf.variable_scope('l1'):
            n_l1 = 700
            # combine the action and states together in this way
            w1_s = tf.get_variable('w1_s', [self.s_dim, n_l1], initializer=init_w, trainable=trainable)
            w1_a = tf.get_variable('w1_a', [self.a_dim, n_l1], initializer=init_w, trainable=trainable)
            b1 = tf.get_variable('b1', [1, n_l1], initializer=init_b, trainable=trainable)
            net = tf.nn.relu(tf.matmul(s, w1_s) + tf.matmul(a, w1_a) + b1)
        with tf.variable_scope('l2'):
            net = tf.layers.dense(net, 20, activation=tf.nn.relu, kernel_initializer=init_w,
                                  bias_initializer=init_b, name='l2', trainable=trainable)
        with tf.variable_scope('q'):
            q = tf.layers.dense(net, 1, kernel_initializer=init_w,
                                bias_initializer=init_b, trainable=trainable)  # Q(s,a)
tensorflow.variable_scope
4,618
import tensorflow as tf

conv1 = tf.layers.conv2d(x, filters=32 * amp_factor, kernel_size=[5, 3],
                         data_format='channels_last', padding="same",
                         strides=(2, 1), activation=tf.nn.relu)
pool1 = conv1

conv2 = tf.layers.conv2d(pool1, filters=64 * amp_factor, kernel_size=[5, 1],
                         data_format='channels_last', padding="same",
                         strides=(2, 1), activation=tf.nn.relu)
pool2 = conv2
tensorflow.layers.conv2d
4,619
from tensorflow.contrib.distributions.python.ops import distribution_util

@distribution_util.AppendDocstring(
    """Note: when `rate` is an integer, there are actually two modes: `rate`
    and `rate - 1`. In this case we return the larger, i.e., `rate`.""")
def _mode(self):
    return math_ops.floor(self.rate)

def _assert_valid_sample(self, x, check_integer=True):
    if not self.validate_args:
        return x
    dependencies = [check_ops.assert_non_negative(x)]
    if check_integer:
        dependencies += [distribution_util.assert_integer_form(
            x, message="x has non-integer components.")]
    return control_flow_ops.with_dependencies(dependencies, x)
tensorflow.contrib.distributions.python.ops.distribution_util.assert_integer_form
4,620
import tensorflow as tf

assert isinstance(params, (dict, params_dict.ParamsDict))
if isinstance(params, dict):
    params = params_dict.ParamsDict(params)
self._params = params

def __call__(self, global_step):
    global_step = tf.cast(global_step, dtype=tf.float32)
    warmup_lr = self._params.warmup_learning_rate
    warmup_steps = self._params.warmup_steps
    init_lr = self._params.init_learning_rate
    total_steps = self._total_steps
    linear_warmup = (
tensorflow.cast
4,621
from tensorflow.python.framework import ops

start_sum = start_sum if start_sum else (
    array_ops.zeros((), dtype=dtypes.int32, name="zero"),)
if (x.get_shape().ndims is not None and
        self._is_all_constant_helper(size, *start_sum)):
    start = sum(tensor_util.constant_value(s) for s in start_sum)
    stop = start + tensor_util.constant_value(size)
    slice_ = x.get_shape()[start:stop].as_list()
    if all(s is not None for s in slice_):
        return ops.convert_to_tensor(slice_, dtype=dtypes.int32, name=name)
    # Fall-through intended.
return array_ops.slice(array_ops.shape(x), (sum(start_sum),), (size,))

sample_ndims = self.get_sample_ndims(x, name=name)
return (slice_shape((), sample_ndims, name="sample_shape"),
        slice_shape((sample_ndims,), self.batch_ndims, name="batch_shape"),
tensorflow.python.framework.ops.convert_to_tensor
4,622
import tensorflow as tf

        else:
            gru = tf.nn.rnn_cell.GRUCell(state_size)
            cell_drop = tf.contrib.rnn.DropoutWrapper(gru, variational_recurrent=True, dtype=tf.float32,
                                                      input_size=num_input, input_keep_prob=input_prob,
                                                      state_keep_prob=state_prob)
    else:
        if activation == 'linear':
            cell_basic = tf.contrib.rnn.BasicRNNCell(state_size, activation=tf.identity)
            cell_drop = tf.contrib.rnn.DropoutWrapper(cell_basic, variational_recurrent=True, dtype=tf.float32,
                                                      input_size=num_input, input_keep_prob=input_prob,
                                                      state_keep_prob=state_prob)
        elif activation == 'relu':
            cell_basic = tf.contrib.rnn.BasicRNNCell(state_size, activation=tf.nn.relu)
            cell_drop = tf.contrib.rnn.DropoutWrapper(cell_basic, variational_recurrent=True, dtype=tf.float32,
                                                      input_size=num_input, input_keep_prob=input_prob,
                                                      state_keep_prob=state_prob)
        else:  # tanh by default
            cell_basic = tf.contrib.rnn.BasicRNNCell(state_size)
            cell_drop = tf.contrib.rnn.DropoutWrapper(cell_basic, variational_recurrent=True, dtype=tf.float32,
                                                      input_size=num_input, input_keep_prob=input_prob,
                                                      state_keep_prob=state_prob)
    return cell_drop

"""Wrap the cell in multilayer"""
cell = tf.nn.rnn_cell.MultiRNNCell(
    [get_a_cell(state_size, input_prob, state_prob, input_size_x if layer == 0 else state_size)
     for layer in range(num_layers)], state_is_tuple=True)
cell = tf.nn.rnn_cell.DropoutWrapper(cell, variational_recurrent=True, dtype=tf.float32,
                                     input_size=input_size_x, output_keep_prob=output_prob)

init_state = cell.zero_state(batch_size, dtype=tf.float32)

"""Build dynamic graph"""
rnn_outputs, final_state = tf.nn.dynamic_rnn(cell=cell, inputs=rnn_inputs, initial_state=init_state)
tensorflow.contrib.rnn.BasicRNNCell
4,623
import tensorflow as tf

                      predicted_poses)]:
    for i in range(classes.shape[0]):
        sdf = tf.expand_dims(sdfs[i], -1)
        sdf = sdf * -1.0  # inside positive, outside zero
        samples_object = centernet_utils.transform_pointcloud(
            tf.reshape(samples_world, [1, 1, -1, 3]),
            tf.reshape(poses[2][i], [1, 1, 3]),
            tf.reshape(poses[0][i], [1, 1, 3, 3]),
            tf.reshape(poses[1][i], [1, 1, 3]), inverse=True) * 2.0
        samples_object = (samples_object * (29.0/32.0) / 2.0 + 0.5) * 32.0 - 0.5
        samples = tf.squeeze(samples_object)
        interpolated = trilinear.interpolate(sdf, samples)
        occupancy_value = tf.math.sign(tf.nn.relu(interpolated + self.tol))
        sdf_values += occupancy_value
tensorflow.reshape
4,624
import tensorflow as tf

tf.app.flags.DEFINE_float(
    'decay_steps', 1000,
    'Number of epochs after which learning rate decays.')

# for learning rate piecewise_constant decay
tf.app.flags.DEFINE_string(
    'decay_boundaries', '60000, 800000',
    'Learning rate decay boundaries by global_step (comma-separated list).')
tf.app.flags.DEFINE_string(
    'lr_decay_factors', '1, 0.6, 0.1',
    'The values of learning_rate decay factor for each segment between boundaries (comma-separated list).')

# checkpoint related configuration
tf.app.flags.DEFINE_string(
    'checkpoint_path', './model/resnet50',  # None,
    'The path to a checkpoint from which to fine-tune.')
tensorflow.app.flags.DEFINE_string
4,625
import tensorflow as tf

bg_row = tf.shape(b_grads[0])[0]
bg_col = tf.shape(b_grads[0])[1]
b_grads = tf.reshape(b_grads, (numTensors * bg_row, bg_col))
if adj_b:
tensorflow.reshape
4,626
import tensorflow as tf

                      batch_size=batch_size)
num_dims = shape.size
if tt_rank.size == 1:
    tt_rank = tt_rank * np.ones(num_dims - 1)
    tt_rank = np.insert(tt_rank, 0, 1)
    tt_rank = np.append(tt_rank, 1)
tt_rank = tt_rank.astype(int)
tt_cores = [None] * num_dims
with tf.name_scope(name):
    for i in range(num_dims):
        curr_core_shape = (batch_size, tt_rank[i], shape[i], tt_rank[i + 1])
        tt_cores[i] = tf.random_normal(curr_core_shape, mean=mean, stddev=stddev, dtype=dtype)
    return TensorTrainBatch(tt_cores, shape, tt_rank, batch_size)

def matrix_with_random_cores(shape, tt_rank=2, mean=0., stddev=1.,
                             dtype=tf.float32,
                             name='t3f_matrix_with_random_cores'):
    """Generate a TT-matrix of given shape with N(mean, stddev^2) cores.
tensorflow.random_normal
4,627
import tensorflow as tf

with tf.variable_scope(scope):
    wx = tf.get_variable("wx", [nin, nh * 4], initializer=ortho_init(init_scale))
    wh = tf.get_variable("wh", [nh, nh * 4], initializer=ortho_init(init_scale))
    b = tf.get_variable("b", [nh * 4], initializer=tf.constant_initializer(0.0))

c, h = tf.split(axis=1, num_or_size_splits=2, value=s)
for idx, (x, m) in enumerate(zip(xs, ms)):
    c = c * (1 - m)
    h = h * (1 - m)
    z = tf.matmul(x, wx) + tf.matmul(h, wh) + b
    i, f, o, u = tf.split(axis=1, num_or_size_splits=4, value=z)
    i = tf.nn.sigmoid(i)
    f = tf.nn.sigmoid(f)
    o = tf.nn.sigmoid(o)
    u = tf.tanh(u)
    c = f * c + i * u
    h = o * tf.tanh(c)
    xs[idx] = h
s = tf.concat(axis=1, values=[c, h])
return xs, s

def _ln(x, g, b, e=1e-5, axes=[1]):
    u, s = tf.nn.moments(x, axes=axes, keep_dims=True)
    x = (x - u) / tf.sqrt(s + e)
    x = x * g + b
    return x
tensorflow.nn.sigmoid
4,628
import tensorflow as tf

tf.flags.DEFINE_boolean('staged_vars', False,
                        """whether the variables are staged from the main computation""")
tf.flags.DEFINE_boolean('force_gpu_compatible', True,
                        """whether to enable force_gpu_compatible in GPU_Options""")
tensorflow.flags.DEFINE_boolean
4,629
import tensorflow as tf

var_grads = mtf.gradients(
    [loss], [v.outputs[0] for v in graph.trainable_variables])
lr = learning_rate.learning_rate_schedule(hparams)
tf.summary.scalar("learning_rate", lr)
mtf_lr = mtf.import_tf_tensor(
    mesh, tf.convert_to_tensor(lr, dtype=tf.float32), mtf.Shape([]))
optimizer = mtf.optimize.make_optimizer(hparams, mtf_lr)
update_ops = []
for grad, var in zip(var_grads, graph.trainable_variables):
    update_ops.extend(optimizer.apply_grad(grad, var))
tensorflow.convert_to_tensor
4,630
import tensorflow as tf

assert len(segment_ids) == max_seq_length

label_id = label_map[example.label]
if ex_index < 5:
    tf.logging.info("*** Example ***")
    tf.logging.info("guid: %s" % (example.guid))
    tf.logging.info("tokens: %s" % " ".join(
        [tokenization.printable_text(x) for x in tokens]))
    tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
    tf.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
tensorflow.logging.info
4,631
import tensorflow as tf

model = MyModel(vocab_size, embedding_dim, units, BATCH_SIZE)
optimizer = tf.optimizers.Adam()
tensorflow.optimizers.Adam
4,632
from tensorflow.python.framework import constant_op

@property
def rate(self):
    """Rate parameter."""
    return self._rate

def _batch_shape_tensor(self):
    return array_ops.shape(self.rate)

def _batch_shape(self):
    return self.rate.get_shape()

def _event_shape_tensor(self):
    return constant_op.constant([], dtype=dtypes.int32)

def _event_shape(self):
    return tensor_shape.scalar()

@distribution_util.AppendDocstring(_poisson_sample_note)
def _log_prob(self, x):
    return self._log_unnormalized_prob(x) - self._log_normalization()

@distribution_util.AppendDocstring(_poisson_sample_note)
def _prob(self, x):
    return math_ops.exp(self._log_prob(x))
tensorflow.python.framework.constant_op.constant
4,633
import tensorflow as tf

init = tf.global_variables_initializer()

# Reshape images to display them
input_images = tf.reshape(x_input, [-1, 28, 28, 1])
generated_images = tf.reshape(decoder_output, [-1, 28, 28, 1])

# Tensorboard visualization
tf.summary.scalar(name='Autoencoder Loss', tensor=autoencoder_loss)
tf.summary.scalar(name='Discriminator gauss Loss', tensor=dc_g_loss)
tf.summary.scalar(name='Discriminator categorical Loss', tensor=dc_c_loss)
tf.summary.scalar(name='Generator Loss', tensor=generator_loss)
tf.summary.scalar(name='Supervised Encoder Loss', tensor=supervised_encoder_loss)
tf.summary.histogram(name='Encoder Gauss Distribution', values=encoder_output_latent)
tf.summary.histogram(name='Real Gauss Distribution', values=real_distribution)
tf.summary.histogram(name='Encoder Categorical Distribution', values=encoder_output_label)
tensorflow.summary.scalar
4,634
import tensorflow as tf

if dropout:
    h = tf.nn.dropout(h, 0.5)
h_logits = tf.matmul(h, w_h) + b_h

if self.ctx2out:
    w_ctx2out = tf.get_variable('w_ctx2out', [self.D, self.M], initializer=self.weight_initializer)
    h_logits += tf.matmul(context, w_ctx2out)

if self.prev2out:
    h_logits += x
h_logits = tf.nn.tanh(h_logits)

if dropout:
    h_logits = tf.nn.dropout(h_logits, 0.5)
out_logits = tf.matmul(h_logits, w_out) + b_out
return out_logits

def _batch_norm(self, x, mode='train', name=None):
    return tf.contrib.layers.batch_norm(inputs=x,
                                        decay=0.95,
                                        center=True,
                                        scale=True,
                                        is_training=(mode == 'train'),
                                        updates_collections=None,
                                        scope=(name + 'batch_norm'))
tensorflow.nn.dropout
4,635
import tensorflow as tf

def gaussian_pdf(x, mean, std):
    val = tf.div(tf.exp(-tf.pow((x - mean) / std, 2) / two),
                 (sqrt2pi * std), name="gaussian_pdf")
    return val

# // --- Build Argus background PDF ---
# RooRealVar argpar("argpar","argus shape parameter",-20.0,-100.,-1.) ;
# RooConstVar m0("m0", "resonant mass", 5.291);
argpar = tf.Variable(argpar_num, name="argpar", dtype=tf.float64)
m0 = tf.constant(m0_num, name="m0", dtype=tf.float64)
vdict['argpar'] = argpar

# RooArgusBG argus("argus","Argus PDF",mes,m0,argpar) ;
def argus_pdf(m, m0, c, p=0.5):
    t = m / m0
    u = 1 - t * t
    argus_t_ge_1 = m * tf.pow(u, p) * tf.exp(c * u)
    return tf.maximum(tf.zeros_like(m), argus_t_ge_1, name="argus_pdf")
tensorflow.constant
4,636
import tensorflow as tf

t_flatten = tf.reshape(t, shape=(-1,))
uniques, index = tf.unique(t_flatten)
tensorflow.unique
4,637
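For quick reference, a minimal standalone sketch of the labeled API, tf.unique, which returns the distinct values of a 1-D tensor plus, for each input element, the position of its value in the output; illustrative only, not part of the dataset:

import tensorflow as tf

t = tf.constant([1, 1, 2, 4, 4, 4, 7])
uniques, index = tf.unique(t)  # uniques = [1, 2, 4, 7]; index = [0, 0, 1, 2, 2, 2, 3]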
import tensorflow as tf

result = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps)

output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt")
with tf.gfile.GFile(output_eval_file, "w") as writer:
    tf.logging.info("***** Eval results *****")
    for key in sorted(result.keys()):
        tf.logging.info("  %s = %s", key, str(result[key]))
        writer.write("%s = %s\n" % (key, str(result[key])))
tensorflow.logging.info
4,638
import tensorflow as tf

def construct_placeholders(edge_types):
    placeholders = {
        'batch': tf.placeholder(tf.int32, name='batch'),
        'batch_neg': tf.placeholder(tf.int32, name='batch_neg'),
        'batch_node': tf.placeholder(tf.int32, name='batch_node'),
        'adj_min_batch': tf.placeholder(tf.float32, name='adj_min_batch'),
        'sim_min_batch': tf.placeholder(tf.float32, name='sim_min_batch'),
        'batch_edge_type_idx': tf.placeholder(tf.int32, shape=(), name='batch_edge_type_idx'),
        'batch_row_edge_type': tf.placeholder(tf.int32, shape=(), name='batch_row_edge_type'),
        'batch_col_edge_type': tf.placeholder(tf.int32, shape=(), name='batch_col_edge_type'),
        'degrees': tf.placeholder(tf.int32),
        'dropout': tf.placeholder_with_default(0., shape=()),
    }
    placeholders.update({
        'adj_mats_%d,%d,%d' % (i, j, k): tf.sparse_placeholder(tf.float32)
        for i, j in edge_types for k in range(edge_types[i, j])})
    placeholders.update({
        'feat_%d' % i: tf.sparse_placeholder(tf.float32)
        for i, _ in edge_types})
tensorflow.placeholder
4,639
import tensorflow as tf

     bboxes_urx, bboxes_ury) = get_width_upright(bboxes)

    (gt_boxes_width, gt_boxes_height,
     gt_boxes_urx, gt_boxes_ury) = get_width_upright(gt_boxes)

    if variances is None:
        variances = [1., 1.]

    targets_dx = (gt_boxes_urx - bboxes_urx) / (bboxes_width * variances[0])
    targets_dy = (gt_boxes_ury - bboxes_ury) / (bboxes_height * variances[0])
    targets_dw = tf.log(gt_boxes_width / bboxes_width) / variances[1]
    targets_dh = tf.log(gt_boxes_height / bboxes_height) / variances[1]

    targets = tf.concat(
        [targets_dx, targets_dy, targets_dw, targets_dh], axis=1)

    return targets

def decode(roi, deltas, variances=None):
    with tf.name_scope('BoundingBoxTransform/decode'):
        (roi_width, roi_height,
         roi_urx, roi_ury) = get_width_upright(roi)

        dx, dy, dw, dh = tf.split(deltas, 4, axis=1)

        if variances is None:
tensorflow.concat
4,640
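For quick reference, a minimal standalone sketch of the labeled API, tf.concat, which joins tensors along an existing axis; illustrative only, not part of the dataset:

import tensorflow as tf

a = tf.constant([[1., 2.], [3., 4.]])
b = tf.constant([[5., 6.], [7., 8.]])
c = tf.concat([a, b], axis=1)  # shape [2, 4]; axis=0 would give shape [4, 2]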
import tensorflow as tf

        the model. Most often this is x_{t-1}, the previous token in the
        observation sequence.

    Returns:
        rnn_out: The output of the RNN.
        rnn_state: The new state of the RNN.
    """
    inputs_encoded = self.data_encoder(tf.to_float(inputs))
    rnn_inputs = tf.concat([inputs_encoded, prev_latent_encoded], axis=1)
    rnn_out, rnn_state = self.rnn_cell(rnn_inputs, prev_rnn_state)
    return rnn_out, rnn_state

def transition(self, rnn_out):
    """Computes the transition distribution p(z_t|h_t).
tensorflow.concat
4,641
import tensorflow as tf

        return mu, sigma

class PolicyEstimator_MountainCarContinuous():
    def __init__(self, entropy_beta=0.1, learning_rate=0.001, par_idx=0, scope="policy_estimator"):
        w_init = tf.random_normal_initializer(0., .1)
        with tf.variable_scope(scope + "_" + str(par_idx)):
            # state, target and action
tensorflow.random_normal_initializer
4,642
import tensorflow as tf

    :param n1: no. of input neurons
    :param n2: no. of output neurons
    :param name: name of the entire dense layer, i.e. the variable scope name.
    :return: tensor with shape [batch_size, n2]
    """
    with tf.variable_scope(name, reuse=None):
        weights = tf.get_variable("weights", shape=[n1, n2],
                                  initializer=tf.random_normal_initializer(mean=0., stddev=0.01))
        bias = tf.get_variable("bias", shape=[n2], initializer=tf.constant_initializer(0.0))
        out = tf.add(tf.matmul(x, weights), bias, name='matmul')
        return out

# The autoencoder network
tensorflow.random_normal_initializer
4,643
import tensorflow as tf

    one_hot_labels = tf.one_hot(labels, depth=2, dtype=tf.float32)
    per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
    loss = tf.reduce_mean(per_example_loss)
    return (loss, per_example_loss, log_probs)

def gather_indexes(sequence_tensor, positions):
    """Gathers the vectors at the specific positions over a minibatch."""
    sequence_shape = modeling.get_shape_list(sequence_tensor, expected_rank=3)
    batch_size = sequence_shape[0]
    seq_length = sequence_shape[1]
    width = sequence_shape[2]

    flat_offsets = tf.reshape(
        tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1])
    flat_positions = tf.reshape(positions + flat_offsets, [-1])
    flat_sequence_tensor = tf.reshape(sequence_tensor,
                                      [batch_size * seq_length, width])
    output_tensor = tf.gather(flat_sequence_tensor, flat_positions)
    return output_tensor

def input_fn_builder(input_files,
                     max_seq_length,
                     max_predictions_per_seq,
                     is_training,
                     num_cpu_threads=4):
    """Creates an `input_fn` closure to be passed to TPUEstimator."""

    def input_fn(params):
tensorflow.reshape
4,644
import tensorflow as tf

# Imports from meta_graph.
tf.train.import_meta_graph(filename)

# Retrieves SAVERS collection. Verifies there are 2 entries.
savers = tf.get_collection("savers")
self.assertEqual(2, len(savers))

# Retrieves saver0. Verifies that new_saver0 can restore v0, but not v1.
tensorflow.get_collection
4,645
import tensorflow as tf

                     num_cpu_threads=4):
    """Creates an `input_fn` closure to be passed to TPUEstimator."""

    def input_fn(params):
        """The actual input function."""
        batch_size = params["batch_size"]

        name_to_features = {
            "input_ids": tf.FixedLenFeature([max_seq_length], tf.int64),
            "input_mask": tf.FixedLenFeature([max_seq_length], tf.int64),
            "segment_ids": tf.FixedLenFeature([max_seq_length], tf.int64),
            "masked_lm_positions": tf.FixedLenFeature([max_predictions_per_seq], tf.int64),
            "masked_lm_ids": tf.FixedLenFeature([max_predictions_per_seq], tf.int64),
            "masked_lm_weights": tf.FixedLenFeature([max_predictions_per_seq], tf.float32),
            "next_sentence_labels": tf.FixedLenFeature([1], tf.int64),
tensorflow.FixedLenFeature
4,646
import tensorflow as tf

span_emb_list = []

span_start_emb = tf.gather(context_outputs, span_starts)  # [k, emb]
span_emb_list.append(span_start_emb)

span_end_emb = tf.gather(context_outputs, span_ends)  # [k, emb]
span_emb_list.append(span_end_emb)

span_width = 1 + span_ends - span_starts  # [k]
tensorflow.gather
4,647
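For quick reference, a minimal standalone sketch of the labeled API, tf.gather, which selects rows (or slices along an axis) by index; illustrative only, not part of the dataset:

import tensorflow as tf

params = tf.constant([[10., 11.], [20., 21.], [30., 31.]])
rows = tf.gather(params, [2, 0])  # rows 2 and 0 -> [[30., 31.], [10., 11.]]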
import tensorflow as tf

    'data_dir', '../PASCAL/VOC_TF/VOC0712TF/',
    'The directory where the dataset input data is stored.')
tf.app.flags.DEFINE_string(
    'dataset_name', 'pascalvoc_0712', 'The name of the dataset to load.')
tf.app.flags.DEFINE_integer(
    'num_classes', 21, 'Number of classes to use in the dataset.')
tf.app.flags.DEFINE_string(
    'dataset_split_name', 'train', 'The name of the train/test split.')
tf.app.flags.DEFINE_string(
    'model_dir', './logs_v3/',
    'The directory where the model will be stored.')
tf.app.flags.DEFINE_integer(
    'log_every_n_steps', 10,
    'The frequency with which logs are printed.')
tf.app.flags.DEFINE_integer(
    'save_summary_steps', 500,
    'The frequency with which summaries are saved, in steps.')
tf.app.flags.DEFINE_integer(
    'save_checkpoints_secs', 7200,
    'The frequency with which the model is saved, in seconds.')

# model related configuration
tf.app.flags.DEFINE_integer(
tensorflow.app.flags.DEFINE_integer
4,648
import tensorflow as tf

elif mode == tf.estimator.ModeKeys.EVAL:
    def metric_fn(per_example_loss, logits, label_ids):
        """Computes the loss and accuracy of the model."""
        sentence_log_probs = tf.reshape(logits, [-1, logits.shape[-1]])
        sentence_predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
        sentence_labels = tf.reshape(label_ids, [-1])
        sentence_accuracy = tf.metrics.accuracy(
            labels=label_ids, predictions=sentence_predictions)
        sentence_mean_loss = tf.metrics.mean(values=per_example_loss)
        sentence_f = tf_metrics.f1(label_ids, sentence_predictions,
                                   num_labels, label_lst, average="macro")
        eval_metric_ops = {
            "f1": sentence_f,
            "acc": sentence_accuracy
        }
        return eval_metric_ops
tensorflow.metrics.mean
4,649
import tensorflow as tf

label_ids = tf.reshape(label_ids, [-1])
label_weights = tf.reshape(label_weights, [-1])
tensorflow.reshape
4,650
import tensorflow as tf

self.train_data['T'], self.train_data['failures'], \
    self.train_data['atrisk'], self.train_data['ties'] = utils.parse_data(X, label)

# New Graph
G = tf.Graph()
with G.as_default():
    # Data input
tensorflow.Graph
4,651
import tensorflow as tf

model._task._train_op = tf.no_op()
model._task.ApplyExponentialMovingAverage(model.ema)
with tf.variable_scope('', reuse=True):
    beta = tf.get_variable('a/beta/var')
    mean = tf.get_variable('a/moving_mean/var')
self.assertIsNotNone(model.ema.average(beta))
tensorflow.get_variable
4,652
from tensorflow.python.framework import dtypes

                             **self._extra_kwargs)
_ = defined.name  # Fully instantiate the function definition.
if self._grad_func:
    # If _grad_func is given, it is another
    # _OverloadedFunction. We need to instantiate it with the
    # right input types.
    output_types = [
        dtypes.DType(_.type)
        for _ in defined.definition.signature.output_arg
    ]
    # pylint: disable=protected-access
    defined._grad_func = self._grad_func.instantiate(input_types + output_types)
    # pylint: enable=protected-access
tensorflow.python.framework.dtypes.DType
4,653
from tensorflow.python.framework import tensor_shape

    cost, unused_backprop = gen_nn_ops._sparse_softmax_cross_entropy_with_logits(
        logits, labels, name=name)
    return cost

@ops.RegisterShape("SparseSoftmaxCrossEntropyWithLogits")
def _SparseSoftmaxCrossEntropyWithLogitsShape(op):
    """Shape function for SparseSoftmaxCrossEntropyWithLogits op."""
    logits_shape = op.inputs[0].get_shape()
    input_shape = logits_shape.with_rank(2)
    batch_size = input_shape[0]
    # labels_shape
    op.inputs[1].get_shape().merge_with(tensor_shape.vector(batch_size))
    return [tensor_shape.vector(batch_size.value), input_shape]

@ops.RegisterShape("SoftmaxCrossEntropyWithLogits")
def _SoftmaxCrossEntropyWithLogitsShape(op):
    """Shape function for SoftmaxCrossEntropyWithLogits op."""
    logits_shape = op.inputs[0].get_shape()
    labels_shape = op.inputs[1].get_shape()
    input_shape = logits_shape.merge_with(labels_shape).with_rank(2)
    batch_size = input_shape[0]
    return [tensor_shape.vector(batch_size.value), input_shape]
tensorflow.python.framework.tensor_shape.vector
4,654
import tensorflow as tf

loss_base_1 = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_1, logits=output_1))
loss_base_2 = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_2, logits=output_2))
reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
loss_total = loss_base_1 + loss_base_2 + tf.reduce_sum(reg_losses)

with tf.variable_scope("evaluation"):
    accuracy_1 = tf.reduce_mean(tf.cast(tf.equal(
        tf.argmax(output_1, axis=-1),
        tf.argmax(y_1, axis=-1)), tf.float32), name="accuracy_1")
    accuracy_2 = tf.reduce_mean(tf.cast(tf.equal(
tensorflow.variable_scope
4,655
import tensorflow as tf """ class PolicyEstimator_Pendulum(): def __init__(self, entropy_beta=0.01, learning_rate=0.01, par_idx=0,scope="policy_estimator"): w_init = tf.random_normal_initializer(0.,.1); with tf.variable_scope(scope+"_"+str(par_idx)): # state, target and action self.state = tf.placeholder(tf.float32, [None,num_state], name="state") self.target = tf.placeholder(tf.float32,[None,1], name="target") self.a_his = tf.placeholder(tf.float32, [None, num_action], name="action_hist") # layers l_a = tf.layers.dense(self.state, 200, tf.nn.relu6, kernel_initializer=w_init, name='la') self.mu = tf.layers.dense(l_a, num_action, tf.nn.tanh, kernel_initializer=w_init, name='mu') # estimated action value self.sigma = tf.layers.dense(l_a, num_action, tf.nn.softplus, kernel_initializer=w_init, name='sigma') # estimated variance # wrap output self.mu = self.mu * action_bound[1]; self.sigma = self.sigma + 1e-4 # get action from distribution self.normal_dist = tf.contrib.distributions.Normal(self.mu, self.sigma) self.action = tf.squeeze(self.normal_dist.sample(1),axis=0); self.action = tf.clip_by_value(self.action, action_bound[0], action_bound[1])
tensorflow.layers.dense
4,656
import tensorflow as tf

        double_obs_ph = target_policy.obs_ph
        if double_q:
            with tf.variable_scope("double_q", reuse=True,
                                   custom_getter=tf_util.outer_scope_getter("double_q")):
                double_policy = q_func(sess, ob_space, ac_space, 1, 1, None,
                                       reuse=True, layers=layers)
                double_q_values = double_policy.q_values
                double_obs_ph = double_policy.obs_ph

    with tf.variable_scope("loss", reuse=reuse):
        # set up placeholders
        act_t_ph = tf.placeholder(tf.int32, [None], name="action")
        rew_t_ph = tf.placeholder(tf.float32, [None], name="reward")
        done_mask_ph = tf.placeholder(tf.float32, [None], name="done")
        importance_weights_ph = tf.placeholder(tf.float32, [None], name="weight")

        # q scores for actions which we know were selected in the given state.
        q_t_selected = tf.reduce_sum(step_model.q_values * tf.one_hot(act_t_ph, n_actions), axis=1)

        # compute estimate of best possible value starting from state at t + 1
        if double_q:
            q_tp1_best_using_online_net = tf.argmax(double_q_values, axis=1)
            q_tp1_best = tf.reduce_sum(
                target_policy.q_values * tf.one_hot(q_tp1_best_using_online_net, n_actions), axis=1)
        else:
tensorflow.placeholder
4,657
import tensorflow as tf

# Output bias:
self.b_out = tf.get_variable('b_out', [N_out],
                             initializer=b_out_initializer,
tensorflow.get_variable
4,658
import tensorflow as tf

                       element_shape=(facts[:, 0, :].get_shape()))
_, output_op, _ = tf.while_loop(cond, body, [facts, output_ta, 0])
self_attention = output_op.stack()
self_attention = tf.transpose(self_attention, perm=[1, 0, 2])
return self_attention
tensorflow.transpose
4,659
import tensorflow as tf

    Returns:
        Nx#class logits
    """

def optimizer(self):
    lr = tf.get_variable('learning_rate', initializer=0.1, trainable=False)
    tf.summary.scalar('learning_rate-summary', lr)
    return tf.train.MomentumOptimizer(lr, 0.9, use_nesterov=True)

def image_preprocess(self, image):
    with tf.name_scope('image_preprocess'):
        if image.dtype.base_dtype != tf.float32:
            image = tf.cast(image, tf.float32)
        mean = [0.485, 0.456, 0.406]  # rgb
        std = [0.229, 0.224, 0.225]
        if self.image_bgr:
            mean = mean[::-1]
            std = std[::-1]
        image_mean = tf.constant(mean, dtype=tf.float32) * 255.
        image_std = tf.constant(std, dtype=tf.float32) * 255.
tensorflow.name_scope
4,660
import tensorflow as tf

norm_grads = None
if self.max_grad_norm is not None:
    grads, norm_grads = tf.clip_by_global_norm(grads, self.max_grad_norm)
grads = list(zip(grads, self.params))

with tf.variable_scope("input_info", reuse=False):
    tf.summary.scalar('rewards', tf.reduce_mean(self.reward_ph))
    tf.summary.scalar('learning_rate', tf.reduce_mean(self.learning_rate))
    tf.summary.scalar('advantage', tf.reduce_mean(adv))
    tf.summary.scalar('action_probability', tf.reduce_mean(self.mu_ph))

    if self.full_tensorboard_log:
        tf.summary.histogram('rewards', self.reward_ph)
        tf.summary.histogram('learning_rate', self.learning_rate)
        tf.summary.histogram('advantage', adv)
        tf.summary.histogram('action_probability', self.mu_ph)
        if tf_util.is_image(self.observation_space):
tensorflow.reduce_mean
4,661
import tensorflow as tf

        from_rgb_conv_layers
    )

    return from_rgb_conv_layers

def instantiate_discriminator_base_conv_layer_block(self, params):
    """Instantiates discriminator base conv layer block.

    Args:
        params: dict, user passed parameters.

    Returns:
        List of base conv layers.
    """
    with tf.variable_scope(name_or_scope=self.name, reuse=tf.AUTO_REUSE):
        # Get conv block layer properties.
        conv_block = params["discriminator_base_conv_blocks"][0]

        # Create list of base conv layers.
        base_conv_layers = [
            tf.layers.Conv2D(
                filters=conv_block[i][3],
                kernel_size=conv_block[i][0:2],
                strides=conv_block[i][4:6],
                padding="same",
                activation=tf.nn.leaky_relu,
                kernel_initializer="he_normal",
                kernel_regularizer=self.kernel_regularizer,
tensorflow.variable_scope
4,662
import tensorflow as tf

if self.weight_decay > 0:
    wd_loss = regularize_cost(self.weight_decay_pattern,
                              tf.contrib.layers.l2_regularizer(self.weight_decay),
                              name='l2_regularize_loss')
    add_moving_summary(loss, wd_loss)
tensorflow.contrib.layers.l2_regularizer
4,663
from tensorflow.python.ops import math_ops

def compute_precision(name):
    return math_ops.select(
        math_ops.greater(true_positives + false_positives, 0),
        math_ops.div(true_positives, true_positives + false_positives),
        0,
        name)
tensorflow.python.ops.math_ops.div
4,664
import tensorflow as tf

    word_mat, dtype=tf.float32), trainable=False)
self.char_mat = tf.get_variable(
    "char_mat", initializer=tf.constant(char_mat, dtype=tf.float32))

self.c_mask = tf.cast(self.c, tf.bool)
self.q_mask = tf.cast(self.q, tf.bool)
self.c_len = tf.reduce_sum(tf.cast(self.c_mask, tf.int32), axis=1)
self.q_len = tf.reduce_sum(tf.cast(self.q_mask, tf.int32), axis=1)
tensorflow.cast
4,665
import tensorflow as tf

                     [1, 0]], dtype=tf.float32)
masks = tf.stack([mask0, mask1, mask2, mask3, mask4, mask5])
classes = tf.constant([1, 2, 3, 1, 2, 3], dtype=tf.int32)
scores = tf.constant([1.0, 0.9, 0.8, 0.95, 0.85, 0.6], dtype=tf.float32)
(nms_masks1,
 nms_scores1,
tensorflow.constant
4,666
import tensorflow as tf

    [100, 8], minval=-10, maxval=10.0, dtype=tf.float32)
centers = tf.random.uniform(
    [5, 8], minval=-10, maxval=10.0, dtype=tf.float32)
distances1 = isu.inputs_distances_to_centers(inputs, centers)
num_centers = tf.shape(centers)[0]
inputs_reshaped = tf.tile(tf.expand_dims(inputs, axis=1),
                          tf.stack([1, num_centers, 1]))
distances2 = tf.reduce_sum(tf.square(inputs_reshaped - centers), axis=2)
tensorflow.shape
4,667
import tensorflow as tf

def sequence_loss(logits, targets, weights, average_across_timesteps=False,
                  average_across_batch=True, rewards=None):
    batch_size = tf.shape(targets)[0]
    time_steps = tf.shape(targets)[1]

    logits_ = tf.reshape(logits, tf.stack([time_steps * batch_size, logits.get_shape()[2].value]))
    targets_ = tf.reshape(targets, tf.stack([time_steps * batch_size]))

    crossent = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits_, labels=targets_)
    crossent = tf.reshape(crossent, tf.stack([batch_size, time_steps]))

    if rewards is not None:
        crossent *= tf.stop_gradient(rewards)

    log_perp = tf.reduce_sum(crossent * weights, axis=1)

    if average_across_timesteps:
        total_size = tf.reduce_sum(weights, axis=1)
        total_size += 1e-12  # just to avoid division by 0 for all-0 weights
        log_perp /= total_size

    cost = tf.reduce_sum(log_perp)
tensorflow.stop_gradient
4,668
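For quick reference, a minimal standalone sketch of the labeled API, tf.stop_gradient, which passes values through unchanged in the forward pass but blocks gradients in the backward pass (TF2 eager mode; illustrative only, not part of the dataset):

import tensorflow as tf

x = tf.Variable(3.0)
with tf.GradientTape() as tape:
    y = x * tf.stop_gradient(x)  # second factor is treated as a constant
grad = tape.gradient(y, x)  # 3.0 rather than 6.0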
import tensorflow as tf

batch_mean, batch_var = tf.nn.moments(x, [0, 1, 2])
mu = tf.get_variable('mu', batch_mean.shape, dtype=tf.float32,
                     initializer=tf.zeros_initializer(), trainable=False)
tf.add_to_collection(tf.GraphKeys.GLOBAL_VARIABLES, mu)
tf.add_to_collection('mu_sigma_bn', mu)
sigma = tf.get_variable('sigma', batch_var.shape, dtype=tf.float32,
                        initializer=tf.ones_initializer(), trainable=False)
tf.add_to_collection(tf.GraphKeys.GLOBAL_VARIABLES, sigma)
tf.add_to_collection('mu_sigma_bn', sigma)
beta = tf.get_variable('beta', batch_mean.shape, dtype=tf.float32,
                       initializer=tf.zeros_initializer())
gamma = tf.get_variable('gamma', batch_var.shape, dtype=tf.float32,
                        initializer=tf.ones_initializer())

# BN when training
update = 1.0 - decay
update_mu = mu.assign_sub(update * (mu - batch_mean))
update_sigma = sigma.assign_sub(update * (sigma - batch_var))
tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_mu)
tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_sigma)

mean, var = tf.cond(self.train_flag, lambda: (batch_mean, batch_var), lambda: (mu, sigma))
bn = tf.nn.batch_normalization(x, mean, var, beta, gamma, 1e-5)
tf.add_to_collection('debug_layers', bn)
tensorflow.ones_initializer
4,669
import tensorflow as tf

top_antecedent_cluster_ids = tf.gather(top_span_cluster_ids, top_antecedents)  # [k, c]
top_antecedent_cluster_ids += tf.to_int32(tf.log(tf.to_float(top_antecedents_mask)))  # [k, c]
same_cluster_indicator = tf.equal(top_antecedent_cluster_ids,
                                  tf.expand_dims(top_span_cluster_ids, 1))  # [k, c]
non_dummy_indicator = tf.expand_dims(top_span_cluster_ids > 0, 1)  # [k, 1]
pairwise_labels = tf.logical_and(same_cluster_indicator, non_dummy_indicator)  # [k, c]
dummy_labels = tf.logical_not(tf.reduce_any(pairwise_labels, 1, keepdims=True))  # [k, 1]
top_antecedent_labels = tf.concat([dummy_labels, pairwise_labels], 1)  # [k, c + 1]
loss = self.softmax_loss(top_antecedent_scores, top_antecedent_labels)  # [k]
loss = tf.reduce_sum(loss)  # []

return [candidate_starts, candidate_ends, candidate_mention_scores,
        top_span_starts, top_span_ends, top_antecedents, top_antecedent_scores], loss

def get_span_emb(self, head_emb, context_outputs, span_starts, span_ends):
tensorflow.concat
4,670
import tensorflow as tf

        x = tf.layers.dense(x, units=h, activation=activation)
        x = tf.layers.dropout(x, rate=dropout_rate, training=True)
    x = tf.layers.dropout(x, rate=dropout_rate, training=True)
    return tf.layers.dense(x, units=hidden_sizes[-1], activation=output_activation)

def mlp(x, hidden_sizes=(32,), activation=tf.tanh, output_activation=None):
    for h in hidden_sizes[:-1]:
        x = tf.layers.dense(x, units=h, activation=activation)
    return tf.layers.dense(x, units=hidden_sizes[-1], activation=output_activation)

def get_vars(scope):
    return [x for x in tf.global_variables() if scope in x.name]

def count_vars(scope):
    v = get_vars(scope)
    return sum([np.prod(var.shape.as_list()) for var in v])
tensorflow.layers.dense
4,671
import tensorflow as tf

ndims = len(outputs.shape.as_list())
actual_batch_size = tf.shape(features["inputs"])[0]
outputs = tf.slice(
    outputs, [0] * ndims, [actual_batch_size] + [-1] * (ndims - 1))
tensorflow.slice
4,672
import tensorflow as tf

# These should hold all of the variables of the Q-function network and target network,
# respectively. A convenient way to get these is to make use of TF's "scope" feature.
# For example, you can create your Q-function network with the scope "q_func" like this:
#   <something> = q_func(obs_t_float, num_actions, scope="q_func", reuse=False)
# And then you can obtain the variables like this:
#   q_func_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='q_func')
# Older versions of TensorFlow may require using "VARIABLES" instead of "GLOBAL_VARIABLES"
######

def q_online(obs_float):
    return q_func(obs_float, num_actions, scope="online_q_func", reuse=tf.AUTO_REUSE)

# Q-function network and target network
q_online_t = q_online(obs_t_float)
q_online_tp1 = q_online(obs_tp1_float)
q_func_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='online_q_func')
q_target = q_func(obs_tp1_float, num_actions, scope="target_q_func", reuse=False)
target_q_func_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='target_q_func')

# Bellman training error
if double_q:
    q_max = gather_2d(q_target, tf.argmax(q_online_tp1, axis=1, output_type=tf.int32))
else:
    q_max = tf.reduce_max(q_target, axis=1)
target = rew_t_ph + gamma * q_max * (1.0 - done_mask_ph)
q_t_act = gather_2d(q_online_t, act_t_ph)
total_error = tf.reduce_mean(huber_loss(target - q_t_act))

######
# construct optimization op (with gradient clipping)
tensorflow.get_collection
4,673
import tensorflow as tf

        else:
            assert tf.get_variable_scope().reuse is False
        d = tf.contrib.layers.conv2d(layer_input, filters, kernel_size=f_size,
                                     stride=stride, padding=padding)
        if norm:
            d = tf.contrib.layers.batch_norm(d)
        d = lrelu(d, alpha=0.2)
        return d

# def common_deconv2d(layer_input,skip_input, filters,f_size=4,stride=2,dropout_rate=0,name='common_deconv2d'):
def common_deconv2d(layer_input, filters, f_size=4, stride=2, padding='SAME',
                    dropout_rate=0, name='common_deconv2d'):
    """Layers used during upsampling"""
    with tf.variable_scope(name):
        if reuse:
            tf.get_variable_scope().reuse_variables()
        else:
            assert tf.get_variable_scope().reuse is False
        u = tf.contrib.layers.conv2d_transpose(layer_input, filters, f_size,
                                               stride=stride, padding=padding)
        if dropout_rate:
            u = tf.contrib.layers.dropout(u, keep_prob=dropout_rate)
        u = tf.contrib.layers.batch_norm(u)
tensorflow.variable_scope
4,674
from tensorflow.contrib import slim

    depthwise_regularizer = regularizer
else:
    depthwise_regularizer = None

with slim.arg_scope(
        [slim.conv2d, slim.separable_conv2d],
        weights_initializer=weights_init,
tensorflow.contrib.slim.arg_scope
4,675
import tensorflow as tf

return tf.cast(tf.map_fn(_minibatch_subsample_fn,
                         [indicators, labels],
tensorflow.map_fn
4,676
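For quick reference, a minimal standalone sketch of the labeled API, tf.map_fn, which applies a function to each slice of a tensor along axis 0; illustrative only, not part of the dataset:

import tensorflow as tf

elems = tf.constant([1.0, 2.0, 3.0])
squares = tf.map_fn(lambda e: e * e, elems)  # [1.0, 4.0, 9.0]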
import tensorflow as tf

tower_grads = []
biases_regularizer = tf.no_regularizer
weights_regularizer = tf.contrib.layers.l2_regularizer(cfgs.WEIGHT_DECAY)
tensorflow.contrib.layers.l2_regularizer
4,677
import tensorflow as tf

positive_idx = tf.logical_and(labels, indicator)
negative_idx = tf.logical_and(negative_idx, indicator)

# Sample positive and negative samples separately
if sample_size is None:
    max_num_pos = tf.reduce_sum(tf.cast(positive_idx, dtype=tf.int32))
else:
    max_num_pos = int(positive_fraction * sample_size)
sampled_pos_idx = subsample_indicator(positive_idx, max_num_pos)
num_sampled_pos = tf.reduce_sum(tf.cast(sampled_pos_idx, tf.int32))
if sample_size is None:
    negative_positive_ratio = (1 - positive_fraction) / positive_fraction
    max_num_neg = tf.cast(negative_positive_ratio *
                          tf.cast(num_sampled_pos, dtype=tf.float32),
                          dtype=tf.int32)
else:
    max_num_neg = sample_size - num_sampled_pos

sampled_neg_idx = subsample_indicator(negative_idx, max_num_neg)

return tf.logical_or(sampled_pos_idx, sampled_neg_idx)

def batch_sample_balanced_positive_negative(indicators, sample_size, labels,
tensorflow.cast
4,678
import tensorflow as tf

    channels = tensor_dict[fields.InputDataFields.image_additional_channels]
    tensor_dict[fields.InputDataFields.image] = tf.concat(
        [tensor_dict[fields.InputDataFields.image], channels], axis=2)

# Apply data augmentation ops.
if data_augmentation_fn is not None:
    tensor_dict = data_augmentation_fn(tensor_dict)

# Apply model preprocessing ops and resize instance masks.
image = tensor_dict[fields.InputDataFields.image]
preprocessed_resized_image, true_image_shape = model_preprocess_fn(
    tf.expand_dims(tf.cast(image, dtype=tf.float32), axis=0))
if use_bfloat16:
    preprocessed_resized_image = tf.cast(
        preprocessed_resized_image, tf.bfloat16)
tensor_dict[fields.InputDataFields.image] = tf.squeeze(
    preprocessed_resized_image, axis=0)
tensor_dict[fields.InputDataFields.true_image_shape] = tf.squeeze(
    true_image_shape, axis=0)
if fields.InputDataFields.groundtruth_instance_masks in tensor_dict:
    masks = tensor_dict[fields.InputDataFields.groundtruth_instance_masks]
    _, resized_masks, _ = image_resizer_fn(image, masks)
    if use_bfloat16:
        resized_masks = tf.cast(resized_masks, tf.bfloat16)
    tensor_dict[fields.InputDataFields.groundtruth_instance_masks] = resized_masks
tensorflow.cast
4,679
import tensorflow as tf

#!/usr/bin/python2.7
# -*- coding:utf-8 -*-
# Author: NetworkRanger
# Date: 2018/12/22 4:06 PM

# 10.3 Concurrent execution in TensorFlow

# 1. To find out which TensorFlow operations are using which devices, we pass a
# config argument when creating the graph session, with log_device_placement set
# to True. When we run the script from the command line, we will see the device
# placement in the output.

sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))

a = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[2, 3], name='a')
b = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[3, 2], name='b')
c = tf.matmul(a, b)

# Runs the op.
print(sess.run(c))

# 2. Run the following command from the console
"""
$ python3 using_multpile_devices.py
tensorflow.ConfigProto
4,680
import tensorflow as tf

loss_loc = _smooth_l1_loss(tf.boolean_mask(loc_true, mask_pos_b),
                           tf.boolean_mask(loc_pred, mask_pos_b))
loss_loc = tf.reduce_mean(loss_loc)

# classification loss (crossentropy)
# 1. compute max conf across batch for hard negative mining
loss_class = tf.where(mask_neg,
                      1 - class_pred[:, 0][..., tf.newaxis], 0)

# 2. hard negative mining
loss_class = tf.reshape(loss_class, [num_batch, num_prior])
loss_class_idx = tf.argsort(loss_class, axis=1, direction='DESCENDING')
tensorflow.where
4,681
import tensorflow as tf """ Get items from this collection that are added in the current tower. """ return self._collection_guard.get_collection_in_tower(key) # TODO currently only used in StagingInput @property def index(self): return self._index @call_only_once def _get_scopes(self): if not len(self._name): # work around https://github.com/tensorflow/tensorflow/issues/14703 return [tf.variable_scope(tf.get_variable_scope())] ret = [] # either the Tower was originally created with reuse, # or a training tower without vs has to use reuse. reuse = (self.is_training and self._index > 0 and not self.has_own_variables) or self._initial_vs_reuse if len(self._vs_name): ret.append(tf.variable_scope(self._vs_name, reuse=reuse)) else: if reuse: ret.append(tf.variable_scope( tf.get_variable_scope(), reuse=True))
tensorflow.get_variable_scope
4,682
import tensorflow as tf

pre_sigma = tf.get_variable(name + "_standard_deviation", shape,
                            initializer=std_init, dtype=dtype)
standard_deviation = tf.nn.softplus(pre_sigma) + 1e-5

# The famous reparametrization formula for the factorized Gaussian
noise = tf.random_normal([num_samples] + shape, 0.0, 1.0, dtype)
weights = mean + standard_deviation * noise
tensorflow.nn.softplus
4,683
from tensorflow.python.ops import state_ops

with variable_scope.variable_scope(name, 'mean', [values, weights]):
    total = _create_local('total_tensor', shape=values.get_shape())
    count = _create_local('count_tensor', shape=values.get_shape())

    num_values = array_ops.ones_like(values)
    if weights is not None:
        weights = math_ops.to_float(weights)
        values = math_ops.mul(values, weights)
        num_values = math_ops.mul(num_values, weights)

    total_compute_op = state_ops.assign_add(total, values)
    count_compute_op = state_ops.assign_add(count, num_values)

    def compute_mean(total, count, name):
        non_zero_count = math_ops.maximum(count,
                                          array_ops.ones_like(count),
                                          name=name)
        return math_ops.truediv(total, non_zero_count, name=name)

    mean = compute_mean(total, count, 'value')
tensorflow.python.ops.state_ops.assign_add
4,684
from tensorflow.python.ops import math_ops

def compute_recall(true_positives, false_negatives, name):
    return math_ops.select(
        math_ops.greater(true_positives + false_negatives, 0),
        math_ops.div(true_positives, true_positives + false_negatives),
tensorflow.python.ops.math_ops.greater
4,685
import tensorflow as tf

    1,
])
src_features = tf.gather(src_features, indices, axis=0)
src_features = tf.stop_gradient(src_features)
src_labels = tf.gather(src_labels, indices)

inst_weights = bs * inst_weights / tf.reduce_sum(inst_weights)

src_one_hot_labels = tf.one_hot(tf.cast(src_labels, tf.int64), num_classes)
tensorflow.gather
4,686
from tensorflow.contrib.eager.python.examples.l2hmc import l2hmc

self.assertEqual(x_.shape, x_out.shape)
self.assertEqual(x_accept_prob.shape, (hparams.n_samples,))

# Graph mode testing
with tf.Graph().as_default():
    energy_fn, _, _ = l2hmc.get_scg_energy_fn()
    dynamics = l2hmc.Dynamics(
        x_dim=hparams.x_dim,
        minus_loglikelihood_fn=energy_fn,
        n_steps=hparams.n_steps,
        eps=hparams.eps)
    x = tf.placeholder(tf.float32, shape=[None, hparams.x_dim])
tensorflow.contrib.eager.python.examples.l2hmc.l2hmc.Dynamics
4,687
import tensorflow as tf

    loss = tf.reshape(per_example_loss, [-1, tf.shape(positions)[1]])
    # TODO: dynamic gather from per_example_loss
    return loss

def gather_indexes(sequence_tensor, positions):
    """Gathers the vectors at the specific positions over a minibatch."""
    sequence_shape = modeling.get_shape_list(sequence_tensor, expected_rank=3)
    batch_size = sequence_shape[0]
    seq_length = sequence_shape[1]
    width = sequence_shape[2]

    flat_offsets = tf.reshape(
        tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1])
    flat_positions = tf.reshape(positions + flat_offsets, [-1])
    flat_sequence_tensor = tf.reshape(sequence_tensor,
                                      [batch_size * seq_length, width])
    output_tensor = tf.gather(flat_sequence_tensor, flat_positions)
    return output_tensor

def input_fn_builder(features, seq_length, max_predictions_per_seq):
    """Creates an `input_fn` closure to be passed to TPUEstimator."""

    all_input_ids = []
    all_input_mask = []
    all_segment_ids = []
    all_masked_lm_positions = []
    all_masked_lm_ids = []
tensorflow.reshape
4,688
import tensorflow as tf

# Count unique items to determine embedding matrix sizes
entity_cnt = len(set(train_triples[:, 0]).union(train_triples[:, 2]))
rel_cnt = len(set(train_triples[:, 1]))

init_sd = 1.0 / np.sqrt(self.embedding_size)

# Embedding variables for all entities and relationship types
entity_embedding_shape = [entity_cnt, self.embedding_size]
# Relationship embeddings will be stored in flattened format to make
# applying maxnorm constraints easier
rel_embedding_shape = [rel_cnt, self.embedding_size * self.embedding_size]
entity_init = tf.truncated_normal(entity_embedding_shape, stddev=init_sd)
rel_init = tf.truncated_normal(rel_embedding_shape, stddev=init_sd)

if self.maxnorm is not None:
    # Ensure maxnorm constraints are initially satisfied
    entity_init = dense_maxnorm(entity_init, self.maxnorm)
    rel_init = dense_maxnorm(rel_init, self.maxnorm)

self.entity_embedding_vars = tf.Variable(entity_init)
self.rel_embedding_vars = tf.Variable(rel_init)

# Embedding layer for each (head, rel, tail) triple being fed in as input
head_embed = tf.nn.embedding_lookup(self.entity_embedding_vars, self.head_input)
tail_embed = tf.nn.embedding_lookup(self.entity_embedding_vars, self.tail_input)
rel_embed = tf.nn.embedding_lookup(self.rel_embedding_vars, self.rel_input)

# Reshape rel_embed into square D x D matrices
rel_embed_square = tf.reshape(rel_embed, (-1, self.embedding_size, self.embedding_size))

# Reshape head_embed and tail_embed to be suitable for the matrix multiplication
head_embed_row = tf.expand_dims(head_embed, 1)  # embeddings as row vectors
tail_embed_col = tf.expand_dims(tail_embed, 2)  # embeddings as column vectors
head_rel_mult = tf.batch_matmul(head_embed_row, rel_embed_square)

# Output needs a squeeze into a 1d vector
raw_output = tf.squeeze(tf.batch_matmul(head_rel_mult, tail_embed_col))
self.output, self.loss = self._create_output_and_loss(raw_output)
tensorflow.Variable
4,689
import tensorflow as tf

    out = tf.nn.sigmoid_cross_entropy_with_logits(logits=tensor, labels=target)
    return out

@layer
def mean_loss_by_example_layer(tensor, sequence_length, **opts):
    loss = tf.div(
        tf.reduce_sum(tensor, axis=1),
        tf.cast(sequence_length, dtype=tf.float32)
    )
    out = tf.reduce_mean(loss)
    tf.summary.scalar('cost', out)
    return out

@layer
def conv1d_layer(tensor, dilation_rate=1, **opts):
    raise NotImplementedError
tensorflow.reduce_mean
4,690
import tensorflow as tf tf.summary.histogram("mel_targets %d" % i, model.tower_linear_targets[i]) tf.summary.scalar("regularization_loss", model.regularization_loss) tf.summary.scalar("stop_token_loss", model.stop_token_loss) tf.summary.scalar("loss", model.loss) tf.summary.scalar("learning_rate", model.learning_rate) # Control learning rate decay speed if hparams.tacotron_teacher_forcing_mode == "scheduled": tf.summary.scalar("teacher_forcing_ratio", model.ratio) # Control teacher forcing # ratio decay when mode = "scheduled" gradient_norms = [tf.norm(grad) for grad in model.gradients] tf.summary.histogram("gradient_norm", gradient_norms) tf.summary.scalar("max_gradient_norm", tf.reduce_max(gradient_norms)) # visualize # gradients (in case of explosion) return tf.summary.merge_all() def add_eval_stats(summary_writer, step, linear_loss, before_loss, after_loss, stop_token_loss, loss):
tensorflow.norm
4,691
import tensorflow as tf

cropped_shape = control_flow_ops.with_dependencies(
    [rank_assertion],
    tf.pack([crop_height, crop_width, original_shape[2]]))

size_assertion = tf.Assert(
    tf.logical_and(
        tf.greater_equal(original_shape[0], crop_height),
        tf.greater_equal(original_shape[1], crop_width)),
    ['Crop size greater than the image size.'])

offsets = tf.to_int32(tf.pack([offset_height, offset_width, 0]))

# Use tf.slice instead of crop_to_bounding box as it accepts tensors to
# define the crop size.
tensorflow.greater_equal
4,692
from tensorflow.python.ops import array_ops

def rate(self):
    """Rate parameter."""
    return self._rate

def _batch_shape_tensor(self):
    return array_ops.shape(self.rate)

def _batch_shape(self):
    return self.rate.get_shape()
tensorflow.python.ops.array_ops.shape
4,693
import tensorflow as tf

self.deconv_4 = self.deconv_bn_relu(self.deconv_3, name='deconv_4', kernel_size=3, output_channels=128,
                                    initializer=tf.contrib.layers.variance_scaling_initializer(),
                                    stride=2, bn=True, training=self.is_training)  # 112*112
self.deconv_5 = self.deconv_bn_relu(self.deconv_4, name='deconv_5', kernel_size=3, output_channels=64,
                                    initializer=tf.contrib.layers.variance_scaling_initializer(),
                                    stride=2, bn=True, training=self.is_training)  # 224*224
# self.final_layer = self.conv_layer(bottom=self.deconv_5, kernal_size=1, in_channels=64, out_channels=3, stride=1, name='final_layer')
self.final_layer = self.conv_bn_relu(bottom=self.deconv_5, name='final_layer', kernel_size=1, output_channels=3,
                                     initializer=tf.contrib.layers.variance_scaling_initializer(),
                                     bn=False, training=self.is_training, relu=False)

# self.pool5 = self.avg_pool(self.block4_3, 7, 1, "pool5")
# self.fc0 = self.fc_layer(self.pool5, 2048, 1024, "fc0")
# self.relu1 = tf.nn.relu(self.fc0)
# if train_mode is not None:
#     self.relu1 = tf.cond(train_mode, lambda: tf.nn.dropout(self.relu1, self.dropout), lambda: self.relu1)
# elif self.trainable:
#     self.relu1 = tf.nn.dropout(self.relu1, self.dropout)

self.y_soft = tf.nn.softmax(self.final_layer)
self.logits = tf.reshape(self.final_layer, (-1, 3))
print(self.logits)
self.predicted = tf.argmax(self.final_layer, axis=3)
print(self.predicted.get_shape().as_list())

# cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=self.labels, logits=logits, name=None)
# self.loss = tf.reduce_mean(cross_entropy, name='xcross_entropy')
# if (last_layer_type == "sigmoid"):
#     self.prob = tf.nn.sigmoid(self.fc1, name="prob")
# elif (last_layer_type == "softmax"):
#     self.prob = tf.nn.softmax(self.fc1, name="prob")

self.data_dict = None
tensorflow.nn.softmax
4,694
import tensorflow as tf

    inputs = tf.transpose(inputs, [0, 2, 3, 1])

ksize = int(6 * sigma + 1.)
x = tf.expand_dims(tf.range(ksize, delta=1, dtype=tf.float32), axis=1)
y = tf.transpose(x, [1, 0])
kernel_matrix = tf.exp(-((x - ksize / 2.) ** 2 + (y - ksize / 2.) ** 2) / (2 * sigma ** 2))
# print(kernel_matrix)
kernel_filter = tf.reshape(kernel_matrix, [ksize, ksize, 1, 1])
kernel_filter = tf.tile(kernel_filter, [1, 1, inputs_filters, 1])
# kernel_filter = tf.transpose(kernel_filter, [1, 0, 2, 3])
outputs = tf.nn.depthwise_conv2d(inputs, kernel_filter,
                                 strides=[1, 1, 1, 1], padding='SAME',
                                 data_format=data_format_, name='blur')
if data_format_ == 'NHWC':
    outputs = tf.transpose(outputs, [0, 3, 1, 2])
return outputs
tensorflow.tile
4,695
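For quick reference, a minimal standalone sketch of the labeled API, tf.tile, which repeats a tensor a given number of times along each axis; illustrative only, not part of the dataset:

import tensorflow as tf

t = tf.constant([[1, 2], [3, 4]])
tiled = tf.tile(t, [2, 3])  # shape [4, 6]: 2 copies along rows, 3 along columns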
import tensorflow as tf

if options.pointer_gen:
    with tf.variable_scope('calculate_pgen'):
        p_gen = linear([context_t, state_t.c, state_t.h, x], 1, True)  # [batch_size, 1]
        p_gen = tf.sigmoid(p_gen)

# Concatenate the cell_output (= decoder state) and the context vector, and pass them through a linear layer
tensorflow.sigmoid
4,696
import tensorflow as tf

b_t1 = utils.bias_variable([deconv_shape1[3].value], name="b_t1")
conv_t1 = utils.conv2d_transpose_strided(concat1, W_t1, b_t1,
                                         output_shape=tf.shape(image_net["pool4"]))
tensorflow.shape
4,697
import tensorflow as tf

p = tf.gather_nd(x_, blk_indices_)

# Reshape patches.
p = tf.reshape(p, [blk_shape[0], blk_shape[1], blk_shape[2], -1])

# Convolution on patches.
tensorflow.reshape
4,698
import tensorflow as tf """Learning rate decay factor.""") tf.flags.DEFINE_float('momentum', 0.9, """Momentum for training.""") tf.flags.DEFINE_float('rmsprop_decay', 0.9, """Decay term for RMSProp.""") tf.flags.DEFINE_float('rmsprop_momentum', 0.9, """Momentum in RMSProp.""")
tensorflow.flags.DEFINE_float
4,699