Columns:
  seed: string (lengths 25 to 2.89k)
  seed_api: string (lengths 14 to 102)
  index: int64 (0 to 14.8k)
import tensorflow as tf

for mtype, (classes, sdfs, poses) in enumerate([
    (labeled_classes, labeled_sdfs, labeled_poses),
    (predicted_classes, predicted_sdfs, predicted_poses)]):
  for i in range(classes.shape[0]):
    if class_id == classes[i]:
      sdf = tf.expand_dims(sdfs[i], -1)
      sdf = sdf * -1.0  # inside positive, outside zero
      samples_object = centernet_utils.transform_pointcloud(
          tf.reshape(samples_world, [1, 1, -1, 3]),
          tf.reshape(poses[2][i], [1, 1, 3]),
          tf.reshape(poses[0][i], [1, 1, 3, 3]),
          tf.reshape(poses[1][i], [1, 1, 3]), inverse=True) * 2.0
      samples_object = \
          (samples_object * (29.0/32.0) / 2.0 + 0.5) * 32.0 - 0.5
      samples = tf.squeeze(samples_object)
      interpolated = trilinear.interpolate(sdf, samples)
tensorflow.reshape
4,100
import tensorflow as tf

# Running this command requires an internet connection and a few minutes to download all the images.
(X_train, y_train), (X_test, y_test) = tf.contrib.keras.datasets.cifar10.load_data()
tensorflow.contrib.keras.datasets.cifar10.load_data
4,101
import tensorflow as tf

with tf.variable_scope(tf.get_variable_scope(), reuse=True):
  outputs_to_scales_to_logits_reversed = multi_scale_logits(
      tf.reverse_v2(images, [2]),
      model_options=model_options,
      image_pyramid=[image_scale],
      is_training=False,
      fine_tune_batch_norm=False)

for output in sorted(outputs_to_scales_to_logits):
  scales_to_logits = outputs_to_scales_to_logits[output]
  logits = tf.image.resize_bilinear(
      scales_to_logits[_MERGED_LOGITS_SCOPE],
      tf.shape(images)[1:3],
      align_corners=True)
  outputs_to_predictions[output].append(
      tf.expand_dims(tf.nn.softmax(logits), 4))

  if add_flipped_images:
    scales_to_logits_reversed = (
        outputs_to_scales_to_logits_reversed[output])
    logits_reversed = tf.image.resize_bilinear(
        tf.reverse_v2(scales_to_logits_reversed[_MERGED_LOGITS_SCOPE], [2]),
        tf.shape(images)[1:3],
        align_corners=True)
tensorflow.shape
4,102
import tensorflow as tf
import numpy as np
import tensorflow as tf

class VariationalDense:
    """Variational Dense Layer Class"""
    def __init__(self, n_in, n_out, dropout_mask_ph, model_prob=0.9, model_lam=3e-4, activation=None, name="hidden"):
        self.model_prob = model_prob  # probability to keep units
        self.model_lam = model_lam  # l^2 / 2*tau: l=1e-2, tau=[0.1, 0.15, 0.2]
        self.dropout_mask_ph = dropout_mask_ph  # placeholder: p_s * i_s
        self.p_s = tf.shape(self.dropout_mask_ph)[0]  # post sample size
        self.DM = tf.zeros(shape=[self.p_s, n_in, n_in])  # Dropout masks: p_s * i_s * i_s
        self.DM = tf.linalg.set_diag(self.DM, self.dropout_mask_ph)
        kernel_initializer = tf.initializers.truncated_normal(mean=0.0, stddev=0.01)
        self.model_W = tf.get_variable("{}_W".format(name), initializer=kernel_initializer([n_in, n_out]))  # variational parameters
        self.model_b = tf.get_variable("{}_b".format(name), initializer=tf.zeros([n_out]))
        self.model_DMW = tf.einsum('pij,jk->pik', self.DM, self.model_W)  # Masked weight: p_s * i_s * o_s
        self.model_tiled_b = tf.tile(tf.reshape(self.model_b, [1, n_out]), [self.p_s, 1])

        if activation is None:
            self.activation = tf.identity
        else:
            self.activation = activation
tensorflow.linalg.set_diag
4,103
import tensorflow as tf

        tf.reshape(samples_world, [1, 1, -1, 3]),
        tf.reshape(poses[2][i], [1, 1, 3]),
        tf.reshape(poses[0][i], [1, 1, 3, 3]),
        tf.reshape(poses[1][i], [1, 1, 3]), inverse=True) * 2.0
samples_object = (samples_object * (29.0/32.0) / 2.0 + 0.5) * 32.0 - 0.5
samples = tf.squeeze(samples_object)
interpolated = trilinear.interpolate(sdf, samples)
occupancy_value = tf.math.sign(tf.nn.relu(interpolated + self.tol))
sdf_values += occupancy_value
intersection = tf.reduce_sum(tf.math.sign(tf.nn.relu(sdf_values - 1)))
tensorflow.squeeze
4,104
import tensorflow as tf

    averages_op = averages.apply(ops)
    for op in ops:
        tf.scalar_summary(scope_pfix + op.name + raw_pfix, op)
        tf.scalar_summary(scope_pfix + op.name + avg_pfix,
                          averages.average(op))
    with tf.control_dependencies([averages_op]):
        for i, dep_op in enumerate(dep_ops):
            dep_ops[i] = tf.identity(dep_op, name=dep_op.name.split(':')[0])
    return dep_ops

def exp_average(vec, curr_avg, decay=0.9):
    vec_avg = tf.reduce_mean(vec, 0)
    avg = tf.assign(curr_avg, curr_avg * decay + vec_avg * (1-decay))
    return avg

def gather_vec_pairs(vecs, gather_inds):
    """ gather obj-subj feature pairs """
    vec_pairs = tf.gather(vecs, gather_inds)
    vec_len = int(vec_pairs.get_shape()[2]) * 2
    vec_pairs = tf.reshape(vec_pairs, [-1, vec_len])
    return vec_pairs

def pad_and_gather(vecs, mask_inds, pad=None):
tensorflow.reduce_mean
4,105
import tensorflow as tf

  return (loss, per_example_loss, logits, probabilities)

def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate,
                     num_train_steps, num_warmup_steps, use_tpu,
                     use_one_hot_embeddings):
  """Returns `model_fn` closure for TPUEstimator."""

  def model_fn(features, labels, mode, params):  # pylint: disable=unused-argument
    """The `model_fn` for TPUEstimator."""
    tf.logging.info("*** Features ***")
    for name in sorted(features.keys()):
      tf.logging.info("  name = %s, shape = %s" % (name, features[name].shape))

    input_ids = features["input_ids"]
    input_mask = features["input_mask"]
    segment_ids = features["segment_ids"]
    label_ids = features["label_ids"]
    is_training = (mode == tf.estimator.ModeKeys.TRAIN)
tensorflow.logging.info
4,106
import tensorflow as tf

      xlnet_config=xlnet_config,
      run_config=run_config,
      input_ids=inp,
      seg_ids=seg_id,
      input_mask=inp_mask)

  summary = xlnet_model.get_pooled_out(FLAGS.summary_type, FLAGS.use_summ_proj)

  with tf.variable_scope("model", reuse=tf.AUTO_REUSE):
    per_example_loss, logits = modeling.regression_loss(
        hidden=summary,
        labels=label,
        initializer=xlnet_model.get_initializer(),
        scope="regression_{}".format(FLAGS.task_name.lower()),
        return_logits=True)
tensorflow.variable_scope
4,107
import tensorflow as tf

  if boundaries is None:
    boundaries = tf.range(11, dtype=tf.float32) / 10.0
  elif isinstance(boundaries, int) or (isinstance(boundaries, tf.Tensor) and
                                       boundaries.get_shape().ndims == 0):
    min_value, max_value = _min_and_max(x, True)
    boundaries = tf.linspace(
        tf.cast(min_value, tf.float32), tf.cast(max_value, tf.float32),
        tf.cast(boundaries, tf.int64))

  # Shift the boundaries slightly to account for floating point errors,
  # and due to the fact that the rightmost boundary is essentially ignored.
  boundaries = tf.expand_dims(tf.cast(boundaries, tf.float32), 0) - 0.0001

  bucket_indices = tf_utils.assign_buckets(
      tf.cast(x, tf.float32), remove_leftmost_boundary(boundaries))
tensorflow.cast
4,108
import tensorflow as tf

    predictions = tf.reshape(predictions, [1, -1, heatmap_size*heatmap_size])
    pred_max = tf.reduce_max(predictions, axis=-1)
    pred_indices = tf.argmax(predictions, axis=-1)
    pred_x, pred_y = tf.cast(tf.floormod(pred_indices, heatmap_size), tf.float32), tf.cast(tf.floordiv(pred_indices, heatmap_size), tf.float32)
tensorflow.argmax
4,109
import tensorflow as tf

      self.assertEqual(3, len(res))
      self.assertEqual((2, 5), res[0].shape)
      res = sess.run([mem])
      self.assertEqual((2, 2), res[0].c.shape)
      self.assertEqual((2, 2), res[0].h.shape)

      # Test when num_decoder_symbols is provided, the size of decoder output
      # is num_decoder_symbols.
      with tf.variable_scope("decoder_symbols_seq2seq"):
        dec, mem = tf.nn.seq2seq.embedding_tied_rnn_seq2seq(
            enc_inp, dec_inp, cell, num_symbols=5, num_decoder_symbols=3,
            embedding_size=2)
        sess.run([tf.global_variables_initializer()])
        res = sess.run(dec)
      self.assertEqual(3, len(res))
      self.assertEqual((2, 3), res[0].shape)

      # Test externally provided output projection.
      w = tf.get_variable("proj_w", [2, 5])
tensorflow.nn.seq2seq.embedding_tied_rnn_seq2seq
4,110
import tensorflow as tf

                      strides)
  h_scale = strides[1]
  w_scale = strides[2]
  scale = tf.stack([1, h_scale, w_scale])
  indices *= scale

  # Since we always use VALID to perform pooling, shift is needed here.
  shift = tf.stack([0, (ksize[1] - 1) // 2, (ksize[2] - 1) // 2])
  indices += shift

  indices_ = tf.expand_dims(tf.expand_dims(indices, 1), 2)
  # indices_ = tf.tile(indices_, [1, ksize[1], ksize[2], 1])
  offset = _get_offset_array(ksize[0:3])
  indices_ += offset
tensorflow.stack
4,111
import tensorflow as tf

def minibatch_discrimination(x, n_kernels, dim_per_kernel, name):
    with tf.variable_scope(name):
        batch_size, nf = x.get_shape().as_list()
        h = linear(x, [nf, n_kernels*dim_per_kernel], 'h1')
        activation = tf.reshape(h, (batch_size, n_kernels, dim_per_kernel))

        big = tf.eye(batch_size)
        big = tf.expand_dims(big, 1)

        abs_dif = tf.reduce_sum(tf.abs(tf.expand_dims(activation, 3) - tf.expand_dims(tf.transpose(activation, [1, 2, 0]), 0)), 2)
        mask = 1. - big
        masked = tf.exp(-abs_dif) * mask

        def half(tens, second):
            m, n, _ = tens.get_shape().as_list()
            return tf.slice(tens, [0, 0, second*(batch_size/2)], [m, n, batch_size/2])

        f1 = tf.reduce_sum(half(masked, 0), 2) / tf.reduce_sum(half(mask, 0))
        f2 = tf.reduce_sum(half(masked, 1), 2) / tf.reduce_sum(half(mask, 1))

        return tf.concat([x, f1, f2], 1)

def batch_norm(x, train, name, decay=0.99, epsilon=1e-5):
    shape = x.get_shape().as_list()
tensorflow.exp
4,112
import tensorflow as tf

    logits = tf.matmul(input_tensor, output_weights, transpose_b=True)
    logits = tf.nn.bias_add(logits, output_bias)
    log_probs = tf.nn.log_softmax(logits, axis=-1)

    labels = tf.reshape(labels, [-1])
    one_hot_labels = tf.one_hot(labels, depth=2, dtype=tf.float32)

    per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
    loss = tf.reduce_mean(per_example_loss)
    return (loss, per_example_loss, log_probs)
tensorflow.one_hot
4,113
import tensorflow as tf

    self.d_loss += self._lambda * penalty_loss

    z_projs = tf.concat([z_projs_real, z_projs_fake], 0)
    z_aug_projs = tf.concat([z_aug_projs_real, z_aug_projs_fake], 0)

    sims_logits = tf.matmul(z_projs, z_aug_projs, transpose_b=True)
    logits_max = tf.reduce_max(sims_logits,1)
    sims_logits = sims_logits - tf.reshape(logits_max, [-1, 1])
    sims_probs = tf.nn.softmax(sims_logits)

    sim_labels = tf.constant(np.arange(bs * 2, dtype=np.int32))
    sims_onehot = tf.one_hot(sim_labels, bs * 2)
tensorflow.reduce_max
4,114
import tensorflow as tf

      elif not do_serve:
        tf.train.init_from_checkpoint(init_checkpoint, assignment_map)

    tf.logging.info("**** Trainable Variables ****")
    for var in tvars:
      init_string = ""
      if var.name in initialized_variable_names:
        init_string = ", *INIT_FROM_CKPT*"
      tf.logging.info("  name = %s, shape = %s%s", var.name, var.shape,
                      init_string)

    output_spec = None
    if mode == tf.estimator.ModeKeys.TRAIN:
      train_op = optimization.create_optimizer(
          total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)
tensorflow.logging.info
4,115
import tensorflow as tf

  )
  any_condition = tf.reduce_any(
      input_tensor=[divisbility_condition, less_than_condition],
tensorflow.reduce_any
4,116
from tensorflow.python.ops import math_ops

  # whether we should use the first or last index in case of ties.
  min_val = math_ops.reduce_min(math_ops.abs(sensitivities - sensitivity))
tensorflow.python.ops.math_ops.abs
4,117
import tensorflow as tf

    epsilon = 1e-5
    mean, var = tf.nn.moments(x, [1, 2], keep_dims=True)
    scale = tf.get_variable('scale', [x.get_shape()[-1]],
                            initializer=tf.truncated_normal_initializer(mean=1.0, stddev=0.02))
    offset = tf.get_variable('offset', [x.get_shape()[-1]],
                             initializer=tf.constant_initializer(0.0))
    out = scale*tf.div(x-mean, tf.sqrt(var+epsilon)) + offset
    return out
tensorflow.truncated_normal_initializer
4,118
import tensorflow as tf

        filter_shape = [encoder.attn_filter_length * 2 + 1, 1, 1, encoder.attn_filters]
        filter_ = get_variable('filter', filter_shape)
        prev_weights = tf.reshape(prev_weights, tf.stack([batch_size, time_steps, 1, 1]))
        conv = tf.nn.conv2d(prev_weights, filter_, [1, 1, 1, 1], 'SAME')
        conv = tf.squeeze(conv, axis=2)
        y += dense(conv, encoder.attn_size, use_bias=False, name='C_a')

    v = get_variable('v_a', [encoder.attn_size])
    return tf.reduce_sum(v * tf.tanh(y), axis=2)

def global_attention(state, hidden_states, encoder, encoder_input_length, scope=None, context=None, **kwargs):
    with tf.variable_scope(scope or 'attention_{}'.format(encoder.name)):
        if context is not None and encoder.use_context:
            state = tf.concat([state, context], axis=1)

        e = compute_energy(hidden_states, state, encoder, input_length=encoder_input_length, **kwargs)

        mask = tf.sequence_mask(encoder_input_length, maxlen=tf.shape(hidden_states)[1], dtype=tf.float32)
        e *= mask

        if encoder.attn_norm_fun == 'none':
            weights = e
        elif encoder.attn_norm_fun == 'sigmoid':
            weights = tf.nn.sigmoid(e)
        elif encoder.attn_norm_fun == 'max':
            weights = tf.one_hot(tf.argmax(e, -1), depth=tf.shape(e)[1])
        else:
            e -= tf.reduce_max(e, axis=1, keep_dims=True)
            T = encoder.attn_temperature or 1.0
tensorflow.concat
4,119
import tensorflow as tf

    # early.
    if tf.executing_eagerly():
      return

    # Test case 3.
    x = tf.placeholder_with_default(input=1, shape=None)
    is_scalar = normal._is_scalar_helper(x.shape, lambda: tf.shape(x))
    self.assertTrue(self.evaluate(is_scalar))

    x = tf.placeholder_with_default(input=[1], shape=None)
    is_scalar = normal._is_scalar_helper(x.shape, lambda: tf.shape(x))
    self.assertFalse(self.evaluate(is_scalar))

  def _GetFakeDistribution(self):

    class FakeDistribution(tfd.Distribution):
      """Fake Distribution for testing _set_sample_static_shape."""

      def __init__(self, batch_shape=None, event_shape=None):
tensorflow.placeholder_with_default
4,120
import tensorflow as tf

  eval_config.batch_size = 1
  eval_config.num_steps = 1

  with tf.Graph().as_default():
    initializer = tf.random_uniform_initializer(-config.init_scale,
                                                config.init_scale)

    with tf.name_scope("Train"):
      train_input = PTBInput(config=config, data=train_data, name="TrainInput")
      with tf.variable_scope("Model", reuse=None, initializer=initializer):
        m = PTBModel(is_training=True, config=config, input_=train_input)
      tf.summary.scalar("Training Loss", m.cost)
      tf.summary.scalar("Learning Rate", m.lr)

    with tf.name_scope("Valid"):
      valid_input = PTBInput(config=config, data=valid_data, name="ValidInput")
      with tf.variable_scope("Model", reuse=True, initializer=initializer):
        mvalid = PTBModel(is_training=False, config=config, input_=valid_input)
      tf.summary.scalar("Validation Loss", mvalid.cost)

    with tf.name_scope("Test"):
      test_input = PTBInput(
tensorflow.summary.scalar
4,121
import tensorflow as tf

  def extract_output(self, accumulator):
    result = accumulator.GetQuantiles(self._num_quantiles).to_pylist()
    if not result:
      return [np.zeros(self._output_shape, self._bucket_numpy_dtype)]
    result = np.array(result, self._bucket_numpy_dtype)
    # Trim elementwise results if max and min should be excluded.
    if not self._include_max_and_min:
      result = result[:, 1:-1]
    return [np.reshape(result, self._output_shape)]

  def output_tensor_infos(self):
    return [
        analyzer_nodes.TensorInfo(
            tf.as_dtype(self._bucket_numpy_dtype), self._output_shape, None)
    ]

  @property
  def accumulator_coder(self):
    return _QuantilesSketchCacheCoder()

class _QuantilesSketchCacheCoder(analyzer_nodes.CacheCoder):
  """Cache coder for the quantiles accumulator."""

  def encode_cache(self, accumulator):
    # TODO(b/174549940): Consider exposing and calling
    # `QuantilesSketch::Serialize` directly.
tensorflow.as_dtype
4,122
import tensorflow as tf

      loss: Loss tensor of type float.
    """
    with tf.name_scope('segment_loss'):
        # logits = tf.reshape(logits, (-1, num_classes))
        epsilon = tf.constant(value=1e-7)
        labels = tf.to_float(labels)
        # labels = tf.to_float(tf.reshape(labels, (-1, num_classes)))

        softmax = tf.nn.softmax(logits) + epsilon

        if head is not None:
            cross_entropy = -tf.reduce_sum(tf.mul(labels * tf.log(softmax), head), axis=[1])
        else:
            cross_entropy = -tf.reduce_sum(labels * tf.log(softmax), axis=[1])

        cross_entropy_mean = tf.reduce_mean(cross_entropy, name='xentropy_mean')
    return cross_entropy_mean

def triplet_loss(anchor, positive, negative, alpha=0.2, name='triplet_loss'):
    """Calculate the triplet loss according to the FaceNet paper.

    Args:
      anchor: 2-D `tensor` [batch_size, embedding_size], the embeddings for the anchor images.
      positive: 2-D `tensor` [batch_size, embedding_size], the embeddings for the positive images.
      negative: 2-D `tensor` [batch_size, embedding_size], the embeddings for the negative images.
      alpha: positive to negative triplet distance margin

    Returns:
      the triplet loss.
tensorflow.reduce_mean
4,123
import tensorflow as tf W_out_initializer = tf.constant_initializer( 0.1 * np.random.uniform(-1, 1, size=(self.N_out, self.N_rec))) b_rec_initializer = tf.constant_initializer(0.0) b_out_initializer = tf.constant_initializer(0.0) else: print("Loading Weights") weights = np.load(self.load_weights_path) init_state_initializer = tf.constant_initializer(weights['init_state']) W_in_initializer = tf.constant_initializer(weights['W_in']) W_rec_initializer = tf.constant_initializer(weights['W_rec']) W_out_initializer = tf.constant_initializer(weights['W_out']) b_rec_initializer = tf.constant_initializer(weights['b_rec']) b_out_initializer = tf.constant_initializer(weights['b_out']) self.input_connectivity_mask = weights['input_Connectivity'] self.recurrent_connectivity_mask = weights['rec_Connectivity'] self.output_connectivity_mask = weights['output_Connectivity'] self.init_state = tf.get_variable('init_state', [N_batch, N_rec], initializer=init_state_initializer)
tensorflow.constant_initializer
4,124
import tensorflow as tf with tf.Session() as sess: sess.run(tf.global_variables_initializer()) # nut.print_model_info() # nut.list_checkpoint_vars(self.get_latest_checkpoint().replace(EMB_SUFFIX, '')) self.saver = tf.train.Saver() self._restore_model(sess) # nut.print_model_info() encoding, decoding = None, None
tensorflow.train.Saver
4,125
import tensorflow as tf @gin.configurable(module='trax.data', denylist=['dataset', 'training']) def pad_dataset_to_length(dataset, training, len_map=None): """Pad features less than specified length to specified length.""" del training if len_map is None: return dataset def pad_to_len(x): for key, max_len in len_map.items(): x_shape = tf.shape(x[key]) x_len = x_shape[0] if x_len < max_len: pad_shape = [ max_len - x_len, ] zeros = tf.zeros(pad_shape, dtype=x[key].dtype) x[key] = tf.concat([x[key], zeros], 0) return x
tensorflow.shape
4,126
import tensorflow as tf """ with tf.name_scope(name): learning_rate = tf.cast(learning_rate, dtype=tf.float32) global_step = tf.cast(global_step, dtype=tf.float32) step_size = tf.cast(step_size, dtype=tf.float32) max_lr = tf.cast(max_lr, dtype=tf.float32) if mode == 'tri': periodic_comp = tf.mod((global_step + step_size / 4) / step_size, 1) first_factor = tf.abs(periodic_comp - 0.5) second_factor = 2 * (max_lr - learning_rate) second_comp = learning_rate elif mode == 'sin': first_factor = (learning_rate - max_lr) / 2. second_factor = tf.sin((pi * global_step) / step_size) second_comp = (learning_rate + max_lr) / 2. elif mode == 'saw': first_factor = max_lr - learning_rate second_factor = tf.mod(global_step / step_size, 1) second_comp = learning_rate return first_factor * second_factor + second_comp
tensorflow.sin
4,127
import tensorflow as tf Create the summary part of the graph :return: """ with tf.variable_scope('train-summary-per-epoch'): for tag in self.scalar_summary_tags: self.summary_tags += tag self.summary_placeholders[tag] = tf.placeholder('float32', None, name=tag) self.summary_ops[tag] = tf.summary.scalar(tag, self.summary_placeholders[tag]) for tag, shape in self.images_summary_tags: self.summary_tags += tag self.summary_placeholders[tag] = tf.placeholder('float32', shape, name=tag) self.summary_ops[tag] = tf.summary.image(tag, self.summary_placeholders[tag], max_outputs=10) def add_summary(self, step, summaries_dict=None, summaries_merged=None): """ Add the summaries to tensorboard :param step: :param summaries_dict: :param summaries_merged: :return: """ if summaries_dict is not None:
tensorflow.summary.image
4,128
import tensorflow as tf log_r *= tf.to_float(t < self.seq_lengths - 1) weights += log_r - prev_log_r new_state = TrainableVRNNState(rnn_state=next_rnn_state, rnn_out=rnn_out, latent_encoded=latent_encoded) return weights, new_state _DEFAULT_INITIALIZERS = {"w": tf.contrib.layers.xavier_initializer(), "b": tf.zeros_initializer()} def create_vrnn( data_size, latent_size, emission_class, rnn_hidden_size=None, fcnet_hidden_sizes=None,
tensorflow.zeros_initializer
4,129
import tensorflow as tf

  assert_op = tf.Assert(tf.is_finite(domain_loss), [domain_loss])
  with tf.control_dependencies([assert_op]):
    tag_loss = 'losses/domain_loss'
tensorflow.control_dependencies
4,130
import tensorflow as tf

        l1 = tf.matmul(images, self.w1)+self.b1
        l1=tf.nn.relu(l1)
        l2 = tf.matmul(l1, self.w2)+self.b2
        l2=tf.nn.relu(l2)
        l3=tf.matmul(l2, self.w3)+self.b3
        l3=tf.nn.relu(l3)
        out=tf.matmul(l3, self.w4)+self.b4
        return out

    def valid_inference(self,images):
        images=tf.cast(images,tf.float32)/255.0
        l1 = tf.matmul(images, self.w1)+self.b1
        l1=tf.nn.relu(l1)
        l2 = tf.matmul(l1, self.w2)+self.b2
        l2=tf.nn.relu(l2)
        l3=tf.matmul(l2, self.w3)+self.b3
        l3=tf.nn.relu(l3)
        out=tf.matmul(l3, self.w4)+self.b4
        return out
tensorflow.cast
4,131
import tensorflow as tf

    same_start = tf.equal(tf.expand_dims(labeled_starts, 1), tf.expand_dims(candidate_starts, 0))  # [num_labeled, num_candidates]
    same_end = tf.equal(tf.expand_dims(labeled_ends, 1), tf.expand_dims(candidate_ends, 0))  # [num_labeled, num_candidates]
tensorflow.expand_dims
4,132
from tensorflow.python.ops import math_ops labels, labels, weights=weights, name='variance_labels') pearson_r = _safe_div( cov, math_ops.mul(math_ops.sqrt(var_predictions), math_ops.sqrt(var_labels)), 'pearson_r') with ops.control_dependencies( [update_cov, update_var_predictions, update_var_labels]): update_op = _safe_div(update_cov, math_ops.mul( math_ops.sqrt(update_var_predictions), math_ops.sqrt(update_var_labels)), 'update_op') if metrics_collections: ops.add_to_collections(metrics_collections, pearson_r) if updates_collections: ops.add_to_collections(updates_collections, update_op)
tensorflow.python.ops.math_ops.sqrt
4,133
import tensorflow as tf "url.") tf.flags.DEFINE_string( "tpu_zone", None,
tensorflow.flags.DEFINE_string
4,134
import tensorflow as tf IMAGE_SIZE = 42 WARP_TARGET_SIZE = 28 HALF_DIFF = (IMAGE_SIZE - WARP_TARGET_SIZE) // 2 class Model(ModelDesc): def _get_inputs(self): return [InputDesc(tf.float32, (None, IMAGE_SIZE, IMAGE_SIZE, 2), 'input'), InputDesc(tf.int32, (None,), 'label')] def _build_graph(self, inputs): xys = np.array([(y, x, 1) for y in range(WARP_TARGET_SIZE) for x in range(WARP_TARGET_SIZE)], dtype='float32') xys = tf.constant(xys, dtype=tf.float32, name='xys') # p x 3 image, label = inputs image = image / 255.0 - 0.5 # bhw2 def get_stn(image): stn = (LinearWrap(image) .AvgPooling('downsample', 2) .Conv2D('conv0', 20, 5, padding='VALID') .MaxPooling('pool0', 2) .Conv2D('conv1', 20, 5, padding='VALID') .FullyConnected('fc1', out_dim=32) .FullyConnected('fct', out_dim=6, nl=tf.identity,
tensorflow.constant
4,135
import tensorflow as tf def main(_): tf.reset_default_graph() # Import data cifar = cf.cifar10(batchSize=FLAGS.batch_size, downloadDir=FLAGS.data_dir) with tf.variable_scope('inputs'): # Create the model x = tf.placeholder(tf.float32, [None, FLAGS.img_width * FLAGS.img_height * FLAGS.img_channels]) # Define loss and optimizer y_ = tf.placeholder(tf.float32, [None, FLAGS.num_classes]) # Whether model is training train = tf.placeholder(tf.bool, []) # Build the graph for the deep net y_conv, img_summary = deepnn(x, train) # Define your loss function - softmax_cross_entropy with tf.variable_scope('x_entropy'): cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv)) # Define your AdamOptimiser, using FLAGS.learning_rate to minimixe the loss function decayed_learning_rate = tf.train.exponential_decay(FLAGS.learning_rate, tf.Variable(0, trainable=False), 1000, 0.8) update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
tensorflow.placeholder
4,136
import tensorflow as tf if start >= self.train_data_len: start = 0 new_epoch_flag = True yield x_batch, y_batch def init_summaries(self): """ Create the summary part of the graph :return: """ with tf.variable_scope('train-summary-per-epoch'): for tag in self.scalar_summary_tags: self.summary_tags += tag self.summary_placeholders[tag] = tf.placeholder('float32', None, name=tag) self.summary_ops[tag] = tf.summary.scalar(tag, self.summary_placeholders[tag]) for tag, shape in self.images_summary_tags: self.summary_tags += tag self.summary_placeholders[tag] = tf.placeholder('float32', shape, name=tag) self.summary_ops[tag] = tf.summary.image(tag, self.summary_placeholders[tag], max_outputs=10) def add_summary(self, step, summaries_dict=None, summaries_merged=None):
tensorflow.variable_scope
4,137
import tensorflow as tf self.summary_writer.add_summary(summary, global_step=self.get_past_epochs()) def _add_decoding_summary(self, name, var, collection='train'): var = var[:FLAGS.visualiza_max] var = tf.concat(tf.unstack(var), axis=0) var = tf.expand_dims(var, dim=0) color_s = tf.summary.image(name, var[..., :3], max_outputs=FLAGS.visualiza_max) var = tf.expand_dims(var[..., 3], dim=3) bw_s = tf.summary.image('depth_' + name, var, max_outputs=FLAGS.visualiza_max) return tf.summary.merge([color_s, bw_s])
tensorflow.summary.image
4,138
import tensorflow as tf def batch_shaped(t): return graph_typecheck.assert_shape(t, [batch_size]) def batch_win_shaped(t): return graph_typecheck.assert_shape(t, [batch_size, window_size]) def batch_2win_shaped(t): return graph_typecheck.assert_shape(t, [batch_size, 2 * window_size]) def tile_to_2win(t): return tf.tile(tf.expand_dims(t, 1), [1, 2 * window_size]) closest_time = batch_shaped(band_features['closest_time_in_band']) closest_flux = batch_shaped(band_features['closest_flux_in_band']) self.in_window = tf.less( batch_shaped(band_features["closest_time_diff"]), band_time_diff ) # Before and after flux. before_flux = batch_win_shaped(band_features["before_flux"])
tensorflow.expand_dims
4,139
import tensorflow as tf Raises: IOError: The logging directory does not contain a configuration file. Returns: Configuration object. """ print(logdir) config_path = logdir and os.path.join(logdir, 'config.yaml') if not config_path or not tf.gfile.Exists(config_path): message = ( 'Cannot resume an existing run since the logging directory does not ' 'contain a configuration file.') raise IOError(message) with tf.gfile.GFile(config_path, 'r') as file_: print('try to load') config = yaml.load(file_, yaml.Loader) message = 'Resume run and write summaries and checkpoints to {}.'
tensorflow.gfile.Exists
4,140
import tensorflow as tf output: (tf.Tensor) A tensor with shape [batch_size, list_size]. Each value is the ranking score of the corresponding example. labels: (tf.Tensor) A tensor of the same shape as `output`. A value >= 1 means a relevant example. propensity: No use. name: A string used as the name for this variable scope. Returns: (tf.Tensor) A single value tensor containing the loss. """ loss = None with tf.name_scope(name, "softmax_loss",[output]): label_dis = labels / tf.reduce_sum(labels, 1, keep_dims=True) loss = tf.nn.softmax_cross_entropy_with_logits(logits=output, labels=label_dis) * tf.reduce_sum(labels, 1) return tf.reduce_sum(loss) / tf.reduce_sum(labels) def get_normalized_weights(self, propensity): """Computes listwise softmax loss with propensity weighting. Args: propensity: (tf.Tensor) A tensor of the same shape as `output` containing the weight of each element. Returns: (tf.Tensor) A tensor containing the propensity weights. """
tensorflow.reduce_sum
4,141
import tensorflow as tf dtype=self.dtype)) else: bias_init = tf.ones(bias_shape, dtype=self.dtype) setattr( self, 'gain_bias_%s' % layer, tf.get_variable( name='%s_gain_bias' % self.layer_name, dtype=self.dtype, trainable=True, initializer=bias_init)) if self.gate_bias_init == 'chronos': bias_init = -bias_init else: bias_init = tf.ones(bias_shape, dtype=self.dtype) setattr( self, 'mix_bias_%s' % layer, tf.get_variable( name='%s_mix_bias' % self.layer_name, dtype=self.dtype, trainable=True, initializer=bias_init)) # Divisive params if self.alpha and not self.lesion_alpha: setattr( self,
tensorflow.ones
4,142
import tensorflow as tf batch_mean, batch_var = tf.nn.moments(x, range(len(shape) - 1)) update_mean = tf.assign_sub(pop_mean, (1 - decay)*(pop_mean - batch_mean)) update_var = tf.assign_sub(pop_var, (1 - decay)*(pop_var - batch_var)) with tf.control_dependencies([update_mean, update_var]): return tf.nn.batch_normalization(x, batch_mean, batch_var, beta, gamma, epsilon) def func2(): # execute at test time
tensorflow.nn.batch_normalization
4,143
import tensorflow as tf # Memory allocation on the GPU as needed config = tf.ConfigProto() config.gpu_options.allow_growth = True config.allow_soft_placement = True # Train with tf.Session(config=config) as sess: try: summary_writer = tf.summary.FileWriter(tensorboard_dir, sess.graph) sess.run(tf.global_variables_initializer()) # saved model restoring if args.restore: # Restore saved model if the user requested it, default = True try:
tensorflow.summary.FileWriter
4,144
from tensorflow.python.platform import gfile

      self.assertTrue(gfile.Exists(s1))

      s2 = save.save(sess, os.path.join(save_dir, "s2"))
      self.assertEqual([s1, s2], save.last_checkpoints)
      self.assertTrue(gfile.Exists(s1))
      self.assertTrue(gfile.Exists(s2))

      s3 = save.save(sess, os.path.join(save_dir, "s3"))
tensorflow.python.platform.gfile.Exists
4,145
import tensorflow as tf

  if x.dtype.base_dtype == tf.bool:
    x = tf.cast(x, tf.float32)
  m = tf.reduce_mean(x, axis=axis, keep_dims=True)
  devs_squared = tf.square(x - m)
  return tf.reduce_mean(devs_squared, axis=axis, keep_dims=keepdims)

def euclidean_distance(test, support, max_dist_sq=20):
tensorflow.reduce_mean
4,146
from tensorflow.python.framework import ops

@ops.RegisterShape("Assign")
def _AssignShape(op):
tensorflow.python.framework.ops.RegisterShape
4,147
import tensorflow as tf t_vars = tf.trainable_variables() else: try: # TF1.0+ t_vars = tf.global_variables() except Exception: # TF0.12 t_vars = tf.all_variables() d_vars = [var for var in t_vars if name in var.name] if printable: for idx, v in enumerate(d_vars):
tensorflow.all_variables
4,148
import tensorflow as tf self.D_A_loss = (self.D_A_loss_real + self.D_A_loss_fake) / 2.0 self.discriminator_loss = self.D_B_loss + self.D_A_loss self.loss_GABA_sum = tf.summary.scalar("g_loss_a2b", self.loss_GABA) self.loss_GBAB_sum = tf.summary.scalar("g_loss_b2a", self.loss_GBAB) self.g_total_loss_sum = tf.summary.scalar("g_loss", self.generator_loss) self.g_sum = tf.summary.merge([self.loss_GABA_sum,self.loss_GBAB_sum,self.g_total_loss_sum]) self.loss_db_sum = tf.summary.scalar("db_loss", self.D_B_loss) self.loss_da_sum = tf.summary.scalar("da_loss", self.D_A_loss) self.loss_d_sum = tf.summary.scalar("d_loss",self.discriminator_loss) self.db_loss_real_sum = tf.summary.scalar("db_loss_real", self.D_B_loss_real) self.db_loss_fake_sum = tf.summary.scalar("db_loss_fake", self.D_B_loss_fake) self.da_loss_real_sum = tf.summary.scalar("da_loss_real", self.D_A_loss_real) self.da_loss_fake_sum = tf.summary.scalar("da_loss_fake", self.D_A_loss_fake) self.d_sum = tf.summary.merge( [self.loss_da_sum, self.da_loss_real_sum, self.da_loss_fake_sum, self.loss_db_sum, self.db_loss_real_sum, self.db_loss_fake_sum,
tensorflow.summary.scalar
4,149
import tensorflow as tf

    d_layer_2_all = tf.layers.dense(d_layer_1_all, 40, activation=tf.nn.sigmoid, name='f2_att' + stag)
    d_layer_3_all = tf.layers.dense(d_layer_2_all, 1, activation=None, name='f3_att' + stag)
tensorflow.layers.dense
4,150
import tensorflow as tf antecedent_offsets = tf.expand_dims(top_span_range, 1) - tf.expand_dims(top_span_range, 0) # [k, k] antecedents_mask = antecedent_offsets >= 1 # [k, k] fast_antecedent_scores = tf.expand_dims(top_span_mention_scores, 1) + tf.expand_dims(top_span_mention_scores, 0) # [k, k] fast_antecedent_scores += tf.log(tf.to_float(antecedents_mask)) # [k, k] fast_antecedent_scores += self.get_fast_antecedent_scores(top_span_emb) # [k, k] _, top_antecedents = tf.nn.top_k(fast_antecedent_scores, c, sorted=False) # [k, c] top_antecedents_mask = util.batch_gather(antecedents_mask, top_antecedents) # [k, c] top_fast_antecedent_scores = util.batch_gather(fast_antecedent_scores, top_antecedents) # [k, c] top_antecedent_offsets = util.batch_gather(antecedent_offsets, top_antecedents) # [k, c] return top_antecedents, top_antecedents_mask, top_fast_antecedent_scores, top_antecedent_offsets
tensorflow.nn.top_k
4,151
import tensorflow as tf G = tf.Graph() with G.as_default(): # Data input X = tf.placeholder(tf.float32, [None, input_node], name = 'x-Input') y_ = tf.placeholder(tf.float32, [None, output_node], name = 'label-Input') # hidden layers self.nnweights = [] # collect weights of network prev_node = input_node prev_x = X for i in range(len(hidden_layers_node)): layer_name = 'layer' + str(i+1) with tf.variable_scope(layer_name, reuse=tf.AUTO_REUSE): weights = tf.get_variable('weights', [prev_node, hidden_layers_node[i]], initializer=tf.truncated_normal_initializer(stddev=0.1)) self.nnweights.append(weights) biases = tf.get_variable('biases', [hidden_layers_node[i]], initializer=tf.constant_initializer(0.0)) layer_out = tf.nn.dropout(tf.matmul(prev_x, weights) + biases, dropout_keep_prob) if activation == 'relu': layer_out = tf.nn.relu(layer_out) elif activation == 'sigmoid': layer_out = tf.nn.sigmoid(layer_out) elif activation == 'tanh':
tensorflow.truncated_normal_initializer
4,152
import tensorflow as tf weights=1.0 / tf.cast(cur_batch_size, tf.float32), scope='loss_{}'.format(len(pred_outputs) - 1), loss_collection=None,#tf.GraphKeys.LOSSES, # mean all elements of all pixels in all batch reduction=tf.losses.Reduction.MEAN)) else: for pred_ind in list(range(len(pred_outputs))): mse_loss_list.append(tf.losses.mean_squared_error(targets_list[pred_ind], pred_outputs[pred_ind], weights=1.0 / tf.cast(cur_batch_size, tf.float32), scope='loss_{}'.format(pred_ind), loss_collection=None,#tf.GraphKeys.LOSSES, # mean all elements of all pixels in all batch reduction=tf.losses.Reduction.MEAN))# SUM, SUM_OVER_BATCH_SIZE, default mean by all elements mse_loss = tf.multiply(params['mse_weight'], tf.add_n(mse_loss_list), name='mse_loss') tf.summary.scalar('mse', mse_loss)
tensorflow.cast
4,153
from tensorflow.python.ops import gen_math_ops # tensor `imag` is [4.75, 5.75] tf.complex(real, imag) ==> [[2.25 + 4.74j], [3.25 + 5.75j]] ``` Args: real: A `Tensor` of type `float`. imag: A `Tensor` of type `float`. name: A name for the operation (optional). Returns: A `Tensor` of type `complex64`. """ with ops.op_scope([real, imag], name, "Complex") as name: return gen_math_ops._complex(real, imag, name=name) def round(x, name=None): """Rounds the values of a tensor to the nearest integer, element-wise. For example: ```python # 'a' is [0.9, 2.5, 2.3, -4.4] tf.round(a) ==> [ 1.0, 3.0, 2.0, -4.0 ] ```
tensorflow.python.ops.gen_math_ops._complex
4,154
import tensorflow as tf

        if s.ndim < 4:
            s = s[np.newaxis, :]
        return self.sess.run(self.v, {self.tfs: s})[0, 0]

    def load(self):
        saver = tf.train.Saver()
        saver.restore(self.sess, './model_save/params')

    def save(self):
        saver = tf.train.Saver()
tensorflow.train.Saver
4,155
import tensorflow as tf if self.lesion_omega: setattr( self, 'omega_%s' % layer, tf.constant(0.)) if self.lesion_kappa: setattr( self, 'kappa_%s' % layer, tf.constant(0.)) if self.reuse: # Make the batchnorm variables scopes = ['g1_bn', 'g2_bn', 'c1_bn', 'c2_bn'] bn_vars = ['moving_mean', 'moving_variance', 'gamma'] for s in scopes: with tf.variable_scope(s): for v in bn_vars: tf.get_variable( trainable=self.param_trainable[v], name=v, dtype=self.dtype, shape=[self.hgru_k[idx]], collections=self.param_collections[v], initializer=self.param_initializer[v]) self.param_initializer = None def resize_x_to_y( self, x, y,
tensorflow.variable_scope
4,156
import tensorflow as tf vocab_dist = tf.concat(values=[vocab_dist, extra_zeros], axis=1) # [batch_size, extended_vsize] if self.options.add_first_word_prob_for_phrase: # add prob of the first word to each phrase attn_dist = add_first_word_prob_to_atten_dists(self.in_passage_words, self.phrase_starts, vocab_dist, attn_dist) # match attn_dist[batch_size, passage_length] to sparse one-hot representation [batch_size, passage_length, extended_vsize] batch_nums = tf.range(0, limit=batch_size) # shape (batch_size) batch_nums = tf.expand_dims(batch_nums, axis=1) # shape (batch_size, 1) batch_nums = tf.tile(batch_nums, [1, passage_length]) # shape (batch_size, passage_length) step_nums = tf.range(0, limit=passage_length) # [passage_length] step_nums = tf.expand_dims(step_nums, axis=0) # shape (1, passage_length) step_nums = tf.tile(step_nums, [batch_size, 1]) # shape (batch_size, passage_length) indices = tf.stack((batch_nums, step_nums, passage_word_idx), axis=2) # shape (batch_size, passage_length, 3) indices = tf.reshape(indices, [-1, 3]) #[batch_size * passage_length, 3] indices = tf.cast(indices, tf.int64) shape = [batch_size, passage_length, extended_vsize] shape = tf.cast(shape, tf.int64) attn_dist = tf.reshape(attn_dist, shape=[-1]) # [batch_size*passage_length] one_hot_spare_rep = tf.SparseTensor(indices=indices, values=attn_dist, dense_shape=shape) # [batch_size, passage_length, extended_vsize] if passage_mask is not None: passage_mask = tf.expand_dims(passage_mask, axis=-1)
tensorflow.stack
4,157
import tensorflow as tf tf.constant( all_segment_ids, shape=[num_examples, seq_length], dtype=tf.int32), "label_ids": tf.constant(all_label_ids, shape=[num_examples], dtype=tf.int32), }) if is_training: d = d.repeat()
tensorflow.constant
4,158
import tensorflow as tf Tout=tf.float32) img = inputs_list[i][0] img_shape = inputs_list[i][-2:] img = tf.image.crop_to_bounding_box(image=img, offset_height=0, offset_width=0, target_height=tf.cast(img_shape[0], tf.int32), target_width=tf.cast(img_shape[1], tf.int32)) outputs = r3det_dcl.build_whole_detection_network(input_img_batch=img, gtboxes_batch_h=gtboxes_and_label_h, gtboxes_batch_r=gtboxes_and_label_r, gt_encode_label=gt_encode_label, gpu_id=i) gtboxes_in_img_h = self.drawer.draw_boxes_with_categories(img_batch=img,
tensorflow.cast
4,159
import tensorflow as tf options.gen_hidden_size, initializer=tf.random_uniform_initializer(-0.1, 0.1, seed=113), state_is_tuple=True) self.placeholders = placeholders with tf.variable_scope("embedding"), tf.device('/cpu:0'): self.embedding = tf.get_variable('word_embedding', trainable=(options.fix_word_vec==False), initializer=tf.constant(self.vocab.word_vecs), dtype=tf.float32) if options.with_phrase_projection: self.max_phrase_size = placeholders.max_phrase_size if options.add_first_word_prob_for_phrase: self.in_passage_words = placeholders.in_passage_words self.phrase_starts = placeholders.phrase_starts
tensorflow.constant
4,160
import tensorflow as tf

with tf.name_scope('LossB'):
tensorflow.name_scope
4,161
import tensorflow as tf [int(feature.is_real_example)]) tf_example = tf.train.Example(features=tf.train.Features(feature=features)) writer.write(tf_example.SerializeToString()) writer.close() def file_based_input_fn_builder(input_file, seq_length, is_training, drop_remainder): """Creates an `input_fn` closure to be passed to TPUEstimator.""" name_to_features = { "input_ids": tf.FixedLenFeature([seq_length], tf.int64), "input_mask": tf.FixedLenFeature([seq_length], tf.int64), "segment_ids": tf.FixedLenFeature([seq_length], tf.int64), "label_ids": tf.FixedLenFeature([], tf.int64), "is_real_example": tf.FixedLenFeature([], tf.int64), } def _decode_record(record, name_to_features): """Decodes a record to a TensorFlow example.""" example = tf.parse_single_example(record, name_to_features) # tf.Example only supports tf.int64, but the TPU only supports tf.int32. # So cast all int64 to int32. for name in list(example.keys()):
tensorflow.FixedLenFeature
4,162
import tensorflow as tf

    conf_mat = tf.matmul(tf.transpose(pred_norm), labels)

    nom = tf.reduce_sum(weights * conf_mat)
    denom = tf.reduce_sum(weights * tf.matmul(
        tf.reshape(hist_rater_a, [num_ratings, 1]),
        tf.reshape(hist_rater_b, [1, num_ratings])) / tf.to_float(batch_size))

    try:
        return -(1 - nom / denom)
tensorflow.reshape
4,163
from tensorflow.python.platform import gfile

      # Deleted by the first helper.
      self.assertFalse(gfile.Exists(s3))
tensorflow.python.platform.gfile.Exists
4,164
import tensorflow as tf tf.initialize_all_variables().run() self.assertAllClose(543.21, v0_2.eval()) save.restore(sess, save_path) self.assertAllClose(123.45, v0_2.eval()) def testVariables(self): save_path = os.path.join(self.get_temp_dir(), "variables") with tf.Session("", graph=tf.Graph()) as sess: one = tf.Variable(1.0) twos = tf.Variable([2.0, 2.0, 2.0]) init = tf.initialize_all_variables() save = tf.train.Saver(tf.all_variables()) init.run() save.save(sess, save_path) with tf.Session("", graph=tf.Graph()) as sess:
tensorflow.Variable
4,165
import tensorflow as tf tf.equal(sl_unhead, 0), lambda: tf.zeros([bs, 0, hn], tf.float32), lambda: mean_pooling_for_unselected_head( unhead_org_idx, sl_unhead, rep_unhead_mask, input_idx, sl, rep_mask, rep_map, None) # todo: point ! ) with tf.variable_scope('output'): if keep_unselected: range_head = tf.tile(tf.expand_dims(tf.range(bs), -1), [1, sl_head]) scatter_attn = tf.cond( tf.equal(sl_head, 0), lambda: tf.zeros([bs, sl+1, hn], tf.float32), lambda: tf.scatter_nd( tf.stack([range_head, head_org_idx], -1), attn_result, [bs, sl+1, hn]) ) range_unhead = tf.tile(tf.expand_dims(tf.range(bs), -1), [1, sl_unhead]) scatter_pooling = tf.cond( tf.equal(sl_unhead, 0), lambda: tf.zeros([bs, sl+1, hn], tf.float32), lambda: tf.scatter_nd( tf.stack([range_unhead, unhead_org_idx], -1), pooling_result, [bs, sl+1, hn]) )
tensorflow.zeros
4,166
import tensorflow as tf h = norm(n+m, 'ln_2') return h def embed(X, we): #X [-1,,2] we = convert_gradient_to_tensor(we) e = tf.gather(we, X) h = tf.reduce_sum(e, 2) return h def clf(x, ny, w_init=tf.random_normal_initializer(stddev=0.02), b_init=tf.constant_initializer(0), train=False): with tf.variable_scope('clf'): nx = shape_list(x)[-1] w = tf.get_variable("w", [nx, ny], initializer=w_init) b = tf.get_variable("b", [ny], initializer=b_init) return tf.matmul(x, w)+b def model(X, M, Y, train=False, reuse=False): with tf.variable_scope('model', reuse=reuse): we = tf.get_variable("we", [n_vocab+n_special+n_ctx, n_embd], initializer=tf.random_normal_initializer(stddev=0.02))
tensorflow.random_normal_initializer
4,167
import tensorflow as tf

        try:
            sess.run(var)
        except tf.errors.FailedPreconditionError:
            uninitialized_vars.append(var)

    init_new_vars_op = tf.variables_initializer(uninitialized_vars)
    sess.run(init_new_vars_op)

    # Evalute the network for the given image
    pred = sess.run(net.get_output(), feed_dict={input_node: img})
tensorflow.variables_initializer
4,168
from tensorflow.contrib.layers.python.layers import utils return (tf.no_op(), tf.no_op()) # Only make the ops if we know that `is_training=True`, or the value of # `is_training` is unknown. is_training_const = utils.constant_value(is_training) if is_training_const is None or is_training_const: update_mean_op, update_variance_op = utils.smart_cond( is_training,
tensorflow.contrib.layers.python.layers.utils.constant_value
4,169
import tensorflow as tf def _reshape_layer(self, bottom, num_dim, name): input_shape = tf.shape(bottom) with tf.variable_scope(name): # change the channel to the caffe format # 18个通道[,18,none,none],分别显示得分,前9个为前景得分,后9个为背景得分 # 第二次[1,2,none,none] to_caffe = tf.transpose(bottom, [0, 3, 1, 2]) # then force it to have channel 2 #[1,2,none.none],将9个anchor的前景得分和背景得分分开 # 第二次[1,18,none,none] reshaped = tf.reshape(to_caffe, tf.concat(axis=0, values=[[self._batch_size], [num_dim, -1], [input_shape[2]]])) # then swap the channel back # [1,none,none,2], 第一个none应该为(行*9) # 第二次[1,none,none,18] to_tf = tf.transpose(reshaped, [0, 2, 3, 1]) return to_tf def _softmax_layer(self, bottom, name): if name == 'rpn_cls_prob_reshape': input_shape = tf.shape(bottom)
tensorflow.concat
4,170
import tensorflow as tf

    m = Model(is_training=True, config=config, input_=train_input, graph=train_graph)
    tf.summary.scalar('Training Loss', m.cost)
tensorflow.summary.scalar
4,171
import tensorflow as tf

def train_input_fn(params={}):
    # make some fake noise
    data_size = 100
    noise_tensor = tf.random_normal((data_size, INPUT_DIM))
    real_data_tensor = tf.random_uniform((data_size, OUTPUT_DIM))
tensorflow.random_normal
4,172
import tensorflow as tf

print('\nselfAdjointEig(D):')
print(sess.run(tf.self_adjoint_eig(D)))

print(sess.run(tf.div(13, 4)))
print(sess.run(tf.truediv(13, 4)))
print(sess.run(tf.floordiv(13, 4)))
print(sess.run(tf.mod(13.2, 4)))
print(sess.run(tf.cross([1, 0, 0], [0, 1, 0])))
print(sess.run(tf.square([1, 2, 3])))

def custom_polynomial(local_tf, value):
    return local_tf.subtract(3 * local_tf.square(value), value) + 10

print((sess.run(custom_polynomial(tf, 11))))
tensorflow.square
4,173
import tensorflow as tf input_size_ = input_size if layer == 0 else 2 * num_units gru_fw = tf.contrib.rnn.GRUCell(num_units) gru_bw = tf.contrib.rnn.GRUCell(num_units) init_fw = tf.tile(tf.Variable( tf.zeros([1, num_units])), [batch_size, 1]) init_bw = tf.tile(tf.Variable( tf.zeros([1, num_units])), [batch_size, 1]) mask_fw = dropout(tf.ones([batch_size, 1, input_size_], dtype=tf.float32), keep_prob=keep_prob, is_train=is_train, mode=None) mask_bw = dropout(tf.ones([batch_size, 1, input_size_], dtype=tf.float32), keep_prob=keep_prob, is_train=is_train, mode=None) self.grus.append((gru_fw, gru_bw, ))
tensorflow.zeros
4,174
import tensorflow as tf input_mask = [1] * len(input_ids) # Zero-pad up to the sequence length. while len(input_ids) < max_seq_length: input_ids.append(0) input_mask.append(0) segment_ids.append(0) assert len(input_ids) == max_seq_length assert len(input_mask) == max_seq_length assert len(segment_ids) == max_seq_length label_id = label_map[example.label] if ex_index < 5: tf.logging.info("*** Example ***") tf.logging.info("guid: %s" % (example.guid)) tf.logging.info("tokens: %s" % " ".join( [tokenization.printable_text(x) for x in tokens])) tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids])) tf.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask])) tf.logging.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids])) tf.logging.info("label: %s (id = %d)" % (example.label, label_id)) feature = InputFeatures( input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, label_id=label_id,
tensorflow.logging.info
4,175
import tensorflow as tf self.evaluate(log_cdf), self._scipy_pareto(concentration_v, scale_v).logcdf(x)) cdf = pareto.cdf(x) self.assertEqual(cdf.shape, (6, 3)) self.assertAllClose( self.evaluate(cdf), self._scipy_pareto(concentration_v, scale_v).cdf(x)) def testParetoPDFGradientZeroOutsideSupport(self): scale = tf.constant(1.) concentration = tf.constant(3.) # Check the gradient on the undefined portion. x = scale - 1 pareto = tfd.Pareto(concentration, scale) compute_pdf = lambda x: pareto.prob(x) # pylint:disable=unnecessary-lambda self.assertAlmostEqual(self.compute_gradients( compute_pdf, args=[x])[0], 0.) def testParetoCDFGradientZeroOutsideSupport(self): scale = tf.constant(1.)
tensorflow.constant
4,176
import tensorflow as tf

  mask = tf.pad(
      tf.zeros(cutout_shape, dtype=images.dtype),
      padding_dims, constant_values=1)
  patch = tf.ones_like(images, dtype=images.dtype) * replace,
  mask = tf.expand_dims(mask, -1)
  mask = tf.tile(mask, [1, 1, num_channels])
tensorflow.ones_like
4,177
import tensorflow as tf

  if FLAGS.ckpt_no is not None and not tf.gfile.Exists(path_ckpt):
    with tf.gfile.GFile(path_ckpt, "w") as writer:
      writer.write('model_checkpoint_path: "%s-%s"\n'
                   % (os.path.join(FLAGS.recover_dir, "model.ckpt"), str(FLAGS.ckpt_no)))
tensorflow.gfile.GFile
4,178
from tensorflow.python.ops import control_flow_ops

  def initializer(self):
    return control_flow_ops.group([v.initializer for v in self._vars])
tensorflow.python.ops.control_flow_ops.group
4,179
import tensorflow as tf "segment_ids": tf.FixedLenFeature([seq_length], tf.int64), "label_ids": tf.FixedLenFeature([], tf.int64), "is_real_example": tf.FixedLenFeature([], tf.int64), }
tensorflow.FixedLenFeature
4,180
import tensorflow as tf

        biases_regularizer = tf.no_regularizer
        weights_regularizer = tf.contrib.layers.l2_regularizer(cfgs.WEIGHT_DECAY)
tensorflow.contrib.layers.l2_regularizer
4,181
import tensorflow as tf if mode_gen == 'greedy': wordidx_t = tf.argmax(output_t, 1) # [batch_size] wordidx_t = tf.reshape(wordidx_t, [-1]) # [batch_size] elif mode_gen == 'sample': log_score_t = tf.log(output_t) # [batch_size, vsize] wordidx_t = tf.multinomial(log_score_t, 1) # [batch_size, 1] wordidx_t = tf.reshape(wordidx_t, [-1]) # [batch_size] elif mode_gen in ('ce_train', 'loss',): wordidx_t = answer_batch_unstack[i] else:
tensorflow.multinomial
4,182
import tensorflow as tf nfeats_tot = nfeats_conv + nfeats feats_all = tf.concat(1, [feats_conv, feats_other]) print('feats_cnn: {}'.format(feats_conv.get_shape())) print('feats_all: {}'.format(feats_all.get_shape())) # Project to RNN size rnn_output = feats_all rnn_output_size = nfeats_tot if do_rnn: with tf.variable_scope('rnn_proj'): rnn_proj_w = tf.get_variable('W', [nfeats_tot, rnn_size], initializer=tf.uniform_unit_scaling_initializer(factor=1.0, dtype=dtype), dtype=dtype) rnn_proj_b = tf.get_variable('b', [rnn_size], initializer=tf.constant_initializer(0.0), dtype=dtype) rnn_inputs = tf.nn.bias_add(tf.matmul(feats_all, rnn_proj_w), rnn_proj_b) rnn_inputs = tf.reshape(rnn_inputs, [batch_size, rnn_nunroll, rnn_size]) rnn_inputs = tf.split(rnn_inputs, rnn_nunroll, axis=1) rnn_inputs = [tf.squeeze(input_, [1]) for input_ in rnn_inputs] if rnn_cell_type == 'rnn': cell_fn = tf.nn.rnn_cell.BasicRNNCell elif rnn_cell_type == 'gru': cell_fn = tf.nn.rnn_cell.GRUCell elif rnn_cell_type == 'lstm': cell_fn = tf.nn.rnn_cell.BasicLSTMCell else: raise NotImplementedError() cell = cell_fn(rnn_size) if mode == 'train' and rnn_keep_prob < 1.0: cell = tf.nn.rnn_cell.DropoutWrapper(cell, output_keep_prob=rnn_keep_prob)
tensorflow.split
4,183
import tensorflow as tf def din_attention(query, facts, attention_size, mask, stag='null', mode='SUM', softmax_stag=1, time_major=False, return_alphas=False): if isinstance(facts, tuple): # In case of Bi-RNN, concatenate the forward and the backward RNN outputs. facts = tf.concat(facts, 2) print ("querry_size mismatch") query = tf.concat(values = [ query, query, ], axis=1) if time_major: # (T,B,D) => (B,T,D) facts = tf.array_ops.transpose(facts, [1, 0, 2]) mask = tf.equal(mask, tf.ones_like(mask)) facts_size = facts.get_shape().as_list()[-1] # D value - hidden size of the RNN layer querry_size = query.get_shape().as_list()[-1] queries = tf.tile(query, [1, tf.shape(facts)[1]]) queries = tf.reshape(queries, tf.shape(facts)) din_all = tf.concat([queries, facts, queries-facts, queries*facts], axis=-1) d_layer_1_all = tf.layers.dense(din_all, 80, activation=tf.nn.sigmoid, name='f1_att' + stag) d_layer_2_all = tf.layers.dense(d_layer_1_all, 40, activation=tf.nn.sigmoid, name='f2_att' + stag) d_layer_3_all = tf.layers.dense(d_layer_2_all, 1, activation=None, name='f3_att' + stag) d_layer_3_all = tf.reshape(d_layer_3_all, [-1, 1, tf.shape(facts)[1]]) scores = d_layer_3_all # Mask # key_masks = tf.sequence_mask(facts_length, tf.shape(facts)[1]) # [B, T] key_masks = tf.expand_dims(mask, 1) # [B, 1, T]
tensorflow.ones_like
4,184
import tensorflow as tf op_Xs.append(op_X) # Stack operation outputs and index by op op_Xs = tf.stack(op_Xs) X = op_Xs[op]
tensorflow.stack
4,185
import tensorflow as tf if not config.aug_same and config.aug: recon_feat = tf.concat([features, target['aug']], -1) print('Use recon feature ', name, recon_feat) logprob = heads[name](recon_feat).log_prob(target[name]) # logprob = heads[name](features).log_prob(target['ori_img']) else: logprob = heads[name](features).log_prob(target[name]) objectives.append(Objective(name, logprob, max, include, exclude)) objectives = [o._replace(value=tf.reduce_mean(o.value)) for o in objectives] return objectives, cstr_pct def contra_step_lossV1(pred, tgt, temp=10.0): # Step-wise contrastive loss pred1, pred2 = tf.split(pred, 2, axis=0)
tensorflow.reduce_mean
4,186
import tensorflow as tf

        super().release()
        self.sess.close()
        tf.reset_default_graph()
tensorflow.reset_default_graph
4,187
from tensorflow.python.framework import ops

@ops.RegisterStatistics("Conv2D", "flops")
def _calc_conv_flops(graph, node):
tensorflow.python.framework.ops.RegisterStatistics
4,188
import tensorflow as tf

        # target q network evalution
        q_tp1 = q_func(obs_tp1_input.get(), num_actions, scope="target_q_func")
        target_q_func_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                               scope=tf.get_variable_scope().name + "/target_q_func")

        # q scores for actions which we know were selected in the given state.
tensorflow.get_variable_scope
4,189
import tensorflow as tf

    # Embedding + 2 LSTM layers + hidden layers + logits layer
    expected_num_vars = 1 + 2 * 2 * FLAGS.rnn_num_layers + 2 * (
        FLAGS.cl_num_layers) + 2
    self.assertEqual(len(tf.trainable_variables()), expected_num_vars)

  def testEvalGraph(self):
tensorflow.trainable_variables
4,190
import tensorflow as tf do_reuse = True if i > 0 else None with tf.device(assign_to_gpu(i, "/gpu:0")), tf.variable_scope(tf.get_variable_scope(), reuse=do_reuse): clf_logits, clf_losses, lm_losses = model(*xs, train=True, reuse=do_reuse) if lm_coef > 0: train_loss = tf.reduce_mean(clf_losses) + lm_coef*tf.reduce_mean(lm_losses) else: train_loss = tf.reduce_mean(clf_losses) params = find_trainable_variables("model") grads = tf.gradients(train_loss, params) grads = list(zip(grads, params)) gpu_grads.append(grads) gpu_ops.append([clf_logits, clf_losses, lm_losses]) ops = [tf.concat(op, 0) for op in zip(*gpu_ops)] grads = average_grads(gpu_grads) grads = [g for g, p in grads] train = opt_fns[opt](params, grads, lr, partial(lr_schedules[lr_schedule], warmup=lr_warmup), n_updates_total, l2=l2, max_grad_norm=max_grad_norm, vector_l2=vector_l2, b1=b1, b2=b2, e=e)
tensorflow.gradients
4,191
import tensorflow as tf self.deconv_5 = self.deconv_bn_relu(self.deconv_4, name = 'deconv_5',kernel_size = 3, output_channels = 64, initializer =tf.contrib.layers.variance_scaling_initializer(), stride=2, bn=True, training=self.is_training)# 224*224
tensorflow.contrib.layers.variance_scaling_initializer
4,192
from tensorflow.contrib.learn.python.learn.estimators import test_data self.report_benchmark( iters=metrics['global_step'], extras={k: v for k, v in metrics.items() if k in _METRIC_KEYS}) def benchmarkMatrixData(self): iris = test_data.prepare_iris_data_for_logistic_regression() cont_feature = feature_column.real_valued_column('feature', dimension=4) bucketized_feature = feature_column.bucketized_column( cont_feature, test_data.get_quantile_based_buckets(iris.data, 10)) classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
tensorflow.contrib.learn.python.learn.estimators.test_data.prepare_iris_data_for_logistic_regression
4,193
import tensorflow as tf self._moving_variance = tf.get_variable( "moving_variance", shape=self._mean_shape, collections=[tf.GraphKeys.MOVING_AVERAGE_VARIABLES, tf.GraphKeys.GLOBAL_VARIABLES], initializer=tf.ones_initializer(), trainable=False) def build_batch_stats(): """Builds the batch statistics calculation ops.""" # We use the moving mean as an estimate of the mean in order to perform # a more numerically stable calculation of the batch mean. # Copy for better stability. shift = tf.add(self._moving_mean, 0) counts, shifted_sum_x, shifted_sum_x2, _ = tf.nn.sufficient_statistics( input_batch, reduction_indices, keep_dims=True, shift=shift, name="batch_norm_ss") mean, variance = tf.nn.normalize_moments(counts, shifted_sum_x, shifted_sum_x2, shift, name="normalize_moments") return mean, variance
tensorflow.nn.sufficient_statistics
4,194
import tensorflow as tf dtype=tf.float32) U = tf.get_variable(name="attn_U", shape=[2 * self.config.hidden_size, 2 * self.config.hidden_size], initializer=tf.contrib.layers.xavier_initializer(), # initializer=tf.truncated_normal_initializer(), # initializer=tf.keras.initializers.lecun_normal(), dtype=tf.float32) self.position_emb = tf.reshape(self.position_emb, [-1, 2 * self.config.hidden_size]) shape = tf.shape(output) output = tf.reshape(output, [-1, 2 * self.config.hidden_size]) atten_hidden = tf.tanh( tf.add( tf.matmul(self.position_emb, W), tf.matmul(output, U))) alpha = tf.nn.softmax( tf.reshape(tf.matmul(atten_hidden, V), [-1, shape[1], 1]), axis=1) output = tf.reshape(output, [-1, shape[1], 2 * self.config.hidden_size]) C = tf.multiply(alpha, output)
tensorflow.reshape
4,195
import tensorflow as tf return output, new_state def update_pos(pos, symbol, max_pos=None): if not decoder.pred_edits: return pos is_keep = tf.equal(symbol, utils.KEEP_ID) is_del = tf.equal(symbol, utils.DEL_ID) is_not_ins = tf.logical_or(is_keep, is_del) pos = beam_search.resize_like(pos, symbol) max_pos = beam_search.resize_like(max_pos, symbol) pos += tf.to_float(is_not_ins) if max_pos is not None: pos = tf.minimum(pos, tf.to_float(max_pos))
tensorflow.logical_or
4,196
import tensorflow as tf name='batching_many_small', sess=session, op_or_tensor=op_to_benchmark, burn_iters=10, min_iters=50) def benchmark_batching_large(self): with tf.Session() as session: @dynamic_batching.batch_fn def f(a, b): return a + b outputs = [] for _ in xrange(1000): outputs.append(f(tf.ones([1, 100000]), tf.ones([1, 100000]))) op_to_benchmark = tf.group(*outputs) tf.train.start_queue_runners() self.run_op_benchmark( name='batching_many_large', sess=session, op_or_tensor=op_to_benchmark, burn_iters=10, min_iters=50) if __name__ == '__main__':
tensorflow.ones
4,197
import tensorflow as tf class SaverTest(tf.test.TestCase): def testBasics(self): save_path = os.path.join(self.get_temp_dir(), "basics") with self.test_session() as sess: # Build a graph with 2 parameter nodes, and Save and # Restore nodes for them. v0 = tf.Variable(10.0, name="v0") v1 = tf.Variable(20.0, name="v1") save = tf.train.Saver({"v0": v0, "v1": v1}, restore_sequentially=True) tf.initialize_all_variables().run() # Check that the parameter nodes have been initialized. self.assertEqual(10.0, v0.eval()) self.assertEqual(20.0, v1.eval())
tensorflow.Variable
4,198
import tensorflow as tf dtype=tf.float32, initializer=tf.constant_initializer( self.vocab.char_embeddings[2:], dtype=tf.float32), trainable=False) self.char_pad_unk_mat = tf.get_variable("char_unk_pad", [2, self.pretrained_char_mat.get_shape()[1]], dtype=tf.float32, initializer=tf.constant_initializer( self.vocab.char_embeddings[:2], dtype=tf.float32), trainable=True) self.char_mat = tf.concat([self.char_pad_unk_mat, self.pretrained_char_mat], axis=0) else:
tensorflow.constant_initializer
4,199