Columns:
seed: string, lengths 25 to 2.89k
seed_api: string, lengths 14 to 102
index: int64, values 0 to 14.8k
import tensorflow as tf

    # print(activation.get_shape().as_list())

    return activation


def batch_norm_conv(x, b_train, scope):
    with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
        n_out = x.get_shape().as_list()[-1]

        beta = tf.get_variable('beta', initializer=tf.constant(0.0, shape=[n_out]))
        gamma = tf.get_variable('gamma', initializer=tf.constant(1.0, shape=[n_out]))

        batch_mean, batch_var = tf.nn.moments(x, [0, 1, 2], name='moments')
        ema = tf.train.ExponentialMovingAverage(decay=0.9)

        def mean_var_with_update():
            ema_apply_op = ema.apply([batch_mean, batch_var])
            with tf.control_dependencies([ema_apply_op]):
                return tf.identity(batch_mean), tf.identity(batch_var)

        mean, var = tf.cond(b_train, mean_var_with_update, lambda: (ema.average(batch_mean), ema.average(batch_var)))
tensorflow.nn.moments
3,600
import tensorflow as tf

    averages_op = averages.apply(ops)

    for op in ops:
        tf.scalar_summary(scope_pfix + op.name + raw_pfix, op)
        tf.scalar_summary(scope_pfix + op.name + avg_pfix, averages.average(op))

    with tf.control_dependencies([averages_op]):
        for i, dep_op in enumerate(dep_ops):
            dep_ops[i] = tf.identity(dep_op, name=dep_op.name.split(':')[0])

    return dep_ops


def exp_average(vec, curr_avg, decay=0.9):
tensorflow.control_dependencies
3,601
import tensorflow as tf if self._scale != 1.0: grad = tf.scalar_mul(1. / self._scale, grad) final_gradvar.append((grad, orig_var)) return final_gradvar def apply_gradients(self, *args, **kwargs): return self._optimizer.apply_gradients(*args, **kwargs) def main(argv=None): start1 = time.time() import os os.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.gpu_list if not tf.gfile.Exists(FLAGS.checkpoint_path): tf.gfile.MkDir(FLAGS.checkpoint_path) else: if not FLAGS.restore: tf.gfile.DeleteRecursively(FLAGS.checkpoint_path) tf.gfile.MkDir(FLAGS.checkpoint_path) input_images = tf.placeholder(tf.float32, shape=[None, None, None, 3], name='input_images') input_score_maps = tf.placeholder(tf.float32, shape=[None, None, None, 1], name='input_score_maps') if FLAGS.geometry == 'RBOX': input_geo_maps = tf.placeholder(tf.float32, shape=[None, None, None, 5], name='input_geo_maps') else: input_geo_maps = tf.placeholder(tf.float32, shape=[None, None, None, 8], name='input_geo_maps')
tensorflow.gfile.Exists
3,602
import tensorflow as tf # Note: tf.nn.softmax_cross_entropy_with_logits # expects logits, Keras expects probabilities. if not from_logits: epsilon = _to_tensor(_EPSILON, output.dtype.base_dtype) output = tf.clip_by_value(output, epsilon, 1 - epsilon) output = tf.log(output) output_shape = output.get_shape() targets = cast(flatten(target), 'int64') logits = tf.reshape(output, [-1, int(output_shape[-1])]) try: res = tf.nn.sparse_softmax_cross_entropy_with_logits( labels=targets, logits=logits) except TypeError: res = tf.nn.sparse_softmax_cross_entropy_with_logits( logits=logits, labels=targets) if len(output_shape) == 3: # if our output includes timesteps we need to reshape return tf.reshape(res, tf.shape(output)[:-1]) else: return res def binary_crossentropy(output, target, from_logits=False): """Binary crossentropy between an output tensor and a target tensor. # Arguments output: A tensor.
tensorflow.nn.sparse_softmax_cross_entropy_with_logits
3,603
import tensorflow as tf Of shape (n_test, n_support) """ rnorm_test = tf.rsqrt( tf.reduce_sum(tf.square(test), 1, keep_dims=True)) + 1e-7 rnorm_support = tf.rsqrt( tf.reduce_sum(tf.square(support), 1, keep_dims=True)) + 1e-7 test_normalized = test * rnorm_test support_normalized = support * rnorm_support # Transpose for mul support_normalized_t = tf.transpose(support_normalized, perm=[1, 0]) g = tf.matmul(test_normalized, support_normalized_t) # Gram matrix return g def elu(x, alpha=1.): """Exponential linear unit. Parameters ----------
tensorflow.transpose
3,604
import tensorflow as tf

            return p.eval(), new_value

    def _initAssignSubFetch(self, x, y, use_gpu=False):
        """Initialize a param to init, and compute param -= y."""
        with self.test_session(use_gpu=use_gpu):
            p = tf.Variable(x)
            sub = tf.assign_sub(p, y)
            p.initializer.run()
            new_value = sub.eval()
            return p.eval(), new_value

    def _testTypes(self, vals):
tensorflow.assign_sub
3,605
import tensorflow as tf

# Create tensors
# Create data to feed in
x_vals = np.array([1., 3., 5., 7., 9.])
x_data = tf.placeholder(tf.float32)
m = tf.constant(3.)

# Multiplication
prod = tf.mul(x_data, m)
for x_val in x_vals:
tensorflow.constant
3,606
import tensorflow as tf "input_ids": tf.FixedLenFeature([max_seq_length], tf.int64), "input_mask": tf.FixedLenFeature([max_seq_length], tf.int64), "segment_ids": tf.FixedLenFeature([max_seq_length], tf.int64), "masked_lm_positions": tf.FixedLenFeature([max_predictions_per_seq], tf.int64), "masked_lm_ids": tf.FixedLenFeature([max_predictions_per_seq], tf.int64), "masked_lm_weights": tf.FixedLenFeature([max_predictions_per_seq], tf.float32), "next_sentence_labels": tf.FixedLenFeature([1], tf.int64), } # For training, we want a lot of parallel reading and shuffling. # For eval, we want no shuffling and parallel reading doesn't matter. if is_training: d = tf.data.Dataset.from_tensor_slices(tf.constant(input_files)) d = d.repeat() d = d.shuffle(buffer_size=len(input_files)) # `cycle_length` is the number of parallel files that get read. cycle_length = min(num_cpu_threads, len(input_files))
tensorflow.FixedLenFeature
3,607
import tensorflow as tf

    optimizer_vars = set([var.name for var in optimizer.variables()])

    prefix = 'resnet%s/' % params['resnet_depth']
    resnet_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, prefix)
    vars_to_load = {}
tensorflow.get_collection
3,608
import tensorflow.contrib.eager as tfe


def device():
    return "/device:GPU:0" if tfe.num_gpus() else "/device:CPU:0"


def data_format():
    return "channels_first" if tfe.num_gpus() else "channels_last"


def random_dataset():
    batch_size = 64
    images = tf.random_normal([batch_size, 784])
tensorflow.contrib.eager.num_gpus
3,609
import tensorflow as tf edge_types: A 1-D `Tensor` of int32. Specify edge types to filter outgoing edges. Return: A tuple of `SparseTensor` (neibors, weights). neighbors: A `SparseTensor` of `int64`. weights: A `SparseTensor` of `float`. types: A `SparseTensor` of `int32` """ sp_returns = base._LIB_OP.get_sorted_full_neighbor(nodes, edge_types) return tf.SparseTensor(*sp_returns[:3]), tf.SparseTensor(*sp_returns[3:6]), \ tf.SparseTensor(*sp_returns[6:]) def sample_fanout(nodes, edge_types, counts, default_node=-1): """ Sample multi-hop neighbors of nodes according to weight in graph. Args: nodes: A 1-D `Tensor` of `int64`.
tensorflow.SparseTensor
3,610
import tensorflow as tf # optimizer & gradients optimizer_base = tf.train.MomentumOptimizer(lrn_rate, FLAGS.momentum) if not FLAGS.enbl_multi_gpu: optimizer = optimizer_base else: optimizer = mgw.DistributedOptimizer(optimizer_base) grads_origin = optimizer.compute_gradients(loss, self.trainable_vars) grads_pruned = self.__calc_grads_pruned(grads_origin) # TF operations & model saver self.sess_train = sess with tf.control_dependencies(self.update_ops): self.train_op = optimizer.apply_gradients(grads_pruned, global_step=self.global_step) self.summary_op = tf.summary.merge_all() self.log_op = [lrn_rate, loss, pr_trainable, pr_maskable] + list(metrics.values()) self.log_op_names = ['lr', 'loss', 'pr_trn', 'pr_msk'] + list(metrics.keys()) self.init_op = tf.variables_initializer(self.vars) self.init_opt_op = tf.variables_initializer(optimizer_base.variables()) if FLAGS.enbl_multi_gpu: self.bcast_op = mgw.broadcast_global_variables(0) self.saver_train = tf.train.Saver(self.vars) def __build_eval(self): """Build the evaluation graph.""" with tf.Graph().as_default(): # create a TF session for the current graph
tensorflow.summary.merge_all
3,611
import tensorflow as tf # Loss value reg_item = tf.contrib.layers.l1_l2_regularizer(L1_reg, L2_reg) reg_term = tf.contrib.layers.apply_regularization(reg_item, self.nnweights) loss_fun = self._negative_log_likelihood(y_, y) loss = loss_fun + reg_term # SGD Optimizer if optimizer == 'sgd': lr = tf.train.exponential_decay( learning_rate, global_step, 1, learning_rate_decay ) train_step = tf.train.GradientDescentOptimizer(lr).minimize(loss, global_step=global_step) elif optimizer == 'adam':
tensorflow.train.exponential_decay
3,612
import tensorflow as tf for var in tvars: init_string = "" if var.name in initialized_variable_names: init_string = ", *INIT_FROM_CKPT*" tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape, init_string) output_spec = None if mode == tf.estimator.ModeKeys.TRAIN: train_op = optimization.create_optimizer( total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu) output_spec = tf.contrib.tpu.TPUEstimatorSpec( mode=mode, loss=total_loss, train_op=train_op, scaffold_fn=scaffold_fn) elif mode == tf.estimator.ModeKeys.EVAL: def metric_fn(per_example_loss, label_ids, logits, is_real_example): predictions = tf.argmax(logits, axis=-1, output_type=tf.int32) accuracy = tf.metrics.accuracy( labels=label_ids, predictions=predictions, weights=is_real_example) loss = tf.metrics.mean(values=per_example_loss, weights=is_real_example) return {
tensorflow.contrib.tpu.TPUEstimatorSpec
3,613
import tensorflow as tf

    # Assign to yet another shape
    data2 = tf.fill([10, 10], 1)
    a2 = tf.assign(p, data2, validate_shape=False)
    a2.op.run()
tensorflow.assign
3,614
import tensorflow as tf is returned. Arguments: - *indicator*: a 1-dimensional boolean tensor indicating which elements are allowed to be sampled and which are not. - *num_samples*: int32 scalar tensor Returns: A boolean tensor with the same shape as input (indicator) tensor """ indices = tf.where(indicator) indices = tf.random.shuffle(indices) indices = tf.reshape(indices, [-1]) num_samples = tf.minimum(tf.size(indices), num_samples) selected_indices = tf.slice(indices, [0], tf.reshape(num_samples, [1])) selected_indicator = ops.indices_to_dense_vector(selected_indices, tf.shape(indicator)[0]) return tf.equal(selected_indicator, 1) def sample_balanced_positive_negative(indicator, sample_size, labels, positive_fraction=0.5): """Subsamples minibatches to a desired balance of positives and negatives.
tensorflow.random.shuffle
3,615
import tensorflow as tf
# pylint: enable=g-import-not-at-top

    with tf.compat.v1.Graph().as_default() as graph:
      outputs = {
          'foo': tf.convert_to_tensor([0, 1, 2, 3], dtype=tf.int64),
          'bar': tf.convert_to_tensor([0, 2, 0, 2], dtype=tf.int64),
      }
tensorflow.convert_to_tensor
3,616
from tensorflow.python.ops import math_ops # TODO(zakaria): support weights. def _targets_streaming_mean(unused_predictions, targets): return metrics_lib.streaming_mean(targets) def _predictions_streaming_mean(predictions, unused_targets): return metrics_lib.streaming_mean(predictions) def _streaming_with_threshold(streaming_metrics_fn, threshold): def _streaming_metrics(predictions, targets): return streaming_metrics_fn(predictions=math_ops.to_float( math_ops.greater_equal(predictions, threshold)), labels=targets) return _streaming_metrics class _MetricKeys(object): AUC = "auc" PREDICTION_MEAN = "labels/prediction_mean" TARGET_MEAN = "labels/actual_target_mean" ACCURACY_BASELINE = "accuracy/baseline_target_mean" ACCURACY_MEAN = "accuracy/threshold_%f_mean" PRECISION_MEAN = "precision/positive_threshold_%f_mean"
tensorflow.python.ops.math_ops.greater_equal
3,617
import tensorflow as tf print("Hyper Decoder") ys = conditional_entropy_model.decompress(y_strings, locs, scales, y_min_v, y_max_v, y_shape) print("Entropy Decoder") def loop_synthesis(element): y = tf.expand_dims(element[0], 0) x_coori = tf.expand_dims(element[1], 0) x_coori= tf.cast(x_coori,tf.float32) x = synthesis_transform(x_coori,y) return tf.squeeze(x, [0])
tensorflow.expand_dims
3,618
from tensorflow.python.ops import math_ops # "accuracy/threshold_0.500000_mean" metric for binary classification. metrics = {("accuracy", "classes"): metrics_lib.streaming_accuracy} predictions = math_ops.sigmoid(logits) targets_float = math_ops.to_float(targets)
tensorflow.python.ops.math_ops.sigmoid
3,619
import tensorflow as tf

    with tf.device(FLAGS.device):
        lr = tf.placeholder(tf.float32, shape=[], name="learning_rate")
        mom = tf.placeholder(tf.float32, shape=[], name="momentum")

        with tf.variable_scope("CNN") as scope:
            # Build training graph
            loss, train_op, global_step, ul_u_updated = build_training_graph(
                images, labels, ul_images, ul_u, lr, mom)
tensorflow.variable_scope
3,620
import tensorflow as tf label_id = [0]*max_seq_length label_id[start:end]=[1]*(end-start) if ex_index < 5: tf.logging.info("*** Example ***") tf.logging.info("guid: %s" % (example.guid)) tf.logging.info("tokens: %s" % " ".join( [tokenization.printable_text(x) for x in tokens])) tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids])) tf.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
tensorflow.logging.info
3,621
import tensorflow as tf #output = tf.reduce_sum(facts * tf.expand_dims(alphas, -1), 1) output = facts * tf.expand_dims(alphas, -1) output = tf.reshape(output, tf.shape(facts)) # output = output / (facts.get_shape().as_list()[-1] ** 0.5) if not return_alphas: return output else: return output, alphas def din_fcn_attention(query, facts, attention_size, mask, stag='null', mode='SUM', softmax_stag=1, time_major=False, return_alphas=False, forCnn=False): if isinstance(facts, tuple): # In case of Bi-RNN, concatenate the forward and the backward RNN outputs. facts = tf.concat(facts, 2) if len(facts.get_shape().as_list()) == 2: facts = tf.expand_dims(facts, 1) if time_major: # (T,B,D) => (B,T,D) facts = tf.array_ops.transpose(facts, [1, 0, 2]) # Trainable parameters facts_size = facts.get_shape().as_list()[-1] # D value - hidden size of the RNN layer querry_size = query.get_shape().as_list()[-1] query = tf.layers.dense(query, facts_size, activation=None, name='f1' + stag) query = prelu(query) queries = tf.tile(query, [1, tf.shape(facts)[1]]) queries = tf.reshape(queries, tf.shape(facts)) din_all = tf.concat([queries, facts, queries-facts, queries*facts], axis=-1) d_layer_1_all = tf.layers.dense(din_all, 80, activation=tf.nn.sigmoid, name='f1_att' + stag) d_layer_2_all = tf.layers.dense(d_layer_1_all, 40, activation=tf.nn.sigmoid, name='f2_att' + stag)
tensorflow.expand_dims
3,622
import tensorflow as tf annotation_pred = tf.argmax(conv_t3, dimension=3, name="prediction") return tf.expand_dims(annotation_pred, dim=3), conv_t3 def train(loss_val, var_list): optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate) grads = optimizer.compute_gradients(loss_val, var_list=var_list) if FLAGS.debug: # print(len(var_list)) for grad, var in grads: utils.add_gradient_summary(grad, var)
tensorflow.train.AdamOptimizer
3,623
import tensorflow as tf [[0, 1, 2, 3], [1, 0, 2, 3], [2, 3, 1, 0], [2, 1, 0, 3], [0, 1, 2, 3]], num_partitions=4) # after permute becomes 5,4,11, return all partitions 5,11 node_a = tf.div(list_of_parts[0], list_of_parts[1]) node_b = tf.divide(list_of_parts[2], list_of_parts[3]) trace_node = tf.trace(node_a) + node_b # there is a broadcast here out_node = tf.cast(tf.count_nonzero(trace_node), dtype=tf.float32) + tf.Variable(tf.random_normal(shape=(2, 3))) placeholders = [in_node_a, in_node_b, in_node_c] predictions = [out_node]
tensorflow.random_normal
3,624
import tensorflow as tf # Reshape input tensors to remove nunroll dim; will briefly restore later during RNN if necessary if cnn_rnn_zack: feats_audio = tf.reshape(feats_audio_nunroll, shape=[batch_size, rnn_nunroll + zack_hack, audio_nbands, audio_nchannels]) else: feats_audio = tf.reshape(feats_audio_nunroll, shape=[batch_size * rnn_nunroll, audio_context_len, audio_nbands, audio_nchannels]) feats_other = tf.reshape(feats_other_nunroll, shape=[batch_size * rnn_nunroll, nfeats]) if mode != 'gen': targets = tf.reshape(targets_nunroll, shape=[batch_size * rnn_nunroll]) target_weights = tf.reshape(target_weights_nunroll, shape=[batch_size * rnn_nunroll]) # CNN
tensorflow.reshape
3,625
from tensorflow.python.ops import math_ops is_false_positive *= weights_tiled is_true_negative *= weights_tiled true_positives_update_op = state_ops.assign_add( true_positives, math_ops.reduce_sum(is_true_positive, 1)) false_negatives_update_op = state_ops.assign_add( false_negatives, math_ops.reduce_sum(is_false_negative, 1)) true_negatives_update_op = state_ops.assign_add( true_negatives, math_ops.reduce_sum(is_true_negative, 1)) false_positives_update_op = state_ops.assign_add( false_positives, math_ops.reduce_sum(is_false_positive, 1))
tensorflow.python.ops.math_ops.reduce_sum
3,626
import tensorflow as tf

    domain_selection_mask = tf.concat(
        values=[tf.zeros((batch_size, 1)), tf.ones((batch_size, 1))], axis=0)
tensorflow.ones
3,627
import tensorflow as tf

tf.app.flags.DEFINE_integer('resnet_size', 20, '# of layers in the ResNet model')
tf.app.flags.DEFINE_float('nb_epochs_rat', 1.0, '# of training epochs\'s ratio')
tf.app.flags.DEFINE_float('lrn_rate_init', 1e-1, 'initial learning rate')
tf.app.flags.DEFINE_float('batch_size_norm', 128, 'normalization factor of batch size')
tf.app.flags.DEFINE_float('momentum', 0.9, 'momentum coefficient')
tf.app.flags.DEFINE_float('loss_w_dcy', 2e-4, 'weight decaying loss\'s coefficient')


def forward_fn(inputs, is_train, data_format):
    """Forward pass function.
tensorflow.app.flags.DEFINE_float
3,628
import tensorflow as tf current_image_id_ph, stable_stage_num_images, transition_stage_num_images, num_blocks=3) x = tf.random_normal([2, 16, 16, 3]) logits, _ = networks.discriminator( x, progress, _num_filters_stub, networks.ResolutionSchedule( start_resolutions=(4, 4), scale_base=2, num_resolutions=3)) fake_loss = tf.reduce_sum(tf.square(logits)) grad_norms = [ _get_grad_norm( fake_loss, tf.trainable_variables('.*/progressive_gan_block_1/.*')), _get_grad_norm( fake_loss, tf.trainable_variables('.*/progressive_gan_block_2/.*')), _get_grad_norm( fake_loss, tf.trainable_variables('.*/progressive_gan_block_3/.*')) ] grad_norms_output = None with self.test_session(use_gpu=True) as sess: sess.run(tf.global_variables_initializer()) grad_norms_output = np.array([ sess.run(grad_norms, feed_dict={current_image_id_ph: i})
tensorflow.trainable_variables
3,629
import tensorflow as tf del filters hparams = self.hparams final_filters = common_layers.shape_list(layer)[-1] filters = hparams.hidden_size kernel = (4, 4) if hparams.mode == tf.estimator.ModeKeys.PREDICT: layer_shape = common_layers.shape_list(layer) if hparams.full_latent_tower: rand = tf.random_uniform(layer_shape[:-1] + [hparams.bottleneck_bits]) else: rand = tf.random_uniform(layer_shape[:-3] + [ 1, 1, hparams.bottleneck_bits]) d = 2.0 * tf.to_float(tf.less(0.5, rand)) - 1.0 z = tf.layers.dense(d, final_filters, name="unbottleneck") return layer + z, 0.0 # Embed. x = tf.layers.dense( features["cur_target_frame"], filters, name="latent_embed", bias_initializer=tf.random_normal_initializer(stddev=0.01)) x = common_attention.add_timing_signal_nd(x) if hparams.full_latent_tower: for i in range(hparams.num_compress_steps): with tf.variable_scope("latent_downstride%d" % i):
tensorflow.less
3,630
import tensorflow as tf def _smooth_l1(y_true, y_pred): # y_true [batch_size, num_anchor, 4+1] # y_pred [batch_size, num_anchor, 4] regression = y_pred regression_target = y_true[:, :, :-1] anchor_state = y_true[:, :, -1] # 找到正样本 indices = tf.where(keras.backend.equal(anchor_state, 1)) regression = tf.gather_nd(regression, indices) regression_target = tf.gather_nd(regression_target, indices) # 计算 smooth L1 loss # f(x) = 0.5 * (sigma * x)^2 if |x| < 1 / sigma / sigma # |x| - 0.5 / sigma / sigma otherwise regression_diff = regression - regression_target regression_diff = keras.backend.abs(regression_diff) regression_loss = tf.where(
tensorflow.gather_nd
3,631
import tensorflow as tf """Preprocessing for WMT: filter exceeding maximum length and concatenate.""" dataset = wmt_preprocess(dataset, training, max_length, max_eval_length) def concat_and_add_mask(features, targets): inp = features['inputs'] pad = tf.expand_dims(tf.zeros_like(inp[0]), axis=0) concat = tf.concat([inp, pad, targets], axis=0) mask = tf.concat([tf.zeros_like(inp), pad, tf.ones_like(targets)], axis=0) features['inputs'] = concat features['mask'] = mask
tensorflow.zeros_like
3,632
import tensorflow as tf #init=tf.initialize_all_variables() def train(train_num=64,test_num=32,lr=1e-4,loop_count=10000,report_step=100,save_step=1000,restore=False): with tf.Session(config=config) as sess: sess.run(init) coord = tf.train.Coordinator() threads = tf.train.start_queue_runners(coord=coord) if restore: tf.train.Saver().restore(sess,path) feed_dict={ testnum: test_num, trainnum: train_num,
tensorflow.train.start_queue_runners
3,633
import tensorflow as tf from __future__ import print_function import numpy as np import tensorflow as tf import TensorFI as ti # Import MNIST data from tensorflow.examples.tutorials.mnist import input_data mnist = input_data.read_data_sets("/tmp/data/", one_hot=True) # In this example, we limit mnist data Xtr, Ytr = mnist.train.next_batch(5000) #5000 for training (nn candidates) Xte, Yte = mnist.test.next_batch(200) #200 for testing # tf Graph Input xtr = tf.placeholder("float", [None, 784]) xte = tf.placeholder("float", [784]) # Nearest Neighbor calculation using L1 Distance # Calculate L1 Distance distance = tf.reduce_sum(tf.abs(tf.add(xtr, tf.negative(xte))), reduction_indices=1) # Prediction: Get min distance index (Nearest neighbor) pred = tf.arg_min(distance, 0) accuracy = 0. # Initialize the variables (i.e. assign their default value) init = tf.global_variables_initializer()
tensorflow.placeholder
3,634
import tensorflow as tf rotation_z_2 = tf.reshape(box2[-1], [1]) length_1 = tf.reshape(box1[3 + 0], [1]) height_1 = tf.reshape(box1[3 + 2], [1]) width_1 = tf.reshape(box1[3 + 1], [1]) length_2 = tf.reshape(box2[3 + 0], [1]) height_2 = tf.reshape(box2[3 + 2], [1]) width_2 = tf.reshape(box2[3 + 1], [1]) iou = np.squeeze(np_box_ops.iou3d_7dof_box( length_1, height_1, width_1, center_1, rotation_z_1, length_2, height_2, width_2, center_2, rotation_z_2))
tensorflow.reshape
3,635
import tensorflow as tf x=x, h2=l1_h2, layer='h1', layer_idx=0) # Intermediate FF if self.batch_norm: with tf.variable_scope( 'l1_h2_bn', reuse=self.scope_reuse) as scope: l1_h2 = tf.contrib.layers.batch_norm( inputs=l1_h2, scale=True, center=True, fused=True, renorm=False, param_initializers=self.param_initializer, updates_collections=None, scope=scope, reuse=self.reuse,
tensorflow.contrib.layers.batch_norm
3,636
import tensorflow as tf self._TokenToString(tokens[0]) + self._TokenToString(tokens[1])) def _EncodeToIds(self, word): # Below: # * a token is a wordpiece ID. # * the tokens array will be merged in-place. # * the candidates array is an array of size len(tokens) - 1. # It contains the token for the merged wordpiece, if it exists, # -1 otherwise. For instance, candidate[3] = id(token[3] + token[4]). # First, split into basic UTF-8 characters (letters). chars = tf.strings.unicode_split(word, 'UTF-8') tokens = self._StringToToken(chars) tokens = tf.where( tf.equal(tokens, NO_TOKEN), # Unseen character. tf.broadcast_to(self.unk_id, tf.shape(tokens)), tokens) # Create initial candidate list. candidates = tf.map_fn( self._MergeTokens, (tokens[:-1], tokens[1:]), dtype=tokens.dtype) def _ShouldMerge(unused_tokens, candidates): """Merge until not possible, or we abort early according to merge_prob.""" return tf.logical_and( tf.reduce_any(tf.not_equal(candidates, NO_TOKEN)), tf.random.uniform([]) < self._merge_prob)
tensorflow.equal
3,637
from tensorflow.python.ops import array_ops # Accumulate the prediction to current confusion matrix. current_cm = confusion_matrix_ops.confusion_matrix( predictions, labels, num_classes, weights=weights, dtype=cm_dtype) update_op = state_ops.assign_add(total_cm, current_cm) def compute_mean_iou(name): """Compute the mean intersection-over-union via the confusion matrix.""" sum_over_row = math_ops.to_float(math_ops.reduce_sum(total_cm, 0)) sum_over_col = math_ops.to_float(math_ops.reduce_sum(total_cm, 1)) cm_diag = math_ops.to_float(array_ops.diag_part(total_cm)) denominator = sum_over_row + sum_over_col - cm_diag # If the value of the denominator is 0, set it to 1 to avoid # zero division. denominator = math_ops.select( math_ops.greater(denominator, 0), denominator, array_ops.ones_like(denominator))
tensorflow.python.ops.array_ops.diag_part
3,638
import tensorflow as tf


def batch_norm_conv(x, b_train, scope):
    with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
        n_out = x.get_shape().as_list()[-1]

        beta = tf.get_variable('beta', initializer=tf.constant(0.0, shape=[n_out]))
        gamma = tf.get_variable('gamma', initializer=tf.constant(1.0, shape=[n_out]))

        batch_mean, batch_var = tf.nn.moments(x, [0, 1, 2], name='moments')
        ema = tf.train.ExponentialMovingAverage(decay=0.9)

        def mean_var_with_update():
            ema_apply_op = ema.apply([batch_mean, batch_var])
            with tf.control_dependencies([ema_apply_op]):
                return tf.identity(batch_mean), tf.identity(batch_var)

        mean, var = tf.cond(b_train, mean_var_with_update, lambda: (ema.average(batch_mean), ema.average(batch_var)))
        normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, 1e-3)

    return normed


def add_dense_layer(layer, filter_dims, act_func=tf.nn.relu, scope='dense_layer', use_bn=True, bn_phaze=False, use_bias=False, dilation=[1, 1, 1, 1]):
    with tf.variable_scope(scope):
        l = layer
tensorflow.identity
3,639
import tensorflow as tf backward_cell, logits, dtype=tf.float32 ) encoder_outputs = tf.concat(encoder_outputs, axis=2) logits = tf.reshape(tf.layers.dense(encoder_outputs, units=num_classes), [-1, bil_lstm_win_size, num_classes]) return logits def cnn_model(x, amp_factor=1): with tf.variable_scope('model'): conv1 = tf.layers.conv2d(x, filters=32*amp_factor, kernel_size=[5, 3], data_format='channels_last', padding= "same", strides=(2, 1), activation=tf.nn.relu) pool1 = conv1 conv2 = tf.layers.conv2d(pool1, filters=64*amp_factor, kernel_size=[5, 1], data_format='channels_last', padding= "same", strides=(2, 1),
tensorflow.variable_scope
3,640
import tensorflow as tf run_config = tf.estimator.RunConfig( save_summary_steps=1, train_distribute=strategy, model_dir=FLAGS.output_dir, save_checkpoints_steps=FLAGS.save_ckpt_steps, log_step_count_steps=1, ) else: distribution = tf.contrib.distribute.MirroredStrategy( num_gpus=FLAGS.num_gpus ) run_config = tf.estimator.RunConfig(train_distribute=distribution) model_fn = model_fn_builder( bert_config=bert_config, init_checkpoint=FLAGS.init_checkpoint,
tensorflow.contrib.distribute.MirroredStrategy
3,641
import tensorflow as tf # If you want to use the token-level output, use model.get_sequence_output() # instead. output_layer = model.get_pooled_output() hidden_size = output_layer.shape[-1].value output_weights = tf.get_variable( "output_weights", [num_labels, hidden_size], initializer=tf.truncated_normal_initializer(stddev=0.02)) output_bias = tf.get_variable( "output_bias", [num_labels], initializer=tf.zeros_initializer()) with tf.variable_scope("loss"): if is_training: # I.e., 0.1 dropout output_layer = tf.nn.dropout(output_layer, keep_prob=0.9) logits = tf.matmul(output_layer, output_weights, transpose_b=True) logits = tf.nn.bias_add(logits, output_bias) probabilities = tf.nn.softmax(logits, axis=-1) log_probs = tf.nn.log_softmax(logits, axis=-1) one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32) per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
tensorflow.variable_scope
3,642
import tensorflow as tf

    indices = tf.transpose(
        tf.stack([orig_indices, tf.squeeze(relabel_indices)], axis=0))
    relabel_q_vals = tf.gather_nd(logits_vec, indices)
    tf.compat.v2.summary.scalar(
        name="relabel_q_vals",
        data=tf.reduce_mean(relabel_q_vals),
        step=global_step,
    )

    max_q = tf.reduce_max(logits_vec, axis=1)
tensorflow.reduce_mean
3,643
from tensorflow.contrib.distributions.python.ops import distribution_util Args: x: `Tensor` of shape `B_+E_+S_`. sample_shape: `Tensor` (1D, `int32`). name: `String`. The name to give this op. Returns: x: `Tensor`. Input transposed/reshaped to `S+B+E`. """ with self._name_scope(name, values=[x, sample_shape]): x = ops.convert_to_tensor(x, name="x") sample_shape = ops.convert_to_tensor(sample_shape, name="sample_shape") x = distribution_util.rotate_transpose(x, shift=1) if self._is_all_constant_helper(self.batch_ndims, self.event_ndims): if self._batch_ndims_is_0 or self._event_ndims_is_0: b = ((min(-2, -1 - self._event_ndims_static),) if self._batch_ndims_is_0 else ()) e = (-1,) if self._event_ndims_is_0 else () x = array_ops.squeeze(x, squeeze_dims=b + e) _, batch_shape, event_shape = self.get_shape(x) else: s = (x.get_shape().as_list() if x.get_shape().is_fully_defined() else array_ops.shape(x)) batch_shape = array_ops.slice(s, (1,), (self.batch_ndims,))
tensorflow.contrib.distributions.python.ops.distribution_util.rotate_transpose
3,644
from tensorflow.python.framework import ops

  default_name = _at_k_name('false_negative', k, class_id=class_id)
  with ops.name_scope(name, default_name, (predictions_idx, labels)) as scope:
tensorflow.python.framework.ops.name_scope
3,645
import tensorflow.contrib as contrib

    stitch3_1, stitch3_2 = fc3_1, fc3_2

    dropout3_1 = contrib.layers.dropout(stitch3_1, keep_prob=keep_prob, is_training=is_training, scope="dropout3_1")
tensorflow.contrib.layers.dropout
3,646
import tensorflow as tf decoder_depth: Integer, the filters used in the convolution. decoder_use_separable_conv: Boolean, use separable conv or not. weight_decay: Weight decay for the model variables. scope_suffix: String, used in the scope suffix. Returns: decoder features merged with concatenation. """ if decoder_use_separable_conv: decoder_features = split_separable_conv2d( tf.concat(decoder_features_list, 3), filters=decoder_depth, rate=1, weight_decay=weight_decay, scope='decoder_conv0'+scope_suffix) decoder_features = split_separable_conv2d( decoder_features, filters=decoder_depth, rate=1, weight_decay=weight_decay,
tensorflow.concat
3,647
import tensorflow as tf None, ] + tu.shape_to_tf_shape(input_shape), "INPUT0") in1 = tf.placeholder(tf_input_dtype, [ None, ] + tu.shape_to_tf_shape(input_shape), "INPUT1") # If the input is a string, then convert each string to the # equivalent int32 value. if tf_input_dtype == tf.string: in0 = tf.strings.to_number(in0, tf.int32) in1 = tf.strings.to_number(in1, tf.int32) add = tf.add(in0, in1, "ADD") sub = tf.subtract(in0, in1, "SUB") # Cast or convert result to the output dtype. if tf_output0_dtype == tf.string: cast0 = tf.dtypes.as_string(add if not swap else sub, name="TOSTR0") else: cast0 = tf.cast(add if not swap else sub, tf_output0_dtype, "CAST0") if tf_output1_dtype == tf.string: cast1 = tf.dtypes.as_string(sub if not swap else add, name="TOSTR1") else:
tensorflow.add
3,648
import tensorflow as tf tf.less( matched_iou, self._config_dict['foreground_iou_threshold'])) background_indicator = tf.logical_or(negative_matches, ignored_matches) # re-assign negatively matched boxes to the background class. matched_gt_boxes = tf.where( tf.tile(tf.expand_dims(background_indicator, -1), [1, 1, 4]), tf.zeros_like(matched_gt_boxes), matched_gt_boxes) matched_gt_classes = tf.where( background_indicator, tf.zeros_like(matched_gt_classes), matched_gt_classes) matched_gt_indices = tf.where( background_indicator,
tensorflow.zeros_like
3,649
from tensorflow.python.platform import gfile from tensorflow.python.ops import io_ops from tensorflow.python.ops import parsing_ops from tensorflow.python.platform import gfile from tensorflow.python.platform import test def _resize_image(image, height, width): image = array_ops.expand_dims(image, 0) image = image_ops.resize_bilinear(image, [height, width]) return array_ops.squeeze(image, [0]) def _create_tfrecord_dataset(tmpdir): if not gfile.Exists(tmpdir): gfile.MakeDirs(tmpdir) data_sources = test_utils.create_tfrecord_files(tmpdir, num_files=1) keys_to_features = { 'image/encoded': parsing_ops.FixedLenFeature( shape=(), dtype=dtypes.string, default_value=''), 'image/format': parsing_ops.FixedLenFeature( shape=(), dtype=dtypes.string, default_value='jpeg'), 'image/class/label':
tensorflow.python.platform.gfile.Exists
3,650
import tensorflow as tf actor = Actor(sess, ACTION_DIM, ACTION_BOUND, LR_A, REPLACE_ITER_A) critic = Critic(sess, STATE_DIM, ACTION_DIM, LR_C, GAMMA, REPLACE_ITER_C, actor.a, actor.a_) actor.add_grad_to_graph(critic.a_grads) M = Memory(MEMORY_CAPACITY) saver = tf.train.Saver(max_to_keep=100) if LOAD_MODEL: all_ckpt = tf.train.get_checkpoint_state('./data', 'checkpoint').all_model_checkpoint_paths saver.restore(sess, all_ckpt[-1]) else: if os.path.isdir(DATA_PATH): shutil.rmtree(DATA_PATH) os.mkdir(DATA_PATH) sess.run(tf.global_variables_initializer()) if OUTPUT_GRAPH: tf.summary.FileWriter('logs', graph=sess.graph)
tensorflow.train.get_checkpoint_state
3,651
import tensorflow as tf

    gh = tf.get_variable("gh", [nh*4], initializer=tf.constant_initializer(1.0))
    bh = tf.get_variable("bh", [nh*4], initializer=tf.constant_initializer(0.0))
    b = tf.get_variable("b", [nh*4], initializer=tf.constant_initializer(0.0))

    gc = tf.get_variable("gc", [nh], initializer=tf.constant_initializer(1.0))
tensorflow.constant_initializer
3,652
import tensorflow as tf

      decoder_width = scale_dimension(model_options.crop_size[1], 1.0 / model_options.decoder_output_stride)
    else:
      decoder_height = scale_dimension(tf.shape(images)[1], 1.0 / model_options.decoder_output_stride)
      decoder_width = scale_dimension(tf.shape(images)[2],
tensorflow.shape
3,653
import tensorflow as tf

          host_images.get_shape(),
          dtype=input_data_type,
          stddev=1e-1,
          name='synthetic_images')
      images = tf.contrib.framework.local_variable(
          images, name='gpu_cached_images')
      labels = host_labels
tensorflow.contrib.framework.local_variable
3,654
import tensorflow as tf

                            normalizer_fn=tf.contrib.layers.batch_norm,
                            normalizer_params={"is_training": self.train}):
            self.fc1 = tf.contrib.layers.fully_connected(self.flatten, self.config.cifar10_cnn["fc1_nb_units"])
            self.fc2 = tf.contrib.layers.fully_connected(self.fc1, self.config.data["num_categories"], activation_fn=None)

        # Compute loss
tensorflow.contrib.layers.fully_connected
3,655
import tensorflow as tf def input_fn(params): """The actual input function.""" batch_size = params["batch_size"] num_examples = len(features) # This is for demo purposes and does NOT scale to large data sets. We do # not use Dataset.from_generator() because that uses tf.py_func which is # not TPU compatible. The right way to load data is with TFRecordReader. d = tf.data.Dataset.from_tensor_slices({ "input_ids": tf.constant( all_input_ids, shape=[num_examples, seq_length], dtype=tf.int32), "input_mask": tf.constant( all_input_mask, shape=[num_examples, seq_length], dtype=tf.int32), "segment_ids": tf.constant( all_segment_ids, shape=[num_examples, seq_length],
tensorflow.constant
3,656
import tensorflow as tf update_op = tf.group(*[ param.assign(param - grad * self.LEARNING_RATE) for param, grad in zip(params, grads) ]) # return update_op with tf.name_scope('validate'): x, y = self._build_data_pipeline() y_hat, loss = self._build_validation_model(x, y) with tf.control_dependencies([update_op]): return tf.print('expect', loss, y, y_hat, summarize=50) class DataOwner: BATCH_SIZE = 30 def __init__(self, player_name, build_training_model): self.player_name = player_name
tensorflow.control_dependencies
3,657
import tensorflow as tf

    return optimizer.apply_gradients(grads)


def main(argv=None):
    keep_probability = tf.placeholder(tf.float32, name="keep_probabilty")
    image = tf.placeholder(tf.float32, shape=[None, IMAGE_SIZE, IMAGE_SIZE, 3], name="input_image")

    #debug
tensorflow.placeholder
3,658
import tensorflow as tf total_loss = tf.add_n([model_loss] + tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)) # add summary if reuse_variables is None: tf.summary.image('input', images) tf.summary.image('score_map', score_maps) tf.summary.image('score_map_pred', f_score * 255) tf.summary.image('geo_map_0', geo_maps[:, :, :, 0:1])
tensorflow.summary.image
3,659
import tensorflow as tf """Build dynamic graph""" rnn_outputs, final_state = tf.nn.dynamic_rnn(cell=cell, inputs=rnn_inputs,initial_state=init_state) """Add prediction layer""" with tf.variable_scope('softmax'): W = tf.get_variable('W', [state_size, input_size_y]) b = tf.get_variable('b', [input_size_y], initializer=tf.constant_initializer(0.0)) rnn_outputs = tf.reshape(rnn_outputs, [-1, state_size]) predictions = tf.matmul(rnn_outputs, W) + b yy = tf.reshape(y, [-1, input_size_y]) #batch_size*num_steps when yo udefine a placeholder in Tensorflow, the shape of the input during the session should be the same as the shape of the plcae holder "Mean squared error loss" loss=tf.reduce_mean(tf.square(tf.reshape(predictions,[-1])-tf.reshape(yy,[-1])))
tensorflow.constant_initializer
3,660
import tensorflow as tf with tf.variable_scope('rnn_unroll'): state = initial_state outputs = [] for i in xrange(rnn_nunroll): if i > 0: tf.get_variable_scope().reuse_variables() (cell_output, state) = cell(rnn_inputs[i], state) outputs.append(cell_output) final_state = state
tensorflow.get_variable_scope
3,661
import tensorflow as tf

                                regularizer=regularizer)
            b = tf.get_variable('b', [2048], initializer=tf.constant_initializer(1.0))
            out = tf.matmul(self.flatten, w) + b
            self.fc1 = tf.nn.relu(out)

        # fc2
tensorflow.nn.relu
3,662
import tensorflow as tf from metric import tf_metrics from optimizer import distributed_optimizer as optimizer from model_io import model_io from distillation import knowledge_distillation as distill def correlation(x, y): x = x - tf.reduce_mean(x, axis=-1, keepdims=True) y = y - tf.reduce_mean(y, axis=-1, keepdims=True) x = tf.nn.l2_normalize(x, -1) y = tf.nn.l2_normalize(y, -1) return -tf.reduce_sum(x*y, axis=-1) # higher the better def kd(x, y): x_prob = tf.nn.softmax(x) print(x_prob.get_shape(), y.get_shape(), tf.reduce_sum(x_prob * y, axis=-1).get_shape()) return -tf.reduce_sum(x_prob * y, axis=-1) # higher the better def mse(x, y): x = x - tf.reduce_mean(x, axis=-1, keepdims=True) y = y - tf.reduce_mean(y, axis=-1, keepdims=True)
tensorflow.nn.l2_normalize
3,663
from tensorflow.python.ops import math_ops return math_ops.square(logits - math_ops.to_float(target)) def _log_loss_with_two_classes(logits, target): # sigmoid_cross_entropy_with_logits requires [batch_size, 1] target. if len(target.get_shape()) == 1: target = array_ops.expand_dims(target, dim=[1]) loss_vec = nn.sigmoid_cross_entropy_with_logits( labels=math_ops.to_float(target), logits=logits) return loss_vec def _softmax_cross_entropy_loss(logits, target): # Check that we got integer for classification. if not target.dtype.is_integer: raise ValueError("Target's dtype should be integer "
tensorflow.python.ops.math_ops.to_float
3,664
import tensorflow as tf pmm = _evaluate_legendre_polynomial_pmm_eval(order_m, x) return tf.where( tf.equal(degree_l, order_m), pmm, _evaluate_legendre_polynomial_branch(degree_l, order_m, x, pmm)) def _spherical_harmonics_normalization(l, m, var_type=tf.float64): l = tf.cast(l, dtype=var_type) m = tf.cast(m, dtype=var_type) numerator = (2.0 * l + 1.0) * factorial(l - tf.abs(m)) denominator = 4.0 * np.pi * factorial(l + tf.abs(m)) return tf.sqrt(numerator / denominator) def _evaluate_spherical_harmonics_branch(degree, order,
tensorflow.cast
3,665
import tensorflow as tf out_vocab_size, c2v) in_size = self._inputs[0].get_shape()[1].value # Also, output confidence scores at every word. confidence_mat = tf.get_variable('confidence_mat', [in_size, 1]) confidence_scores = tf.concat(1, [tf.matmul(o_, confidence_mat) for o_ in self._inputs]) # dropout on confidence_scores random_tensor = (1.0 - self._dropout_keep_prob +
tensorflow.matmul
3,666
import tensorflow as tf

        # TensorFlow while loop
        def body(deltas, previous_perturbations):
            with tf.control_dependencies(control_inputs=deltas):
                perturbations = [
tensorflow.control_dependencies
3,667
import tensorflow as tf INF = 1e30 class cudnn_gru: def __init__(self, num_layers, num_units, batch_size, input_size, keep_prob=1.0, is_train=None, scope=None): self.num_layers = num_layers self.grus = [] self.inits = [] self.dropout_mask = [] for layer in range(num_layers): input_size_ = input_size if layer == 0 else 2 * num_units gru_fw = tf.contrib.cudnn_rnn.CudnnGRU(1, num_units) gru_bw = tf.contrib.cudnn_rnn.CudnnGRU(1, num_units) init_fw = tf.tile(tf.Variable( tf.zeros([1, 1, num_units])), [1, batch_size, 1]) init_bw = tf.tile(tf.Variable( tf.zeros([1, 1, num_units])), [1, batch_size, 1]) mask_fw = dropout(tf.ones([1, batch_size, input_size_], dtype=tf.float32), keep_prob=keep_prob, is_train=is_train, mode=None) mask_bw = dropout(tf.ones([1, batch_size, input_size_], dtype=tf.float32), keep_prob=keep_prob, is_train=is_train, mode=None) self.grus.append((gru_fw, gru_bw, )) self.inits.append((init_fw, init_bw, ))
tensorflow.contrib.cudnn_rnn.CudnnGRU
3,668
import tensorflow as tf def body(self, features): exp_coupling = ["affine", "additive"] if self.hparams.coupling not in exp_coupling: raise ValueError("Expected hparams.coupling to be in %s, got %s" % (exp_coupling, self.hparams.coupling)) if self.is_training: init_features = self.create_init_batch(features) init_op = self.objective_tower(init_features, init=True) init_op = tf.Print( init_op, [init_op], message="Triggering data-dependent init.", first_n=20) tf.add_to_collection("glow_init_op", init_op) train_op = self.objective_tower(features, init=False) return tf.zeros_like(features["targets"]), {"training": train_op} def objective_tower(self, features, init=True): """Objective in terms of bits-per-pixel. Args: features: dict of tensors with "features" and "targets" keys. init: Whether or not to run data-dependent init. Returns: objective: float, bits-per-pixel. """ x = features["inputs"] # Scale x such that the pixels lie in-between -0.5 and.0.5
tensorflow.zeros_like
3,669
import tensorflow as tf # load pretrained model vars_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES) assign_ops = [] for var in vars_list: vname = var.name from_name = vname var_value = tf.contrib.framework.load_variable(MODEL_DIR, from_name) assign_ops.append(tf.assign(var, var_value)) sess.run(assign_ops) print('Model loaded.') result = sess.run(output) cv2.imwrite("output.png", result[0]) tf.reset_default_graph() #return FileResponse("output.png", media_type="image/png") with open("output.png", "rb") as image_file: image_string = "data:image/png;base64,{}".format(base64.b64encode(image_file.read()).decode()) return { "image": image_string } if __name__ == '__main__': uvicorn.run(app, host="0.0.0.0", port=8080)
tensorflow.reset_default_graph
3,670
import tensorflow as tf

        # execute at test time
        return tf.nn.batch_normalization(x, pop_mean, pop_var, beta, gamma, epsilon)

    return tf.cond(train, func1, func2)


def average_gradients(tower_grads):
tensorflow.cond
3,671
import tensorflow as tf

    # // --- Build Argus background PDF ---
    # RooRealVar argpar("argpar","argus shape parameter",-20.0,-100.,-1.) ;
    # RooConstVar m0("m0", "resonant mass", 5.291);
    argpar = tf.Variable(argpar_num, name="argpar", dtype=tf.float64)
    m0 = tf.constant(m0_num, name="m0", dtype=tf.float64)
    vdict['argpar'] = argpar

    # RooArgusBG argus("argus","Argus PDF",mes,m0,argpar) ;
tensorflow.Variable
3,672
import tensorflow as tf """ Check that the KL divergence matches a 1D by-hand calculation. """ m = 0 mu1d = mu[m,:][None,:] # 1 x N s1d = sqrt[:,m,m][:,None,None] # N x 1 x 1 K1d = K_batch[:,m,m][:,None,None] # N x 1 x 1 kl = gauss_kl(mu1d,s1d,K1d if not white else None) kl_tf = tf_kl_1d(tf.reshape(mu1d,(-1,)), # N tf.reshape(s1d,(-1,)), # N None if white else tf.reshape(K1d,(-1,))) # N np.testing.assert_allclose(kl.eval(), kl_tf.eval()) if __name__ == "__main__": tf.test.main()
tensorflow.reshape
3,673
import tensorflow as tf # for both the shortcut and non-shortcut paths as part of the first # block's projection. Cf. Appendix of [2]. if self.resnet_version == 1: inputs = batch_norm(inputs, training, self.data_format) inputs = tf.nn.relu(inputs) if self.first_pool_size: inputs = tf.layers.max_pooling2d( inputs=inputs, pool_size=self.first_pool_size, strides=self.first_pool_stride, padding='SAME', data_format=self.data_format) inputs = tf.identity(inputs, 'initial_max_pool') for i, num_blocks in enumerate(self.block_sizes):
tensorflow.layers.max_pooling2d
3,674
import tensorflow as tf

                  .FullyConnected('fc2', out_dim=128, nl=tf.nn.relu)
                  .FullyConnected('fct', out_dim=19, nl=tf.identity)())
        tf.nn.softmax(logits, name='prob')

        cost = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=label)
        cost = tf.reduce_mean(cost, name='cross_entropy_loss')

        wrong = tf.to_float(tf.logical_not(tf.nn.in_top_k(logits, label, 1)), name='incorrect_vector')
tensorflow.nn.sparse_softmax_cross_entropy_with_logits
3,675
import tensorflow as tf self.variable_mgr.append_apply_gradients_ops( gradient_state, opt, clipped_grads, training_ops) train_op = tf.group(*(training_ops + update_ops + extra_nccl_ops)) with tf.device(self.cpu_device): if self.task_index == 0 and FLAGS.summary_verbosity > 0: tf.summary.scalar('learning_rate', learning_rate) tf.summary.scalar('total_loss', total_loss) for grad, var in avg_grads: if grad is not None: tf.summary.histogram(var.op.name + '/gradients', grad) for var in tf.trainable_variables(): tf.summary.histogram(var.op.name, var) fetches = [train_op, total_loss] + enqueue_ops return (enqueue_ops, fetches)
tensorflow.summary.scalar
3,676
import tensorflow as tf if isinstance(tensors, np.ndarray): tensors = (tensors,) if isinstance(tensors, list): for i in range(len(tensors)): if tensors[i].dtype == np.dtype("float64"): tensors[i] = np.float32(tensors[i]) data_list = _splits(tensors) rdd = sc.parallelize(data_list, splits) tensor_structure = [TensorMeta(tf.as_dtype(t.dtype), shape=t.shape[1:], name="input_%s" % i) for i, t in enumerate(tensors)] else: flattened = nest.flatten(tensors) for i in range(len(flattened)): if flattened[i].dtype == np.dtype("float64"): flattened[i] = np.float32(flattened[i]) data_list = _splits(flattened)
tensorflow.as_dtype
3,677
import tensorflow as tf self.color_matrix = tf.convert_to_tensor([[1, 0, 0], [0, 1, 0], [-0.395913, 0.801109, 0]]) else: raise("ERROR: invalid type passed into Simulator class (only accepts 'D', 'P', or 'T')") self.rgb2lms = tf.convert_to_tensor([[17.8824, 43.5161, 4.11935], [3.45565, 27.1554, 3.86714], [0.0299566, 0.184309, 1.46709]]) def simulate_image(self, image): # passes an image through the color-blindness simulator inverted_rgb2lms = tf.linalg.inv(self.rgb2lms) product1 = tf.matmul(inverted_rgb2lms, self.color_matrix) product2 = tf.matmul(product1, self.rgb2lms) original_image_shape = image.shape simulated_image = tf.transpose(tf.matmul(product2, tf.reshape(tf.transpose(image, perm=[2, 0, 1]), (image.shape[2], image.shape[0] * image.shape[1]))), perm=[1, 0]) return tf.reshape(simulated_image, original_image_shape)
tensorflow.matmul
3,678
import tensorflow as tf

  def testTiedRNNSeq2Seq(self):
    with self.test_session() as sess:
      with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
        inp = [tf.constant(0.5, shape=[2, 2])] * 2
        dec_inp = [tf.constant(0.4, shape=[2, 2])] * 3
tensorflow.constant_initializer
3,679
import tensorflow as tf if self.maxnorm is not None: # Post-processing to limit embedding vars to L2 ball rel_maxnorm = self.maxnorm * self.rel_maxnorm_mult unique_ent_indices = tf.unique(tf.concat(0, [self.head_input, self.tail_input]))[0] unique_rel_indices = tf.unique(self.rel_input)[0] entity_constraint = self._norm_constraint_op(self.entity_embedding_vars,
tensorflow.concat
3,680
import tensorflow as tf # Add histograms for gradients. for grad, var in grads: if grad is not None: summaries.append(tf.summary.histogram(var.op.name + '/gradients', grad)) # Apply the gradients to adjust the shared variables. apply_gradient_op = opt.apply_gradients(grads, global_step=global_step) # Add histograms for trainable variables. for var in tf.trainable_variables(): summaries.append(tf.summary.histogram(var.op.name, var)) # Track the moving averages of all trainable variables. variable_averages = tf.train.ExponentialMovingAverage( cifar10.MOVING_AVERAGE_DECAY, global_step) variables_averages_op = variable_averages.apply(tf.trainable_variables()) # Group all updates to into a single train op. train_op = tf.group(apply_gradient_op, variables_averages_op) # Create a saver. saver = tf.train.Saver(tf.global_variables()) # Build the summary operation from the last tower summaries. summary_op = tf.summary.merge(summaries) # Build an initialization operation to run below.
tensorflow.train.ExponentialMovingAverage
3,681
import tensorflow as tf # create a TF session for the current graph config = tf.ConfigProto() if FLAGS.enbl_multi_gpu: config.gpu_options.visible_device_list = str(mgw.local_rank()) # pylint: disable=no-member else: config.gpu_options.visible_device_list = '0' # pylint: disable=no-member sess = tf.Session(config=config) # data input pipeline with tf.variable_scope(self.data_scope): iterator = self.build_dataset_train() images, labels = iterator.get_next() # model definition - distilled model if FLAGS.enbl_dst: logits_dst = self.helper_dst.calc_logits(sess, images) # model definition - weight-sparsified model
tensorflow.variable_scope
3,682
import tensorflow as tf for m in tf.range(start=0, limit=tf.size(y), delta=1, dtype=None, name='m_range'): vx = tf.contrib.lookup.MutableHashTable(key_dtype=tf.string, value_dtype=tf.int64, default_value=-1) vz = tf.contrib.lookup.MutableHashTable(key_dtype=tf.string, value_dtype=tf.int64, default_value=-1) vx_keys = tf.reshape(tf.Variable([], collections=[], dtype=tf.string), (-1, 1)) vz_keys = tf.reshape(tf.Variable([], collections=[], dtype=tf.string), (-1, 1)) x_t = tf.gather(x, l) x_t_len = tf.strings.length(x_t) x_t = tf.string_split([x_t], delimiter='').values z_t = tf.gather(y, m) z_t_len = tf.strings.length(z_t) z_t = tf.string_split([z_t], delimiter='').values for i in tf.range(start=0, limit=x_t_len - self._p + 1, delta=1, dtype=None, name='range'):
tensorflow.Variable
3,683
import tensorflow as tf # Define your AdamOptimiser, using FLAGS.learning_rate to minimixe the loss function decayed_learning_rate = tf.train.exponential_decay(FLAGS.learning_rate, tf.Variable(0, trainable=False), 1000, 0.8) update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) with tf.control_dependencies(update_ops): optimiser = tf.train.AdamOptimizer(decayed_learning_rate, name="Adam").minimize(cross_entropy) # calculate the prediction and the accuracy accuracy, acc_op = tf.metrics.accuracy(labels=tf.argmax(y_, axis=1), predictions=tf.argmax(y_conv, axis=1))
tensorflow.train.AdamOptimizer
3,684
import tensorflow as tf

    with tf.gfile.GFile(output_eval_file, "w") as writer:
      tf.logging.info("***** Eval results *****")
tensorflow.logging.info
3,685
import tensorflow as tf

            crop_h = h//self.crop_factor
            crop_w = w//self.crop_factor
            rgb = tf.image.random_crop(rgb,size=[crop_h,crop_w,c])

        # cast, bit conversion, compress domain, center
        rgb = tf.cast(rgb, tf.float32)
        if n_bits < 8:
            rgb = tf.floor(rgb/(2**(8-n_bits)))
        rgb = rgb/(n_bins) - 0.5
tensorflow.cast
3,686
import tensorflow as tf with slim.arg_scope( [slim.model_variable, slim.variable], device='/device:CPU:0'): with slim.arg_scope([slim.conv2d, slim.conv2d_in_plane, slim.conv2d_transpose, slim.separable_conv2d, slim.fully_connected], weights_regularizer=weights_regularizer, biases_regularizer=biases_regularizer, biases_initializer=tf.constant_initializer(0.0)): gtboxes_and_label_h, gtboxes_and_label_r = tf.py_func(self.get_gtboxes_and_label, inp=[inputs_list[i][1], inputs_list[i][2], inputs_list[i][3]], Tout=[tf.float32, tf.float32]) gtboxes_and_label_h = tf.reshape(gtboxes_and_label_h, [-1, 5]) gtboxes_and_label_r = tf.reshape(gtboxes_and_label_r, [-1, 6]) img = inputs_list[i][0] img_shape = inputs_list[i][-2:]
tensorflow.py_func
3,687
import tensorflow as tf trainable_var = tf.trainable_variables() if FLAGS.debug: for var in trainable_var: utils.add_to_regularization_and_summary(var) train_op = train(loss, trainable_var) print("Setting up summary op...") summary_op = tf.summary.merge_all() print("Setting up image reader...") train_records, valid_records = scene_parsing.read_dataset(FLAGS.data_dir) print(len(train_records)) print(len(valid_records)) print("Setting up dataset reader")
tensorflow.summary.merge_all
3,688
from tensorflow.python.framework import ops # avoid division by zero epsilon = 1e-7 def compute_precision(name): precision = math_ops.div(true_positives, epsilon + true_positives + false_positives, name='precision_' + name) return precision precision = compute_precision('value') with ops.control_dependencies([true_positives_compute_op, false_positives_compute_op]): update_op = compute_precision('update_op') if metrics_collections: ops.add_to_collections(metrics_collections, precision) if updates_collections: ops.add_to_collections(updates_collections, update_op) return precision, update_op def streaming_recall_at_thresholds(predictions, labels, thresholds, weights=None, metrics_collections=None, updates_collections=None, name=None): """Computes various recall values for different `thresholds` on `predictions`. The `streaming_recall_at_thresholds` function creates four local variables,
tensorflow.python.framework.ops.add_to_collections
3,689
from tensorflow.python.ops import array_ops super(VitisAveragePooling2D, self).build(input_shape) # Compute rescale factor in build() since the pool_size is determined. self.rescale_factor = _get_avgpool_scale(self.pool_size[0], self.pool_size[1]) def call(self, inputs): outputs = super(VitisAveragePooling2D, self).call(inputs) # Simulate DPU hahavior of AvgPooling input_shape = array_ops.shape(inputs) if self.rescale_factor != 1.0: outputs *= self.rescale_factor return outputs def _types_dict(): return {
tensorflow.python.ops.array_ops.shape
3,690
import tensorflow as tf

sess = tf.Session()

# The `with` / `name` settings above are optional; you can add them or leave them out,
# but the line below must be written.
# It creates a `logs` folder under the current directory and saves the graph information into it,
# so after this code runs a `logs` folder will have been created.
if int((tf.__version__).split('.')[1]) < 12 and int((tf.__version__).split('.')[0]) < 1:  # tensorflow version < 0.12
    writer = tf.train.SummaryWriter('logs/', sess.graph)
else:  # tensorflow version >= 0.12
    writer = tf.summary.FileWriter("logs/", sess.graph)
tensorflow.__version__.split
3,691
import tensorflow as tf ): values = [values] if spec.is_int_feature: feature = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values))) else: feature = tf.train.Feature(float_list=tf.train.FloatList(value=list(values))) features[spec.name] = feature return tf.train.Example(features=tf.train.Features(feature=features)) def _input_fn_builder(self, input_file, is_training): """Creates an `input_fn` closure to be passed to TPUEstimator.""" def input_fn(params): """The actual input function."""
tensorflow.train.Features
3,692
import tensorflow as tf # TODO: good distribution to init training. # TODO: support shape and tt_ranks as TensorShape?. # TODO: support None as a dimension. shape = np.array(shape) tt_rank = np.array(tt_rank) _validate_input_parameters(is_tensor=True, shape=shape, tt_rank=tt_rank) num_dims = shape.size if tt_rank.size == 1: tt_rank = tt_rank * np.ones(num_dims - 1) tt_rank = np.insert(tt_rank, 0, 1) tt_rank = np.append(tt_rank, 1) tt_rank = tt_rank.astype(int) tt_cores = [None] * num_dims with tf.name_scope(name): for i in range(num_dims): curr_core_shape = (tt_rank[i], shape[i], tt_rank[i + 1]) tt_cores[i] = tf.random_normal(curr_core_shape, mean=mean, stddev=stddev, dtype=dtype) return TensorTrain(tt_cores, shape, tt_rank) def tensor_batch_with_random_cores(shape, tt_rank=2, batch_size=1, mean=0., stddev=1., dtype=tf.float32, name='t3f_tensor_batch_with_random_cores'): """Generate a batch of TT-tensors of given shape with N(mean, stddev^2) cores.
tensorflow.name_scope
3,693
import tensorflow as tf

        tf.reduce_mean(tf.to_float(tf.nn.in_top_k(
            self.end_points_D_val['logits'], targets, 1)))

    self.error_rate = 1. - \
        tf.reduce_mean(tf.to_float(tf.nn.in_top_k(
            self.end_points_D['class_logits'], targets, 1)))

    if gpu_idx == 0:
tensorflow.nn.in_top_k
3,694
import tensorflow as tf


class ValueEstimator_MountainCarContinuous():
    def __init__(self, learning_rate=0.1, par_idx=0,scope="value_estimator"):
        w_init = tf.random_normal_initializer(0.,.1);
        with tf.variable_scope(scope+"_"+str(par_idx)):

            # state and target
            self.state = tf.placeholder(tf.float32, [None,400], "state")
            self.target = tf.placeholder(tf.float32, [None,1], name="target")

            # layers
            self.value_estimate = tf.layers.dense(self.state, 1, kernel_initializer=w_init, name='v')  # estimated value for state
tensorflow.placeholder
3,695
import tensorflow as tf

        max_len - x_len,
    ]
    zeros = tf.zeros(pad_shape, dtype=x[key].dtype)
    x[key] = tf.concat([x[key], zeros], 0)
    return x
tensorflow.concat
3,696
import tensorflow as tf

    if encoder.conv_filters:
        encoder_inputs_ = tf.expand_dims(encoder_inputs_, axis=3)
tensorflow.expand_dims
3,697
import tensorflow as tf name='linear_projection_1_bn') activation = tf.nn.relu(projection) activation = dropout(activation, self.dropout_keep_prob) projection = linear( input=activation, input_size=dialogue_state_size, output_size=dialogue_state_size, name='linear_projection_2' ) projection = batch_norm_lin(projection, dialogue_state_size, self.phase_train, name='linear_projection_2_bn') activation = tf.nn.relu(projection) activation = dropout(activation, self.dropout_keep_prob) projection = linear( input=activation, input_size=dialogue_state_size, output_size=action_templates_vocabulary_length, name='linear_projection_3_predictions_action' ) self.predictions_action = tf.nn.softmax(projection, name="softmax_output_prediction_action") # argument prediction
tensorflow.nn.relu
3,698
import tensorflow as tf self.pi_new_params = [oldp.assign(p) for p, oldp in zip(pi_params, pi_old_params)] self.vf_new_params = [oldp.assign(p) for p, oldp in zip(vf_params, vf_old_params)] self.sess.run(tf.global_variables_initializer()) # Tensorboard if summary_dir is not None: self.writer = tf.summary.FileWriter(summary_dir) tf.summary.scalar('Loss/Policy', loss_pg) tf.summary.scalar('Loss/Value', loss_vf) tf.summary.scalar('Loss/Entropy', loss_entropy) tf.summary.scalar('Loss/Total', loss) tf.summary.scalar('Var/Epsilon', epsilon_decay) tf.summary.scalar('Var/Policy Mode', tf.reduce_mean(pi.mode())) tf.summary.scalar('Var/Policy Sigma', tf.reduce_mean(pi.stddev())) tf.summary.scalar('Var/Value', tf.reduce_mean(self.vf)) self.summarise = tf.summary.merge(tf.get_collection(tf.GraphKeys.SUMMARIES)) # AC net def build_anet(self, state_in, name, reuse=False): reg = tf.contrib.layers.l2_regularizer(1e-3) with tf.variable_scope(name, reuse=reuse): layer_a1 = tf.layers.dense(state_in, 512, tf.nn.relu, kernel_regularizer=reg) layer_a2 = tf.layers.dense(layer_a1, 256, tf.nn.relu, kernel_regularizer=reg) mu = tf.layers.dense(layer_a2, self.a_dim, tf.nn.tanh, kernel_regularizer=reg) # sigma = tf.layers.dense(layer_a2, self.a_dim, tf.nn.softplus, kernel_regularizer=reg) sigma = tf.get_variable(name='pi_sigma', shape=self.a_dim, initializer=tf.constant_initializer(0.5)) sigma = tf.clip_by_value(sigma, 0.0, 1.0) norm_dist = tf.distributions.Normal(loc=mu * self.a_bound, scale=sigma) params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=name)
tensorflow.reduce_mean
3,699
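Every row above follows the three-column schema listed at the top: a seed code string, the fully qualified API it demonstrates, and an integer index. As a rough illustration only, and not part of the dataset itself, the sketch below shows one plain-Python way such rows could be represented and filtered by API name; the Row class, the helper function, and the sample values are hypothetical, standing in for however the data is actually loaded.

from dataclasses import dataclass
from typing import Iterable, List


@dataclass
class Row:
    seed: str       # code snippet containing a call to the target API
    seed_api: str   # fully qualified API name, e.g. "tensorflow.nn.moments"
    index: int      # integer row id


def rows_for_api(rows: Iterable[Row], api_name: str) -> List[Row]:
    """Return all rows whose seed demonstrates the given API."""
    return [row for row in rows if row.seed_api == api_name]


# Hypothetical usage with made-up values mirroring the schema above.
sample = [
    Row(seed="import tensorflow as tf\nx = tf.constant(3.)", seed_api="tensorflow.constant", index=3606),
    Row(seed="import tensorflow as tf\ny = tf.reshape(x, [-1])", seed_api="tensorflow.reshape", index=3624),
]
print(len(rows_for_api(sample, "tensorflow.constant")))  # prints 1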