Columns:
seed: string (lengths 25 to 2.89k)
seed_api: string (lengths 14 to 102)
index: int64 (0 to 14.8k)
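Each record below is a (seed, seed_api, index) triple: a code fragment, the fully qualified TensorFlow API it exercises, and a running row index. As a purely illustrative sketch (the field values here are invented, not taken from the rows below), one record could be represented in Python as:

# Hypothetical representation of a single record; the keys mirror the columns above.
record = {
    'seed': "import tensorflow as tf\nx = tf.constant([1.0, 2.0])",
    'seed_api': 'tensorflow.constant',
    'index': 3700,
}
print(record['seed_api'])  # tensorflow.constant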
import tensorflow as tf base_dir = osp.expanduser(root_dir) if not tf.io.gfile.exists(base_dir): tf.io.gfile.makedirs(base_dir) tag = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S') tb_logdir = osp.join(base_dir, tag, 'tb') save_dir = osp.join(base_dir, tag, 'train') tf.io.gfile.makedirs(tb_logdir) tf.io.gfile.makedirs(save_dir) writer = tf.contrib.summary.create_file_writer(tb_logdir) writer.set_as_default() return writer, save_dir
tensorflow.io.gfile.makedirs
3,700
import tensorflow as tf # Also, we can limit the size of GPU memory used, with the following option config.gpu_options.per_process_gpu_memory_fraction = 0.4 sess_limited = tf.Session(config=config) # How to set placements on multiple devices. # Here, assume we have three devices CPU:0, GPU:0, and GPU:1 if tf.test.is_built_with_cuda(): with tf.device('/cpu:0'): a = tf.constant([1.0, 3.0, 5.0], shape=[1, 3]) b = tf.constant([2.0, 4.0, 6.0], shape=[3, 1]) with tf.device('/gpu:1'): c = tf.matmul(a,b) c = tf.reshape(c, [-1])
tensorflow.test.is_built_with_cuda
3,701
import tensorflow as tf scales_to_logits[MERGED_LOGITS_SCOPE].dtype) predictions[output] = tf.argmax(logits, 3, output_type=tf.dtypes.int32) #predictions[output + PROB_SUFFIX] = tf.nn.softmax(logits) else: argmax_results = tf.argmax(logits, 3, output_type=tf.dtypes.int32) argmax_results = tf.image.resize_nearest_neighbor( tf.expand_dims(argmax_results, 3), tf.shape(images)[1:3],
tensorflow.argmax
3,702
import tensorflow as tf if softmax_stag: scores = tf.nn.softmax(scores) # [B, 1, T]
tensorflow.nn.softmax
3,703
import tensorflow as tf if pool_first: if in_channel == filters: if strides == 1: shortcut = tf.identity(x) else: shortcut= tf.pad(x, tf.constant([[0,0],[1,1],[1,1],[0,0]]), "CONSTANT") shortcut = tf.nn.max_pool(shortcut, [1, strides, strides, 1], [1, strides, strides, 1], 'VALID') else: shortcut = self._conv('shortcut_conv', x, padding='VALID', num_filters=filters, kernel_size=(1, 1), stride=(strides, strides), bias=self.bias) else: if dilation != 1:
tensorflow.nn.max_pool
3,704
from tensorflow.python.framework import tensor_util if self.validate_args: x = control_flow_ops.with_dependencies([ check_ops.assert_rank(x, 0), check_ops.assert_non_negative(x)], x) return x def _introspect_ndims(self, ndims): """Helper to establish some properties of input ndims args.""" if self._is_all_constant_helper(ndims): return (tensor_util.constant_value(ndims), tensor_util.constant_value(ndims) == 0) return None, math_ops.equal(ndims, 0)
tensorflow.python.framework.tensor_util.constant_value
3,705
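For context, a minimal sketch (assumed TF 1.x graph mode, not taken from the row above) of what tensorflow.python.framework.tensor_util.constant_value returns: the statically known value as a NumPy array, or None when the value is only known at run time.

import tensorflow as tf
from tensorflow.python.framework import tensor_util

const = tf.constant([1, 2, 3])
print(tensor_util.constant_value(const))  # -> array([1, 2, 3])

# For a value that is not a compile-time constant, the helper returns None.
ph = tf.placeholder(tf.int32, shape=[3])
print(tensor_util.constant_value(ph))     # -> None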
import tensorflow as tf parallelism. If set to 0, the system will pick an appropriate number.""") tf.flags.DEFINE_string('trace_file', None, """Enable TensorFlow tracing and write trace to
tensorflow.flags.DEFINE_string
3,706
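The seed above is cut off inside the 'trace_file' definition; as an assumed TF 1.x-style sketch (the flag name and default below are placeholders, not a completion of that definition), tensorflow.flags.DEFINE_string registers a string flag that is later read through tf.flags.FLAGS:

import tensorflow as tf

# 'data_dir' and its default are hypothetical, for illustration only.
tf.flags.DEFINE_string('data_dir', '/tmp/data', 'Directory holding the input data (placeholder).')
FLAGS = tf.flags.FLAGS

def main(_):
    print(FLAGS.data_dir)

if __name__ == '__main__':
    tf.app.run()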
from tensorflow.python.ops import math_ops return None else: return array_ops.reshape( math_ops.to_float(features[self._weight_column_name]), shape=(-1,)) @property
tensorflow.python.ops.math_ops.to_float
3,707
import tensorflow as tf flat_offsets = tf.reshape( tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1]) flat_positions = tf.reshape(positions + flat_offsets, [-1]) flat_sequence_tensor = tf.reshape(sequence_tensor, [batch_size * seq_length, width]) output_tensor = tf.gather(flat_sequence_tensor, flat_positions) return output_tensor
tensorflow.reshape
3,708
from tensorflow.python.framework import ops ops.RegisterShape("Abs")(common_shapes.unchanged_shape) ops.RegisterShape("Ceil")(common_shapes.unchanged_shape) ops.RegisterShape("Conj")(common_shapes.unchanged_shape) ops.RegisterShape("Cos")(common_shapes.unchanged_shape) ops.RegisterShape("Exp")(common_shapes.unchanged_shape) ops.RegisterShape("Floor")(common_shapes.unchanged_shape) ops.RegisterShape("Imag")(common_shapes.unchanged_shape) ops.RegisterShape("Inv")(common_shapes.unchanged_shape) ops.RegisterShape("IsFinite")(common_shapes.unchanged_shape) ops.RegisterShape("IsInf")(common_shapes.unchanged_shape) ops.RegisterShape("IsNan")(common_shapes.unchanged_shape) ops.RegisterShape("Log")(common_shapes.unchanged_shape) ops.RegisterShape("LogicalNot")(common_shapes.unchanged_shape) ops.RegisterShape("Neg")(common_shapes.unchanged_shape)
tensorflow.python.framework.ops.RegisterShape
3,709
import tensorflow as tf inputs = tf.random_uniform((batch_size, height, width, 3)) logits, end_points = mobilenet_v1.mobilenet_v1(inputs, num_classes) self.assertTrue(logits.op.name.startswith( 'MobilenetV1/Logits/SpatialSqueeze')) self.assertListEqual(logits.get_shape().as_list(), [batch_size, num_classes]) self.assertTrue('Predictions' in end_points) self.assertListEqual(end_points['Predictions'].get_shape().as_list(), [batch_size, num_classes]) def testBuildPreLogitsNetwork(self): batch_size = 5 height, width = 224, 224 num_classes = None inputs = tf.random_uniform((batch_size, height, width, 3)) net, end_points = mobilenet_v1.mobilenet_v1(inputs, num_classes) self.assertTrue(net.op.name.startswith('MobilenetV1/Logits/AvgPool')) self.assertListEqual(net.get_shape().as_list(), [batch_size, 1, 1, 1024]) self.assertFalse('Logits' in end_points) self.assertFalse('Predictions' in end_points) def testBuildBaseNetwork(self): batch_size = 5 height, width = 224, 224 inputs = tf.random_uniform((batch_size, height, width, 3)) net, end_points = mobilenet_v1.mobilenet_v1_base(inputs) self.assertTrue(net.op.name.startswith('MobilenetV1/Conv2d_13')) self.assertListEqual(net.get_shape().as_list(),
tensorflow.random_uniform
3,710
import tensorflow as tf Based on: https://github.com/gitlimlab/CycleGAN-Tensorflow/blob/master/ops.py For tf padding, refer to: https://www.tensorflow.org/api_docs/python/tf/pad """ reg_l2 = tf.keras.regularizers.l2(5e-7) if padding == 'SYMMETRIC' or padding == 'REFLECT': p = (kernel_size - 1) // 2
tensorflow.keras.regularizers.l2
3,711
from tensorflow.python.ops import array_ops if n_classes < 2: raise ValueError("n_classes must be >= 2") super(_MultiClassTargetColumn, self).__init__( loss_fn=loss_fn, num_label_columns=1 if n_classes == 2 else n_classes, label_name=label_name, weight_column_name=weight_column_name) def logits_to_predictions(self, logits, proba=False): if self.num_label_columns == 1: logits = array_ops.concat(1, [array_ops.zeros_like(logits), logits]) if proba: return nn.softmax(logits) else: return math_ops.argmax(logits, 1) def _default_eval_metrics(self): if self._num_label_columns == 1: return _get_default_binary_metrics_for_eval(thresholds=[.5])
tensorflow.python.ops.array_ops.zeros_like
3,712
import tensorflow as tf # Note: tf.nn.softmax_cross_entropy_with_logits # expects logits, Keras expects probabilities. if not from_logits: # transform back to logits epsilon = _to_tensor(_EPSILON, output.dtype.base_dtype) output = tf.clip_by_value(output, epsilon, 1 - epsilon) output = tf.log(output / (1 - output)) try: return tf.nn.sigmoid_cross_entropy_with_logits(labels=target, logits=output) except TypeError: return tf.nn.sigmoid_cross_entropy_with_logits(logits=output, labels=target) def sum(x, axis=None, keepdims=False): """Sum of the values in a tensor, alongside the specified axis. Parameters ---------- x: A tensor or variable. axis: An integer, the axis to sum over.
tensorflow.nn.sigmoid_cross_entropy_with_logits
3,713
import tensorflow as tf [None, num_classes]) images = tf.random_uniform((batch_size, height, width, 3))
tensorflow.random_uniform
3,714
import tensorflow as tf dec, mem = tf.nn.seq2seq.embedding_tied_rnn_seq2seq( enc_inp, dec_inp, cell, num_symbols=5, embedding_size=2) sess.run([tf.global_variables_initializer()]) res = sess.run(dec) self.assertEqual(3, len(res)) self.assertEqual((2, 5), res[0].shape) res = sess.run([mem]) self.assertEqual((2, 2), res[0].c.shape) self.assertEqual((2, 2), res[0].h.shape) # Test when num_decoder_symbols is provided, the size of decoder output # is num_decoder_symbols. with tf.variable_scope("decoder_symbols_seq2seq"): dec, mem = tf.nn.seq2seq.embedding_tied_rnn_seq2seq( enc_inp, dec_inp, cell, num_symbols=5, num_decoder_symbols=3, embedding_size=2) sess.run([tf.global_variables_initializer()]) res = sess.run(dec) self.assertEqual(3, len(res)) self.assertEqual((2, 3), res[0].shape) # Test externally provided output projection. w = tf.get_variable("proj_w", [2, 5]) b = tf.get_variable("proj_b", [5]) with tf.variable_scope("proj_seq2seq"):
tensorflow.variable_scope
3,715
import tensorflow as tf with tf.variable_scope("no_tuple"): cell1 = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=False) dec, mem = tf.nn.seq2seq.embedding_rnn_seq2seq( enc_inp, dec_inp, cell1, num_encoder_symbols=2, num_decoder_symbols=5, embedding_size=2) sess.run([tf.global_variables_initializer()]) res = sess.run(dec) self.assertEqual(3, len(res)) self.assertEqual((2, 5), res[0].shape) res = sess.run([mem]) self.assertEqual((2, 4), res[0].shape) # Test externally provided output projection. w = tf.get_variable("proj_w", [2, 5]) b = tf.get_variable("proj_b", [5]) with tf.variable_scope("proj_seq2seq"): dec, _ = tf.nn.seq2seq.embedding_rnn_seq2seq( enc_inp, dec_inp, cell, num_encoder_symbols=2, num_decoder_symbols=5, embedding_size=2, output_projection=(w, b)) sess.run([tf.global_variables_initializer()]) res = sess.run(dec) self.assertEqual(3, len(res)) self.assertEqual((2, 2), res[0].shape) # Test that previous-feeding model ignores inputs after the first. dec_inp2 = [tf.constant(0, tf.int32, shape=[2]) for _ in range(3)] with tf.variable_scope("other"): d3, _ = tf.nn.seq2seq.embedding_rnn_seq2seq( enc_inp, dec_inp2, cell, num_encoder_symbols=2,
tensorflow.get_variable
3,716
import tensorflow as tf tf.saved_model.load(
tensorflow.saved_model.load
3,717
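The seed above ends right after the call opens, so as an assumed illustration (TF 2.x form of the API, with a hypothetical export path), tensorflow.saved_model.load takes an export directory and returns the restored object:

import tensorflow as tf

# '/tmp/exported_model' is a hypothetical SavedModel directory.
loaded = tf.saved_model.load('/tmp/exported_model')
print(list(loaded.signatures.keys()))        # e.g. ['serving_default']
infer = loaded.signatures['serving_default']  # callable concrete function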
import tensorflow as tf # Select optimizer self.optimizer_func = tf.train.AdagradOptimizer if self.hparams.grad_strategy == 'sgd': self.optimizer_func = tf.train.GradientDescentOptimizer self.separate_gradient_update() tf.summary.scalar('Gradient Norm', self.norm, collections=['train']) tf.summary.scalar('Learning Rate', self.ranker_learning_rate, collections=['train']) tf.summary.scalar('Final Loss', tf.reduce_mean(self.loss), collections=['train']) clipped_labels = tf.clip_by_value(reshaped_train_labels, clip_value_min=0, clip_value_max=1) pad_removed_train_output = self.remove_padding_for_metric_eval(self.docid_inputs, train_output) for metric in self.exp_settings['metrics']: for topn in self.exp_settings['metrics_topn']: list_weights = tf.reduce_mean(self.propensity_weights * clipped_labels, axis=1, keep_dims=True) metric_value = utils.make_ranking_metric_fn(metric, topn)(reshaped_train_labels, pad_removed_train_output, None) tf.summary.scalar('%s_%d' % (metric, topn), metric_value, collections=['train']) weighted_metric_value = utils.make_ranking_metric_fn(metric, topn)(reshaped_train_labels, pad_removed_train_output, list_weights)
tensorflow.reduce_mean
3,718
import tensorflow as tf box_encodings = ops.position_sensitive_crop_regions( location_feature_map, boxes=tf.reshape(proposal_boxes, [-1, self._box_code_size]), box_ind=get_box_indices(proposal_boxes), crop_size=self._crop_size, num_spatial_bins=self._num_spatial_bins, global_pool=True) box_encodings = tf.squeeze(box_encodings, squeeze_dims=[1, 2]) box_encodings = tf.reshape(box_encodings, [batch_size * num_boxes, 1, self.num_classes, self._box_code_size]) # Class predictions. total_classes = self.num_classes + 1 # Account for background class. class_feature_map_depth = (self._num_spatial_bins[0] * self._num_spatial_bins[1] *
tensorflow.reshape
3,719
from tensorflow.contrib.slim.python.slim.data import dataset_data_provider def testTFRecordDataset(self): dataset_dir = tempfile.mkdtemp(prefix=os.path.join(self.get_temp_dir(), 'tfrecord_dataset')) height = 300 width = 280 with self.cached_session(): test_dataset = _create_tfrecord_dataset(dataset_dir) provider = dataset_data_provider.DatasetDataProvider(test_dataset) key, image, label = provider.get(['record_key', 'image', 'label']) image = _resize_image(image, height, width) with session.Session('') as sess: with queues.QueueRunners(sess): key, image, label = sess.run([key, image, label]) split_key = key.decode('utf-8').split(':') self.assertEqual(2, len(split_key))
tensorflow.contrib.slim.python.slim.data.dataset_data_provider.DatasetDataProvider
3,720
import tensorflow as tf parallel_iterations=decoder.parallel_iterations, swap_memory=decoder.swap_memory) outputs = outputs.stack() weights = weights.stack() # batch_size, encoders, output time, input time states = states.stack() attns = attns.stack() samples = samples.stack() # put batch_size as first dimension outputs = tf.transpose(outputs, perm=(1, 0, 2)) weights = tf.transpose(weights, perm=(1, 0, 2)) states = tf.transpose(states, perm=(1, 0, 2)) attns = tf.transpose(attns, perm=(1, 0, 2)) samples = tf.transpose(samples) return outputs, weights, states, attns, samples, get_logits, initial_data def encoder_decoder(encoders, decoders, encoder_inputs, targets, feed_previous, align_encoder_id=0, encoder_input_length=None, feed_argmax=True, rewards=None, use_baseline=True, training=True, global_step=None,
tensorflow.transpose
3,721
import tensorflow as tf """ gh_x, gh_w = hermgauss(num_gauss_hermite_points) gh_x = gh_x.reshape(1, -1) gh_w = gh_w.reshape(-1, 1) / np.sqrt(np.pi) shape = tf.shape(Fmu) Fmu, Fvar = [tf.reshape(e, (-1, 1)) for e in (Fmu, Fvar)] X = gh_x * tf.sqrt(2.0 * Fvar) + Fmu logp = phi(X)
tensorflow.shape
3,722
import tensorflow as tf predictions, label_ids, weights=is_real_example) # Compute Matthew's correlation mcc = tf.div_no_nan( tp * tn - fp * fn, tf.pow((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn), 0.5)) # Compute accuracy accuracy = tf.metrics.accuracy( labels=label_ids, predictions=predictions,
tensorflow.pow
3,723
import tensorflow as tf train_inputs = tf.random_uniform((train_batch_size, height, width, 3)) mobilenet_v1.mobilenet_v1(train_inputs, num_classes) eval_inputs = tf.random_uniform((eval_batch_size, height, width, 3)) logits, _ = mobilenet_v1.mobilenet_v1(eval_inputs, num_classes, reuse=True) predictions = tf.argmax(logits, 1) with self.test_session() as sess: sess.run(tf.global_variables_initializer()) output = sess.run(predictions) self.assertEqual(output.shape, (eval_batch_size,)) def testLogitsNotSqueezed(self): num_classes = 25 images = tf.random_uniform([1, 224, 224, 3]) logits, _ = mobilenet_v1.mobilenet_v1(images, num_classes=num_classes, spatial_squeeze=False) with self.test_session() as sess: tf.global_variables_initializer().run() logits_out = sess.run(logits) self.assertListEqual(list(logits_out.shape), [1, 1, 1, num_classes]) def testBatchNormScopeDoesNotHaveIsTrainingWhenItsSetToNone(self): sc = mobilenet_v1.mobilenet_v1_arg_scope(is_training=None) self.assertNotIn('is_training', sc[slim.arg_scope_func_key( slim.batch_norm)])
tensorflow.random_uniform
3,724
import tensorflow as tf """Center loss based on the paper "A Discriminative Feature Learning Approach for Deep Face Recognition" (http://ydwen.github.io/papers/WenECCV16.pdf) """ nrof_features = features.get_shape()[1] centers = tf.get_variable('centers', [nrof_classes, nrof_features], dtype=tf.float32, initializer=tf.constant_initializer(0), trainable=False) label = tf.reshape(label, [-1]) centers_batch = tf.gather(centers, label) diff = (1 - alfa) * (centers_batch - features) centers = tf.scatter_sub(centers, label, diff) # centers = tf.nn.l2_normalize(centers, 1, 1e-10, name='centers_norm') loss = tf.reduce_mean(tf.square(features - centers_batch)) return loss, centers
tensorflow.gather
3,725
from tensorflow.python.ops import gen_nn_ops features: A `Tensor` with type `float`, `double`, `int32`, `int64`, `uint8`, `int16`, or `int8`. name: A name for the operation (optional). Returns: A `Tensor` with the same type as `features`. """ with ops.op_scope([features], name, "Relu6") as name: features = ops.convert_to_tensor(features, name="features") return gen_nn_ops._relu6(features, name=name) def softmax_cross_entropy_with_logits(logits, labels, name=None): """Computes softmax cross entropy between `logits` and `labels`. Measures the probability error in discrete classification tasks in which the classes are mutually exclusive (each entry is in exactly one class). For example, each CIFAR-10 image is labeled with one and only one label: an image
tensorflow.python.ops.gen_nn_ops._relu6
3,726
import tensorflow as tf # [n_envs * n_steps, n_act] # Directly computed gradient of KL divergence wrt f kl_grad = - f_polyak_i / (f_i_ + eps) k_dot_g = tf.reduce_sum(kl_grad * grad, axis=-1) adj = tf.maximum(0.0, (tf.reduce_sum(kl_grad * grad, axis=-1) - self.delta) / ( tf.reduce_sum(tf.square(kl_grad), axis=-1) + eps)) # [n_envs * n_steps] # Calculate stats (before doing adjustment) for logging. avg_norm_k = avg_norm(kl_grad) avg_norm_g = avg_norm(grad) avg_norm_k_dot_g = tf.reduce_mean(tf.abs(k_dot_g)) avg_norm_adj = tf.reduce_mean(tf.abs(adj)) grad = grad - tf.reshape(adj, [self.n_envs * self.n_steps, 1]) * kl_grad # These are trust region adjusted gradients wrt f, i.e. statistics of policy pi grads_f = -grad / (self.n_envs * self.n_steps) grads_policy = tf.gradients(f_i_, self.params, grads_f) grads_q = tf.gradients(loss_q * self.q_coef, self.params) grads = [gradient_add(g1, g2, param, verbose=self.verbose)
tensorflow.abs
3,727
import tensorflow as tf # key_masks = tf.sequence_mask(facts_length, tf.shape(facts)[1]) # [B, T] key_masks = tf.expand_dims(mask, 1) # [B, 1, T] paddings = tf.ones_like(scores) * (-2 ** 32 + 1) if not forCnn: scores = tf.where(key_masks, scores, paddings) # [B, 1, T] # Scale # scores = scores / (facts.get_shape().as_list()[-1] ** 0.5) # Activation if softmax_stag: scores = tf.nn.softmax(scores) # [B, 1, T] # Weighted sum if mode == 'SUM': output = tf.matmul(scores, facts) # [B, 1, H] # output = tf.reshape(output, [-1, tf.shape(facts)[-1]]) else: scores = tf.reshape(scores, [-1, tf.shape(facts)[1]]) output = facts * tf.expand_dims(scores, -1) output = tf.reshape(output, tf.shape(facts)) if return_alphas:
tensorflow.nn.softmax
3,728
import tensorflow as tf paddings = tf.ones_like(scores) * (-2 ** 32 + 1) scores = tf.where(key_masks, scores, paddings) # [B, 1, T]
tensorflow.where
3,729
import tensorflow as tf outputs = [ f(tf.constant([1]), tf.constant([2])), f(tf.constant([1]), tf.constant([2])), f(tf.constant([1]), tf.constant([2])), f(tf.constant([1]), tf.constant([2])), f(tf.constant([1]), tf.constant([2])),
tensorflow.constant
3,730
import tensorflow as tf def metric_fn(per_example_loss, label_ids, logits): predictions = tf.argmax(logits, axis=-1, output_type=tf.int32) accuracy = tf.metrics.accuracy(label_ids, predictions) loss = tf.metrics.mean(per_example_loss) return { "eval_accuracy": accuracy, "eval_loss": loss, } eval_metrics = (metric_fn, [per_example_loss, label_ids, logits]) output_spec = tf.contrib.tpu.TPUEstimatorSpec( mode=mode, loss=total_loss, eval_metrics=eval_metrics, scaffold_fn=scaffold_fn) else: output_spec = tf.contrib.tpu.TPUEstimatorSpec( mode=mode, predictions=probabilities, scaffold_fn=scaffold_fn) return output_spec return model_fn
tensorflow.contrib.tpu.TPUEstimatorSpec
3,731
import tensorflow as tf import traceback import time import os log = infolog.log def add_embedding_stats(summary_writer, embedding_names, paths_to_meta, checkpoint_path): # Create tensorboard projector config = tf.contrib.tensorboard.plugins.projector.ProjectorConfig() config.model_checkpoint_path = checkpoint_path for embedding_name, path_to_meta in zip(embedding_names, paths_to_meta): # Initialize config embedding = config.embeddings.add() # Specify the embedding variable and the metadata embedding.tensor_name = embedding_name embedding.metadata_path = path_to_meta
tensorflow.contrib.tensorboard.plugins.projector.ProjectorConfig
3,732
import tensorflow as tf if self.tanh_constant is not None: op_tanh = self.tanh_constant / self.op_tanh_reduce logits = op_tanh * tf.tanh(logits) if use_bias: logits += self.b_soft_no_learn op_id = tf.multinomial(logits, 1) op_id = tf.to_int32(op_id) op_id = tf.reshape(op_id, [1]) arc_seq = arc_seq.write(start_id + 2 * i + 1, op_id) curr_log_prob = tf.nn.sparse_softmax_cross_entropy_with_logits( logits=logits, labels=op_id) log_prob += curr_log_prob curr_ent = tf.stop_gradient(tf.nn.softmax_cross_entropy_with_logits( logits=logits, labels=tf.nn.softmax(logits))) entropy += curr_ent inputs = tf.nn.embedding_lookup(self.w_emb, op_id) next_c, next_h = stack_lstm(inputs, prev_c, prev_h, self.w_lstm) anchors = anchors.write(layer_id, next_h[-1]) anchors_w_1 = anchors_w_1.write(layer_id, tf.matmul(next_h[-1], self.w_attn_1)) inputs = self.g_emb return (layer_id + 1, inputs, next_c, next_h, anchors, anchors_w_1, arc_seq, entropy, log_prob) loop_vars = [
tensorflow.nn.softmax
3,733
from tensorflow.python.ops import data_flow_ops labels_placeholder = tf.placeholder(tf.int32, shape=(None,1), name='labels') batch_size_placeholder = tf.placeholder(tf.int32, name='batch_size') control_placeholder = tf.placeholder(tf.int32, shape=(None,1), name='control') phase_train_placeholder = tf.placeholder(tf.bool, name='phase_train') nrof_preprocess_threads = 4 image_size = (args.image_size, args.image_size) eval_input_queue = data_flow_ops.FIFOQueue(capacity=2000000, dtypes=[tf.string, tf.int32, tf.int32], shapes=[(1,), (1,), (1,)], shared_name=None, name=None) eval_enqueue_op = eval_input_queue.enqueue_many([image_paths_placeholder, labels_placeholder, control_placeholder], name='eval_enqueue_op') image_batch, label_batch = facenet.create_input_pipeline(eval_input_queue, image_size, nrof_preprocess_threads, batch_size_placeholder)
tensorflow.python.ops.data_flow_ops.FIFOQueue
3,734
import tensorflow as tf x = tf.nn.bias_add(x, bias) return x def t_conv(self, id, input, channels, size=3, stride=1, use_bias=True, padding="SAME", init_stddev=-1.0): # good old t-conv. I love it! assert padding in ["SAME", "VALID"], 'valid paddings are "SAME", "VALID"' if type(size) == int: size = [size, size] if init_stddev <= 0.0: init = tf.contrib.layers.variance_scaling_initializer(dtype=tf.float32) else: init = tf.truncated_normal_initializer(stddev=init_stddev) return tf.layers.conv2d_transpose(input, channels, kernel_size=size, strides=[stride, stride], padding=padding, kernel_initializer=init, name='tr_conv' + id, use_bias=use_bias) # Traditional U-Net def build_Unet_Arch(self, input_data, name="Unet_Arch"): self.base_number_of_features = 32
tensorflow.contrib.layers.variance_scaling_initializer
3,735
from tensorflow.python.training import saver log_every_steps=log_every_steps, supervisor_is_chief=(self._config.task == 0), supervisor_master=self._config.master, feed_fn=feed_fn, max_steps=steps, fail_on_nan_loss=fail_on_nan_loss) def _evaluate_model(self, input_fn, steps, feed_fn=None, metrics=None): if self._config.execution_mode not in ('all', 'evaluate', 'eval_evalset'): return checkpoint_path = saver.latest_checkpoint(self._model_dir) eval_dir = os.path.join(self._model_dir, 'eval') with ops.Graph().as_default() as g: random_seed.set_random_seed(self._config.tf_random_seed) global_step = contrib_framework.create_global_step(g) features, targets = input_fn() self._check_inputs(features, targets) eval_dict = self._get_eval_ops(features, targets, metrics or self._get_default_metric_functions()) eval_results, _ = evaluate( graph=g,
tensorflow.python.training.saver.latest_checkpoint
3,736
import tensorflow as tf def model_fn_builder(bert_config, init_checkpoint, use_tpu, use_one_hot_embeddings): """Returns `model_fn` closure for TPUEstimator.""" def model_fn(features, mode, params): # pylint: disable=unused-argument """The `model_fn` for TPUEstimator.""" tf.logging.info("*** Features ***") for name in sorted(features.keys()): tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape)) input_ids = features["input_ids"] input_mask = features["input_mask"] segment_ids = features["segment_ids"] masked_lm_positions = features["masked_lm_positions"] masked_lm_ids = features["masked_lm_ids"] model = modeling.BertModel(
tensorflow.logging.info
3,737
import tensorflow as tf gtboxes_and_label_h = tf.reshape(gtboxes_and_label_h, [cfgs.BATCH_SIZE, -1, 5]) # Unnecessary, if you have already sorted when making tfrecord and no data augmentation. gtboxes_and_label_q = tf.py_func(func=re_order, inp=[tf.reshape(gtboxes_and_label_q, [-1, 9]), True], Tout=[tf.float32]) gtboxes_and_label_q = tf.reshape(gtboxes_and_label_q, [cfgs.BATCH_SIZE, -1, 9]) img = inputs_list[i][0] img_shape = inputs_list[i][-2:] h_crop = tf.reduce_max(img_shape[0]) w_crop = tf.reduce_max(img_shape[1]) img = tf.image.crop_to_bounding_box(image=img, offset_height=0, offset_width=0, target_height=tf.cast(h_crop, tf.int32), target_width=tf.cast(w_crop, tf.int32)) outputs = fcos.build_whole_detection_network(input_img_batch=img, gtboxes_batch_h=gtboxes_and_label_h, gtboxes_batch_r=gtboxes_and_label_q, gpu_id=i)
tensorflow.reduce_max
3,738
import tensorflow as tf """ with tf.variable_scope(name):
tensorflow.variable_scope
3,739
import tensorflow as tf # not use Dataset.from_generator() because that uses tf.py_func which is # not TPU compatible. The right way to load data is with TFRecordReader. d = tf.data.Dataset.from_tensor_slices({ "input_ids": tf.constant( all_input_ids, shape=[num_examples, seq_length], dtype=tf.int32), "input_mask": tf.constant( all_input_mask, shape=[num_examples, seq_length], dtype=tf.int32), "segment_ids": tf.constant( all_segment_ids, shape=[num_examples, seq_length],
tensorflow.constant
3,740
import tensorflow as tf w = x_shape[2] + pad_w0 + pad_w1 pad_h1 += tf.mod(-h + bsize[1], bstrides[1]) pad_w1 += tf.mod(-w + bsize[2], bstrides[2]) return tf.pad(x, [[0, 0], [pad_h0, pad_h1], [pad_w0, pad_w1], [0, 0]]) else: if bstrides is not None: assert bsize is not None, 'Must pass in bsize and bstrides together.' h = x_shape[1] w = x_shape[2] pad_h1 = tf.mod(-h + bsize[1], bstrides[1]) pad_w1 = tf.mod(-w + bsize[2], bstrides[2]) return tf.cond( tf.logical_or(tf.greater(pad_h1, 0), tf.greater(pad_w1, 0)), lambda: tf.pad(x, [[0, 0], [0, pad_h1], [0, pad_w1], [0, 0]]), lambda: x) else: return x def _get_offset_array_tf(shape): """
tensorflow.mod
3,741
import tensorflow as tf raise ValueError('depth=%g is not a valid setting!' % depth) # input tensors self.input_x = tf.placeholder(tf.int32, [None, sequence_max_length], name="input_x") self.input_tags = tf.placeholder(tf.int32, [None, sequence_max_length], name="input_tags") self.input_deps = tf.placeholder(tf.int32, [None, sequence_max_length], name="input_dependency") self.input_head = tf.placeholder(tf.int32, [None, sequence_max_length], name="input_head") self.input_y = tf.placeholder(tf.float32, [None, num_classes], name="input_y") self.is_training = tf.placeholder(tf.bool) initializer = tf.contrib.layers.variance_scaling_initializer() # Embedding Lookup 16 with tf.device('/cpu:0'), tf.name_scope("embedding"): if use_he_uniform: self.embedding_W = tf.get_variable(name='lookup_W', shape=[num_quantized_chars, embedding_size],
tensorflow.placeholder
3,742
import tensorflow as tf scaffold_fn = tpu_scaffold else: tf.train.init_from_checkpoint(init_checkpoint, assignment_map) tf.logging.info("**** Trainable Variables ****") for var in tvars: init_string = "" if var.name in initialized_variable_names:
tensorflow.logging.info
3,743
import tensorflow as tf D = tf.cast(tf.shape(X)[1], tf.float32) N = tf.cast(tf.shape(X)[0], tf.float32) D_int = tf.cast(D, tf.int32) N_int = tf.cast(N, tf.int32) if y is None: y = silverman_rule_of_thumb(N) YDistr = tf.contrib.distributions.MultivariateNormalDiag(loc=tf.zeros(D_int, tf.float32), scale_diag=tf.ones(D_int, tf.float32)) Y = YDistr.sample(N_int) T = 1.0/(2.0*N*tf.sqrt(m.pi*y)) A0 = euclidean_norm_squared(tf.subtract(tf.expand_dims(X, 0), tf.expand_dims(X, 1)), axis=2) A = tf.reduce_sum(phi_sampling(A0/(4*y), D))
tensorflow.zeros
3,744
import tensorflow as tf order_m: TensorLike, x: TensorLike) -> TensorLike: degree_l = tf.convert_to_tensor(value=degree_l) order_m = tf.convert_to_tensor(value=order_m) x = tf.convert_to_tensor(value=x) pmm = _evaluate_legendre_polynomial_pmm_eval(order_m, x) return tf.where( tf.equal(degree_l, order_m), pmm, _evaluate_legendre_polynomial_branch(degree_l, order_m, x, pmm)) def _spherical_harmonics_normalization(l, m, var_type=tf.float64): l = tf.cast(l, dtype=var_type) m = tf.cast(m, dtype=var_type) numerator = (2.0 * l + 1.0) * factorial(l - tf.abs(m)) denominator = 4.0 * np.pi * factorial(l + tf.abs(m)) return tf.sqrt(numerator / denominator) def _evaluate_spherical_harmonics_branch(degree, order, theta, phi, sign_order, var_type=tf.float64):
tensorflow.cast
3,745
import tensorflow as tf # check_shape([adv_bc, log_f_bc], [[self.n_envs * self.n_steps, self.n_act]] * 2) if continuous: gain_bc = tf.stop_gradient(adv_bc * tf.nn.relu(1.0 - (self.correction_term / (rho_i_ + eps))) * f_i_) else:
tensorflow.nn.relu
3,746
from tensorflow.python.framework import ops Raises: ValueError: if `multiple` is less than 1, or `dim` is not in `[-rank(tensor), rank(tensor)]`. """ if multiple < 1: raise ValueError('Invalid multiple %s, must be > 0.' % multiple) with ops.name_scope( name, 'expand_and_tile', (tensor, multiple, dim)) as scope: # Sparse. if isinstance(tensor, ops.SparseTensorValue): tensor = ops.SparseTensor.from_value(tensor) if isinstance(tensor, ops.SparseTensor): if dim < 0:
tensorflow.python.framework.ops.name_scope
3,747
import tensorflow as tf
tensorflow.variable_scope
3,748
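The seed above contains only the import, so as an assumed TF 1.x-style sketch (layer and variable names are hypothetical), tensorflow.variable_scope groups variables under a name prefix and lets them be reused:

import tensorflow as tf

with tf.variable_scope('dense_layer'):
    w = tf.get_variable('w', shape=[3, 4])        # creates 'dense_layer/w'

with tf.variable_scope('dense_layer', reuse=True):
    w_again = tf.get_variable('w', shape=[3, 4])  # returns the same variable

print(w is w_again)  # True
print(w.name)        # dense_layer/w:0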
import tensorflow as tf one_hot_labels = tf.one_hot( label_ids, depth=bert_config.vocab_size, dtype=tf.float32) # The `positions` tensor might be zero-padded (if the sequence is too # short to have the maximum number of predictions). The `label_weights` # tensor has a value of 1.0 for every real prediction and 0.0 for the # padding predictions. per_example_loss = -tf.reduce_sum(log_probs * one_hot_labels, axis=[-1]) numerator = tf.reduce_sum(label_weights * per_example_loss) denominator = tf.reduce_sum(label_weights) + 1e-5 loss = numerator / denominator return (loss, per_example_loss, log_probs)
tensorflow.reduce_sum
3,749
import tensorflow as tf return tf.keras.utils.register_keras_serializable(package=package) else: return lambda cls: cls def _check_tensorflow_version(): """Check that we're using a compatible TF version. Raises a warning if either Tensorflow version is less than 2.0 or TF 2.x is not enabled. If TF 2.x is enabled, but version is < TF 2.3, raises a warning to indicate that resources may not be initialized. """ major, minor, _ = tf.version.VERSION.split('.') if not (int(major) >= 2 and tf2.enabled()): tf.compat.v1.logging.warning( 'Tensorflow version (%s) found. TransformFeaturesLayer is supported ' 'only for TF 2.x with TF 2.x behaviors enabled and may not work as ' 'intended.', tf.version.VERSION) elif int(major) == 2 and int(minor) < 3: # TODO(varshaan): Log a more specific warning. tf.compat.v1.logging.warning( 'Tensorflow version (%s) found. TransformFeaturesLayer may not work ' 'as intended if the SavedModel contains an initialization op.', tf.version.VERSION)
tensorflow.version.VERSION.split
3,750
import tensorflow as tf # scaffold related configuration tf.app.flags.DEFINE_string( 'data_dir', '../Datasets/tfrecords',#'/media/rs/0E06CD1706CD0127/Kapok/Chi/Datasets/tfrecords', 'The directory where the dataset input data is stored.') tf.app.flags.DEFINE_string( 'dataset_name', '{}_????', 'The pattern of the dataset name to load.') tf.app.flags.DEFINE_string( 'model_dir', './logs_sext_cpn/',
tensorflow.app.flags.DEFINE_string
3,751
import tensorflow as tf :param param_noise_filter_func: (function (TensorFlow Tensor): bool) function that decides whether or not a variable should be perturbed. Only applicable if param_noise is True. If set to None, default_param_noise_filter is used by default. :return: (function (TensorFlow Tensor, bool, float): TensorFlow Tensor, (TensorFlow Tensor, TensorFlow Tensor) act function to select and action given observation (See the top of the file for details), A tuple containing the observation placeholder and the processed observation placeholder respectively. """ if param_noise_filter_func is None: param_noise_filter_func = default_param_noise_filter update_param_noise_threshold_ph = tf.placeholder(tf.float32, (), name="update_param_noise_threshold") update_param_noise_scale_ph = tf.placeholder(tf.bool, (), name="update_param_noise_scale") reset_ph = tf.placeholder(tf.bool, (), name="reset") eps = tf.get_variable("eps", (), initializer=tf.constant_initializer(0)) param_noise_scale = tf.get_variable("param_noise_scale", (), initializer=tf.constant_initializer(0.01), trainable=False) param_noise_threshold = tf.get_variable("param_noise_threshold", (), initializer=tf.constant_initializer(0.05), trainable=False) # Unmodified Q. policy = q_func(sess, ob_space, ac_space, 1, 1, None) obs_phs = (policy.obs_ph, policy.processed_obs) # Perturbable Q used for the actual rollout. with tf.variable_scope("perturbed_model", reuse=False): perturbable_policy = q_func(sess, ob_space, ac_space, 1, 1, None, obs_phs=obs_phs)
tensorflow.constant_initializer
3,752
import tensorflow as tf else: if not FLAGS.restore: tf.gfile.DeleteRecursively(FLAGS.checkpoint_path) tf.gfile.MkDir(FLAGS.checkpoint_path) input_images = tf.placeholder(tf.float32, shape=[None, None, None, 3], name='input_images') input_score_maps = tf.placeholder(tf.float32, shape=[None, None, None, 1], name='input_score_maps') if FLAGS.geometry == 'RBOX': input_geo_maps = tf.placeholder(tf.float32, shape=[None, None, None, 5], name='input_geo_maps') else: input_geo_maps = tf.placeholder(tf.float32, shape=[None, None, None, 8], name='input_geo_maps') input_training_masks = tf.placeholder(tf.float32, shape=[None, None, None, 1], name='input_training_masks') global_step = tf.get_variable('global_step', [], initializer=tf.constant_initializer(0), trainable=False) learning_rate = tf.train.exponential_decay(FLAGS.learning_rate, global_step, decay_steps=10000, decay_rate=0.94, staircase=True) # add summary tf.summary.scalar('learning_rate', learning_rate) opt = tf.train.AdamOptimizer(learning_rate) opt = MixedPrecisionOptimizer(opt, scale=FLAGS.loss_scale) from npu_bridge.estimator.npu.npu_optimizer import NPUDistributedOptimizer opt = NPUDistributedOptimizer(opt) # split
tensorflow.placeholder
3,753
import tensorflow as tf Returns: Batch tensor of the new observations. """ if indices is None: indices = tf.range(len(self._batch_env)) observ_dtype = self._parse_dtype(self._batch_env.observation_space) observ = tf.py_func( self._batch_env.reset, [indices], observ_dtype, name='reset') observ = tf.check_numerics(observ, 'observ') reward = tf.zeros_like(indices, tf.float32) done = tf.zeros_like(indices, tf.bool) with tf.control_dependencies([ tf.scatter_update(self._observ, indices, observ), tf.scatter_update(self._reward, indices, reward), tf.scatter_update(self._done, indices, done)]): return tf.identity(observ) @property def observ(self): """Access the variable holding the current observation.""" return self._observ @property def action(self): """Access the variable holding the last received action.""" return self._action
tensorflow.scatter_update
3,754
import tensorflow as tf return l2_out def __forward_pass(self, x, reuse): fc_size1 = 384 fc_size2 = 192 # convolutional layers with tf.variable_scope('conv1'): layer1, weights1 = new_conv_layer(x, name="conv1", num_input_channels=3, num_filters=64, filter_size=5, ac_fun=tf.nn.relu, pool_ksize=[1, 3, 3, 1]) with tf.variable_scope('conv2'): layer2, weights2 = new_conv_layer(input=layer1, name="conv2", num_input_channels=64, num_filters=64, filter_size=5, ac_fun=tf.nn.relu, pool_ksize=[1, 3, 3, 1]) with tf.name_scope('flatten'): layer3, num_features = flatten_layer(layer2) # fully connected layers
tensorflow.variable_scope
3,755
import tensorflow as tf cross_entropy = tf.reduce_mean( -tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]) ) self.cross_entropy = cross_entropy self.cross_entropy_grads = tf.gradients(cross_entropy, [w, b]) self.sess = tf.Session() # In order to get and set the weights, we pass in the loss function to # Ray's TensorFlowVariables to automatically create methods to modify # the weights. self.variables = ray.experimental.tf_utils.TensorFlowVariables(
tensorflow.Session
3,756
from tensorflow.python.framework import tensor_util # gradients and the attrs, but if we do not know filter_shape # statically, then we are unlikely to know the shape of the # gradients either. return [tensor_shape.unknown_shape(ndims=4)] @ops.RegisterShape("Conv2DBackpropInput") def _Conv2DBackpropInputShape(op): """Shape function for the Conv2DBackpropInput op.""" input_shape = tensor_util.constant_value(op.inputs[0]) if input_shape is not None: return [tensor_shape.TensorShape(input_shape.tolist())] else: # NOTE(mrry): We could in principle work out the shape from the # gradients and the attrs, but if we do not know input_shape # statically, then we are unlikely to know the shape of the # gradients either. return [tensor_shape.unknown_shape(ndims=4)]
tensorflow.python.framework.tensor_util.constant_value
3,757
import tensorflow as tf message = annotations_pb2.BucketBoundaries() annotation.Unpack(message) self.assertAllClose(list(message.boundaries), [1]) def test_infer_feature_schema_with_ragged_tensor(self): with tf.compat.v1.Graph().as_default() as graph: outputs = { 'foo': tf.RaggedTensor.from_row_splits( values=tf.constant([3, 1, 4, 1, 5, 9, 2, 6], tf.int64), row_splits=[0, 4, 4, 7, 8, 8]),
tensorflow.compat.v1.Graph
3,758
from tensorflow.python.framework import ops class RegularizeGradientDescentOptimizer(optimizer.Optimizer): def __init__(self, learning_rate=0.001, lambd=0.5, use_locking=False, name="RGD"): super(RegularizeGradientDescentOptimizer, self).__init__(use_locking, name) self._lr = learning_rate self._lambda = lambd # Tensor versions of the constructor arguments, created in _prepare(). self._lr_t = None self._lambda_t = None def _prepare(self): self._lr_t = ops.convert_to_tensor(self._lr, name="learning_rate") self._lambda_t = ops.convert_to_tensor(self._lambda, name="lambda") def _apply_dense(self, grad, var): lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype) lambda_t = math_ops.cast(self._lambda_t, var.dtype.base_dtype) g_t = grad var_update = state_ops.assign_sub(var, lr_t * (g_t - lambda_t * var) ) return control_flow_ops.group(*[var_update]) def _apply_sparse(self, grad, var): raise NotImplementedError("Sparse gradient updates are not supported.")
tensorflow.python.framework.ops.convert_to_tensor
3,759
import tensorflow as tf # Trainable parameters w1 = tf.Variable(tf.random_normal([hidden_size, attention_size], stddev=0.1)) w2 = tf.Variable(tf.random_normal([input_size, attention_size], stddev=0.1)) b = tf.Variable(tf.random_normal([attention_size], stddev=0.1)) v = tf.Variable(tf.random_normal([attention_size], stddev=0.1))
tensorflow.random_normal
3,760
import tensorflow as tf def _mask_and_accuracy(values, answers, loss_weights): values = tf.argmax(values,axis=2) x = tf.cast(values, dtype=tf.int32) y = tf.cast(answers, dtype=tf.int32) res = tf.equal(x, y) res = tf.cast(res, dtype=tf.float32) res = tf.multiply(res, loss_weights) return tf.reduce_sum(res)
tensorflow.cast
3,761
import tensorflow as tf with tf.variable_scope("Model", reuse=True, initializer=initializer): mvalid = PTBModel(is_training=False, config=config, input_=valid_input) tf.summary.scalar("Validation Loss", mvalid.cost) with tf.name_scope("Test"): test_input = PTBInput( config=eval_config, data=test_data, name="TestInput") with tf.variable_scope("Model", reuse=True, initializer=initializer): mtest = PTBModel(is_training=False, config=eval_config, input_=test_input) models = {"Train": m, "Valid": mvalid, "Test": mtest} for name, model in models.iteritems(): model.export_ops(name)
tensorflow.variable_scope
3,762
import tensorflow as tf strides=strides, padding=pad, data_format=data_format) + b def fc(self, x, scope, nh, *, init_scale=1.0, init_bias=0.0): with tf.variable_scope(scope): nin = x.get_shape()[1].value w = tf.get_variable( "w", [nin, nh], initializer=self.ortho_init(init_scale)) b = tf.get_variable(
tensorflow.variable_scope
3,763
import tensorflow as tf data_format=data_format) inputs = batch_norm(inputs, training, data_format) inputs = tf.nn.relu(inputs) inputs = conv2d_fixed_padding( inputs=inputs, filters=filters, kernel_size=3, strides=1, data_format=data_format) inputs = batch_norm(inputs, training, data_format) inputs += shortcut inputs = tf.nn.relu(inputs) return inputs def _building_block_v2(inputs, filters, training, projection_shortcut, strides, data_format): """A single block for ResNet v2, without a bottleneck.
tensorflow.nn.relu
3,764
import tensorflow as tf new_context_act = tf.nn.relu(new_context) elif activation == "elu": new_context_act = tf.nn.elu(new_context) elif activation == "linear": new_context_act = tf.identity(new_context) else: raise RuntimeError gate_sig = tf.nn.sigmoid(gate) combination_res = gate_sig * new_context_act + (1 - gate_sig) * rep_map # bs,bn,bl,vec with tf.variable_scope('restore_original_length'): combination_res_reshape = tf.reshape(combination_res, [bs, bn * bl, ivec]) # bs,bn*bl,vec output = combination_res_reshape[:, :sl, :] return output
tensorflow.variable_scope
3,765
import tensorflow as tf tf.less_equal(z0, max_z) & tf.greater_equal(z0, 0)) z1_valid = tf.to_float( tf.less_equal(z1, max_z) & tf.greater_equal(z1, 0))
tensorflow.greater_equal
3,766
import tensorflow as tf self.end_points_D_val['logits'], targets, 1))) self.error_rate = 1. - \ tf.reduce_mean(tf.to_float(tf.nn.in_top_k( self.end_points_D['class_logits'], targets, 1))) if gpu_idx == 0: update = tf.assign(num_error_rate, num_error_rate + 1.) with tf.control_dependencies([update]): tc = tf.maximum(.01, 1. / num_error_rate) update = tf.assign(avg_error_rate, (1. - tc) * avg_error_rate + tc * self.error_rate) with tf.control_dependencies([update]): self.d_loss_class = tf.identity(self.d_loss_class) self.d_loss_fake = tf.nn.sigmoid_cross_entropy_with_logits( logits=self.end_points_D['D_on_G_logits'], labels=tf.zeros_like(self.end_points_D['D_on_G_logits'])) self.d_loss_class = tf.reduce_mean(self.d_loss_class) self.d_loss_real = tf.reduce_mean(self.d_loss_real) self.d_loss_fake = tf.reduce_mean(self.d_loss_fake)
tensorflow.control_dependencies
3,767
import tensorflow as tf List of tensors from growth block `Conv2D` layers. """ with tf.variable_scope(name_or_scope=self.name, reuse=tf.AUTO_REUSE): # Get conv block layer properties. conv_block = params["discriminator_growth_conv_blocks"][block_idx] # Create new inner convolutional layers. conv_tensors = [ self.conv_layer_blocks[1 + block_idx][i]( inputs=tf.zeros( shape=[1] + conv_block[i][0:3], dtype=tf.float32 ) ) for i in range(len(conv_block)) ] print_obj( "\nbuild_discriminator_growth_layer_block", "conv_tensors",
tensorflow.zeros
3,768
import tensorflow as tf def seq_length(data): used = tf.sign(tf.reduce_max(tf.abs(data), axis=2)) length = tf.reduce_sum(used, axis=1) length = tf.cast(length, tf.int64) return length
tensorflow.cast
3,769
import tensorflow as tf FLAGS = tf.app.flags.FLAGS tf.app.flags.DEFINE_string('device', '/gpu:0', "device") tf.app.flags.DEFINE_string('dataset', 'cifar10', "{cifar10, svhn}") tf.app.flags.DEFINE_string('log_dir', "", "log_dir") tf.app.flags.DEFINE_integer('seed', 1, "initial random seed") tf.app.flags.DEFINE_bool('validation', False, "") tf.app.flags.DEFINE_integer('batch_size', 32, "the number of examples in a batch") tf.app.flags.DEFINE_integer('ul_batch_size', 128, "the number of unlabeled examples in a batch") tf.app.flags.DEFINE_integer('eval_batch_size', 100, "the number of eval examples in a batch") tf.app.flags.DEFINE_integer('eval_freq', 5, "") tf.app.flags.DEFINE_integer('num_epochs', 120, "the number of epochs for training") tf.app.flags.DEFINE_integer('epoch_decay_start', 80, "epoch of starting learning rate decay") tf.app.flags.DEFINE_integer('num_iter_per_epoch', 400, "the number of updates per epoch") tf.app.flags.DEFINE_float('learning_rate', 0.001, "initial learning rate") tf.app.flags.DEFINE_float('mom1', 0.9, "initial momentum rate") tf.app.flags.DEFINE_float('mom2', 0.5, "momentum rate after epoch_decay_start") tf.app.flags.DEFINE_string('method', 'vat', "{vat, vatent, baseline}")
tensorflow.app.flags.DEFINE_integer
3,770
import tensorflow as tf :param input_tensor: :param k_size: :param out_dims: :param rate: :param padding: :param w_init: :param b_init: :param use_bias: :param name: :return: """ with tf.variable_scope(name): in_shape = input_tensor.get_shape().as_list() in_channel = in_shape[3] assert in_channel is not None, "[Conv2D] Input cannot have unknown channel!" padding = padding.upper() if isinstance(k_size, list): filter_shape = [k_size[0], k_size[1]] + [in_channel, out_dims] else: filter_shape = [k_size, k_size] + [in_channel, out_dims]
tensorflow.variable_scope
3,771
import tensorflow as tf 'match_threshold', 0.56, 'Matching threshold in the loss function.') tf.app.flags.DEFINE_float( 'neg_threshold', 0.4, 'Matching threshold for the negative examples in the loss function.') # optimizer related configuration tf.app.flags.DEFINE_float( 'weight_decay', 0.0005, 'The weight decay on the model weights.') tf.app.flags.DEFINE_float( 'momentum', 0.9, 'The momentum for the MomentumOptimizer and RMSPropOptimizer.') tf.app.flags.DEFINE_float('learning_rate', 0.001, 'Initial learning rate.') tf.app.flags.DEFINE_float( 'end_learning_rate', 0.00005,
tensorflow.app.flags.DEFINE_float
3,772
import tensorflow as tf outer = tf.matrix_band_part(outer, 0, config.ans_limit) self.yp1 = tf.argmax(tf.reduce_max(outer, axis=2), axis=1) self.yp2 = tf.argmax(tf.reduce_max(outer, axis=1), axis=1) losses = tf.nn.sparse_softmax_cross_entropy_with_logits( logits=logits1, labels=self.y1) losses2 = tf.nn.sparse_softmax_cross_entropy_with_logits( logits=logits2, labels=self.y2) self.loss = tf.reduce_mean(losses + losses2) if config.l2_norm is not None: variables = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES) l2_loss = tf.contrib.layers.apply_regularization(regularizer, variables) self.loss += l2_loss
tensorflow.reduce_mean
3,773
import tensorflow as tf def sigmoid_ce_with_logits(logits, labels): return tf.nn.sigmoid_cross_entropy_with_logits(labels=labels, logits=logits) def sigmoid_kl_with_logits(logits, targets): assert isinstance(targets, float) if targets in [0., 1.]: entropy = 0. else: entropy = - targets*tf.log(targets) - (1. - targets)*tf.log(1. - targets) return sigmoid_ce_with_logits(logits, tf.ones_like(logits)*targets) - entropy def gradient_difference_loss(x, y): x_h_diff = x[:, 1:] - x[:, :-1] x_w_diff = x[:, :, 1:] - x[:, :, :-1] y_h_diff = y[:, 1:] - y[:, :-1] y_w_diff = y[:, :, 1:] - y[:, :, :-1] h_diff = tf.abs(tf.abs(x_h_diff) - tf.abs(y_h_diff))
tensorflow.log
3,774
import tensorflow as tf Returns: (tf.Tensor) A single value tensor containing the loss. (tf.Tensor) A tensor containing the propensity weights. """ loss = None with tf.name_scope(name, "click_weighted_pairwise_loss",[output]): sliced_output = tf.unstack(output, axis=1) sliced_label = tf.unstack(labels, axis=1) sliced_propensity = tf.unstack(propensity_weights, axis=1) for i in range(len(sliced_output)): for j in range(i+1, len(sliced_output)):
tensorflow.name_scope
3,775
import tensorflow as tf server_test_data = [ d._replace(updated_state=updated_state) for d in server_test_data ] return server_test_data, decode_params def _non_adaptive_many_to_one_encode_decode(): """Implementation of the method for `EncodingStageInterface`.""" server_graph = tf.Graph() with server_graph.as_default(): shape = input_values[0].shape encode_params, decode_params = stage.get_params() with self.session(server_graph) as sess: encode_params, decode_params = self.evaluate_tf_py_list( [encode_params, decode_params], sess)
tensorflow.Graph
3,776
import tensorflow as tf W_init_args = {} if b_init_args is None: b_init_args = {} self.inputs = prev_layer.outputs if self.inputs.get_shape().ndims != 2: raise Exception("The input dimension must be rank 2") n_in = int(self.inputs.get_shape()[-1]) self.n_units = n_units with tf.variable_scope(name): W = tf.get_variable(name='W', shape=(n_in, n_units), initializer=W_init, dtype=LayersConfig.tf_dtype, **W_init_args) b = tf.get_variable(name='b', shape=(n_units), initializer=b_init, dtype=LayersConfig.tf_dtype, **b_init_args) # self.outputs = act(tf.matmul(self.inputs, W) + b) LayersConfig.set_keep[name] = tf.placeholder(tf.float32) W_dropcon = tf.nn.dropout(W, LayersConfig.set_keep[name]) self.outputs = act(tf.matmul(self.inputs, W_dropcon) + b) # self.all_layers = list(layer.all_layers) # self.all_params = list(layer.all_params) # self.all_drop = dict(layer.all_drop) self.all_drop.update({LayersConfig.set_keep[name]: keep}) self.all_layers.append(self.outputs) self.all_params.extend([W, b])
tensorflow.get_variable
3,777
import tensorflow as tf mean = variable_on_cpu( "mean", [dim], tf.constant_initializer(0.), trainable=False) step = variable_on_cpu("step", [], tf.constant_initializer(0.), trainable=False) if scale: gamma = variable_on_cpu("gamma", [dim], tf.constant_initializer(1.)) beta = variable_on_cpu("beta", [dim], tf.constant_initializer(0.)) # choose the appropriate moments if train: used_mean, used_var = tf.nn.moments(input_, axes, name="batch_norm") cur_mean, cur_var = used_mean, used_var if bn_lag > 0.: used_mean -= (1. - bn_lag) * (used_mean - tf.stop_gradient(mean)) used_var -= (1 - bn_lag) * (used_var - tf.stop_gradient(var)) used_mean /= (1. - bn_lag**(step + 1)) used_var /= (1. - bn_lag**(step + 1)) else: used_mean, used_var = mean, var cur_mean, cur_var = used_mean, used_var # normalize res = (input_ - used_mean) / tf.sqrt(used_var + epsilon) # de-normalize if scale: res *= gamma
tensorflow.stop_gradient
3,778
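For context, a minimal sketch (assumed TF 1.x graph mode, independent of the batch-norm snippet above) of what tensorflow.stop_gradient does: it passes the value through unchanged in the forward pass but blocks gradients from flowing into its input.

import tensorflow as tf

x = tf.constant(3.0)
y = x * tf.stop_gradient(x)    # backprop treats the second factor as a constant
grad = tf.gradients(y, x)[0]   # so d y / d x is x (3.0), not 2 * x

with tf.Session() as sess:
    print(sess.run(grad))  # -> 3.0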
import tensorflow as tf def test_randomly_select_one_point_per_segment(self): instance_labels = tf.constant([[1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 2, 2, 2, 2, 2, 2], [1, 2, 2, 2, 2, 2, 2, 2], [0, 0, 0, 0, 2, 2, 2, 2], [0, 0, 0, 0, 2, 2, 2, 2]], dtype=tf.int32) instance_labels = tf.reshape(instance_labels, [-1]) (indices, masks_t) = isu.randomly_select_one_point_per_segment(instance_labels) masks = tf.transpose(masks_t) masks = tf.reshape(masks, [3, 5, 8]) expected_masks = self.get_instance_masks() selected_instances = tf.gather(instance_labels, indices) expected_selected_instances = tf.constant([0, 1, 2], dtype=tf.int32)
tensorflow.reshape
3,779
import tensorflow as tf correct_prediction_action = tf.equal( tf.argmax(one_hot_labels_action, 1), tf.argmax(self.predictions_action, 1) ) self.accuracy_action = tf.reduce_mean(tf.cast(correct_prediction_action, 'float')) tf.scalar_summary('accuracy_action', self.accuracy_action) correct_prediction_arguments = tf.equal(tf.argmax(one_hot_labels_arguments, 2), tf.argmax(self.predictions_arguments, 2)) self.accuracy_arguments = tf.reduce_mean(tf.cast(correct_prediction_arguments, 'float'))
tensorflow.scalar_summary
3,780
from tensorflow.python.ops import gen_nn_ops """ return gen_nn_ops._top_kv2(input, k=k, sorted=sorted, name=name)
tensorflow.python.ops.gen_nn_ops._top_kv2
3,781
import tensorflow as tf # [n_envs * n_steps, n_act] # Directly computed gradient of KL divergence wrt f kl_grad = - f_polyak_i / (f_i_ + eps) k_dot_g = tf.reduce_sum(kl_grad * grad, axis=-1) adj = tf.maximum(0.0, (tf.reduce_sum(kl_grad * grad, axis=-1) - self.delta) / ( tf.reduce_sum(tf.square(kl_grad), axis=-1) + eps)) # [n_envs * n_steps]
tensorflow.reduce_sum
3,782
import tensorflow.contrib.slim as slim if cfgs.GRADIENT_CLIPPING_BY_NORM is not None: grads = slim.learning.clip_gradient_norms(grads, cfgs.GRADIENT_CLIPPING_BY_NORM)
tensorflow.contrib.slim.learning.clip_gradient_norms
3,783
import tensorflow.contrib.graph_editor as ge if d_xs_new[j] is not None: if d_xs[j] is None: d_xs[j] = _unsparsify(d_xs_new[j]) else: d_xs[j] += _unsparsify(d_xs_new[j]) return d_xs def tf_toposort(ts, within_ops=None): all_ops = ge.get_forward_walk_ops([x.op for x in ts], within_ops=within_ops) deps = {} for op in all_ops: for o in op.outputs: deps[o] = set(op.inputs) sorted_ts = toposort(deps) # only keep the tensors from our original list ts_sorted_lists = []
tensorflow.contrib.graph_editor.get_forward_walk_ops
3,784
import tensorflow as tf dec, mem = tf.nn.seq2seq.embedding_rnn_seq2seq( enc_inp, dec_inp, cell1, num_encoder_symbols=2, num_decoder_symbols=5, embedding_size=2) sess.run([tf.global_variables_initializer()]) res = sess.run(dec) self.assertEqual(3, len(res)) self.assertEqual((2, 5), res[0].shape) res = sess.run([mem]) self.assertEqual((2, 4), res[0].shape) # Test externally provided output projection. w = tf.get_variable("proj_w", [2, 5]) b = tf.get_variable("proj_b", [5]) with tf.variable_scope("proj_seq2seq"): dec, _ = tf.nn.seq2seq.embedding_rnn_seq2seq( enc_inp, dec_inp, cell, num_encoder_symbols=2, num_decoder_symbols=5, embedding_size=2, output_projection=(w, b)) sess.run([tf.global_variables_initializer()]) res = sess.run(dec) self.assertEqual(3, len(res)) self.assertEqual((2, 2), res[0].shape) # Test that previous-feeding model ignores inputs after the first. dec_inp2 = [tf.constant(0, tf.int32, shape=[2]) for _ in range(3)] with tf.variable_scope("other"): d3, _ = tf.nn.seq2seq.embedding_rnn_seq2seq( enc_inp, dec_inp2, cell, num_encoder_symbols=2,
tensorflow.variable_scope
3,785
from tensorflow.python.ops import math_ops else: return self._linear_logits(features) def _get_weight_tensor(self, features): if not self._weight_column_name: return None else: return array_ops.reshape( math_ops.to_float(features[self._weight_column_name]), shape=(-1,)) def _loss(self, logits, target, weight_tensor): if self._n_classes < 2: loss_vec = math_ops.square(logits - math_ops.to_float(target)) elif self._n_classes == 2: loss_vec = nn.sigmoid_cross_entropy_with_logits(logits, math_ops.to_float(target)) else: loss_vec = nn.sparse_softmax_cross_entropy_with_logits( logits, array_ops.reshape(target, [-1])) if weight_tensor is None: return math_ops.reduce_mean(loss_vec, name="loss") else: loss_vec = array_ops.reshape(loss_vec, shape=(-1,)) loss_vec = math_ops.mul(
tensorflow.python.ops.math_ops.to_float
3,786
import tensorflow as tf self.batch_size = imgshape[1] featsize = 1024 srcimg = image[0] tgtimg = image[2] tgtctx = image[1] with tf.variable_scope("conv_context") as scope: tgtctx_h0 = lrelu(conv2d(tgtctx, self.df_dim, name='h0_conv')) tgtctx_h1 = lrelu(conv2d(tgtctx_h0, self.df_dim*2, name='h1_conv')) tgtctx_h2 = lrelu(conv2d(tgtctx_h1, self.df_dim*4, name='h2_conv')) tgtctx_h3 = lrelu(conv2d(tgtctx_h2, self.df_dim*8, name='h3_conv')) tgtctx_h4 = lrelu(linear(tf.reshape(tgtctx_h3, [self.batch_size, -1]), featsize, 'h4_lin'))
tensorflow.variable_scope
3,787
from tensorflow.python.ops import array_ops def loss_fn(logits, target): check_shape_op = control_flow_ops.Assert( math_ops.less_equal(array_ops.rank(target), 2), ["target's shape should be either [batch_size, 1] or [batch_size]"]) with ops.control_dependencies([check_shape_op]):
tensorflow.python.ops.array_ops.rank
3,788
import tensorflow as tf features = {TIMESERIES_COL: inputs} return features, labels # Create list of files that match pattern file_list = tf.gfile.Glob(filename) # Create dataset from file list dataset = tf.data.TextLineDataset(file_list).map(decode_csv)
tensorflow.gfile.Glob
3,789
import tensorflow as tf def custom_getter(getter, name, *args, **kwargs): name = name.replace("polyak_model/", "") val = ema.average(getter(name, *args, **kwargs)) return val with tf.variable_scope("polyak_model", reuse=True, custom_getter=custom_getter): self.polyak_model = polyak_model = self.policy(self.sess, self.observation_space, self.action_space, self.n_envs, self.n_steps + 1, self.n_envs * (self.n_steps + 1), reuse=True, **self.policy_kwargs)
tensorflow.variable_scope
3,790
import tensorflow as tf beta = tf.get_variable('beta', [ch], initializer=tf.constant_initializer()) beta = tf.reshape(beta, new_shape) gamma = tf.get_variable('gamma', [ch], initializer=tf.constant_initializer(1.0)) gamma = tf.reshape(gamma, new_shape) return tf.nn.batch_normalization(inputdata, mean, var, beta, gamma, epsilon, name=name) @staticmethod def dropout(inputdata, keep_prob, noise_shape=None, name=None):
tensorflow.nn.batch_normalization
3,791
import tensorflow as tf create_session=True)) # pylint: enable=g-long-lambda def test_infer_feature_schema(self, make_tensors_fn, feature_spec, domains=None, create_session=False): with tf.compat.v1.Graph().as_default() as graph: tensors = make_tensors_fn() if create_session: with tf.compat.v1.Session(graph=graph) as session: schema = schema_inference.infer_feature_schema(tensors, graph, session) else: schema = schema_inference.infer_feature_schema(tensors, graph) expected_schema = schema_utils.schema_from_feature_spec( feature_spec, domains) self.assertEqual(schema, expected_schema) def test_infer_feature_schema_bad_rank(self): with tf.compat.v1.Graph().as_default() as graph:
tensorflow.compat.v1.Session
3,792
import tensorflow as tf d_checkpoints[r] += dr def _unsparsify(x): if not isinstance(x, tf.IndexedSlices): return x assert x.dense_shape is not None, "memory_saving_gradients encountered sparse gradients of unknown shape" indices = x.indices while indices.shape.ndims < x.values.shape.ndims: indices = tf.expand_dims(indices, -1) return tf.scatter_nd(indices, x.values, x.dense_shape) # partial derivatives to xs (usually the params of the neural net) d_xs_new = dv[len(checkpoints_other):] for j in range(len(xs)): if d_xs_new[j] is not None: if d_xs[j] is None: d_xs[j] = _unsparsify(d_xs_new[j])
tensorflow.scatter_nd
3,793
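A small, assumed illustration of tensorflow.scatter_nd on its own (independent of the gradient-unsparsifying code above, TF 1.x session style): it scatters the updates into a zero tensor of the given shape at the given indices.

import tensorflow as tf

indices = tf.constant([[0], [2]])
updates = tf.constant([10, 20])
dense = tf.scatter_nd(indices, updates, shape=[4])

with tf.Session() as sess:
    print(sess.run(dense))  # -> [10  0 20  0]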
import tensorflow as tf # p=tf.Print(p,[p],message="show the weights") self.exam_loss += self.hparams.l1_loss * tf.reduce_sum(tf.abs(p)) for p in ranking_model_params: self.rank_loss += self.hparams.l2_loss * tf.nn.l2_loss(p) self.loss = self.exam_loss + self.hparams.ranker_loss_weight * self.rank_loss denoise_gradients = tf.gradients(self.exam_loss, denoise_params) ranking_model_gradients = tf.gradients(self.rank_loss, ranking_model_params) if self.hparams.max_gradient_norm > 0: denoise_gradients, denoise_norm = tf.clip_by_global_norm(denoise_gradients, self.hparams.max_gradient_norm) ranking_model_gradients, ranking_model_norm = tf.clip_by_global_norm(ranking_model_gradients, self.hparams.max_gradient_norm * self.hparams.ranker_loss_weight) self.norm = tf.global_norm(denoise_gradients + ranking_model_gradients) opt_denoise = self.optimizer_func(self.hparams.learning_rate) opt_ranker = self.optimizer_func(self.ranker_learning_rate) denoise_updates = opt_denoise.apply_gradients(zip(denoise_gradients, denoise_params), global_step=self.global_step) ranker_updates = opt_ranker.apply_gradients(zip(ranking_model_gradients, ranking_model_params))
tensorflow.clip_by_global_norm
3,794
import tensorflow as tf create_model_fn=FakeDetectionModel, train_config=train_config, master='', task=0, num_clones=1, worker_replicas=1, clone_on_cpu=True, ps_tasks=0, worker_job_name='worker', is_chief=True, train_dir=train_dir) if __name__ == '__main__': tf.test.main()
tensorflow.test.main
3,795
import tensorflow as tf if isinstance(facts, tuple): # In case of Bi-RNN, concatenate the forward and the backward RNN outputs. facts = tf.concat(facts, 2) print ("querry_size mismatch") query = tf.concat(values = [ query, query, ], axis=1) if time_major: # (T,B,D) => (B,T,D) facts = tf.array_ops.transpose(facts, [1, 0, 2]) mask = tf.equal(mask, tf.ones_like(mask)) facts_size = facts.get_shape().as_list()[-1] # D value - hidden size of the RNN layer querry_size = query.get_shape().as_list()[-1] queries = tf.tile(query, [1, tf.shape(facts)[1]]) queries = tf.reshape(queries, tf.shape(facts)) din_all = tf.concat([queries, facts, queries-facts, queries*facts], axis=-1) d_layer_1_all = tf.layers.dense(din_all, 80, activation=tf.nn.sigmoid, name='f1_att' + stag) d_layer_2_all = tf.layers.dense(d_layer_1_all, 40, activation=tf.nn.sigmoid, name='f2_att' + stag) d_layer_3_all = tf.layers.dense(d_layer_2_all, 1, activation=None, name='f3_att' + stag) d_layer_3_all = tf.reshape(d_layer_3_all, [-1, 1, tf.shape(facts)[1]]) scores = d_layer_3_all # Mask # key_masks = tf.sequence_mask(facts_length, tf.shape(facts)[1]) # [B, T] key_masks = tf.expand_dims(mask, 1) # [B, 1, T] paddings = tf.ones_like(scores) * (-2 ** 32 + 1) scores = tf.where(key_masks, scores, paddings) # [B, 1, T] # Scale
tensorflow.shape
3,796
import tensorflow as tf _validate_input_parameters(is_tensor=False, shape=shape, tt_rank=tt_rank, batch_size=batch_size) num_dims = shape[0].size if tt_rank.size == 1: tt_rank = tt_rank * np.ones(num_dims - 1) tt_rank = np.concatenate([[1], tt_rank, [1]]) shape = shape.astype(int) tt_rank = tt_rank.astype(int) tt_cores = [None] * num_dims with tf.name_scope(name): for i in range(num_dims): curr_core_shape = (batch_size, tt_rank[i], shape[0][i], shape[1][i], tt_rank[i + 1]) tt_cores[i] = tf.random_normal(curr_core_shape, mean=mean, stddev=stddev, dtype=dtype) return TensorTrainBatch(tt_cores, shape, tt_rank, batch_size) def ones_like(tt, name='t3f_ones_like'): """Constructs t3f.ones with the shape of `tt`. In the case when `tt` is TensorTrainBatch constructs t3f.ones with the shape of a TensorTrain in `tt`. Args:
tensorflow.random_normal
3,797
import tensorflow as tf tf.constant([0.0], dtype=tf.float32, name="log_prob"), ] loop_outputs = tf.while_loop(_condition, _body, loop_vars, parallel_iterations=1) arc_seq = loop_outputs[-3].stack() arc_seq = tf.reshape(arc_seq, [-1]) entropy = tf.reduce_sum(loop_outputs[-2]) log_prob = tf.reduce_sum(loop_outputs[-1]) last_c = loop_outputs[-7] last_h = loop_outputs[-6] return arc_seq, entropy, log_prob, last_c, last_h
tensorflow.reduce_sum
3,798
from tensorflow.python.ops import state_ops broadcast_weights = math_ops.mul( weights, array_ops.ones_like(average_precision), name='broadcast_weights') batch_max = math_ops.reduce_sum(broadcast_weights, name='batch_max') max_update = state_ops.assign_add(max_var, batch_max, name='update') with ops.name_scope(None, 'total', (average_precision,)) as total_scope: total_var = contrib_variables.local_variable( array_ops.zeros([], dtype=dtypes.float64), name=total_scope)
tensorflow.python.ops.state_ops.assign_add
3,799