Columns:
  seed      string  (lengths 25 – 2.89k)
  seed_api  string  (lengths 14 – 102)
  index     int64   (values 0 – 14.8k)
import tensorflow as tf
                                maxval=30521, dtype=tf.int32)

        # case 1
        hparams = {
            "pretrained_model_name": None,
        }
        regressor = XLNetRegressor(hparams=hparams)
        logits = regressor(inputs)

        with self.test_session() as sess:
            sess.run(tf.global_variables_initializer())
            logits_ = sess.run(logits)
            self.assertEqual(logits_.shape, (batch_size,))

        # case 2
        hparams = {
            "pretrained_model_name": None,
            "regr_strategy": "cls_time"
        }
        regressor = XLNetRegressor(hparams=hparams)
tensorflow.global_variables_initializer
7,100
import tensorflow as tf
                break
        print('\rTrained in %.3fs. Global step %i' % (time() - start, step + 1))
        return summary


class PPO_HC(PPO):
    def build_anet(self, state_in, name, reuse=False):
        reg = tf.contrib.layers.l2_regularizer(1e-3)
        with tf.variable_scope(name, reuse=reuse):
            layer_a1 = tf.layers.dense(state_in, 512, tf.nn.relu, kernel_regularizer=reg)
            layer_a2 = tf.layers.dense(layer_a1, 256, tf.nn.relu, kernel_regularizer=reg)
            mu = tf.layers.dense(layer_a2, self.a_dim, tf.nn.tanh, kernel_regularizer=reg)
            sigma = tf.layers.dense(layer_a2, self.a_dim, tf.nn.softplus, kernel_regularizer=reg)
            # sigma = tf.get_variable(name='pi_sigma', shape=self.a_dim, initializer=tf.constant_initializer(0.5))
            sigma = tf.clip_by_value(sigma, 0.0, 1.0)
            norm_dist = tf.distributions.Normal(loc=mu * self.a_bound, scale=sigma)
tensorflow.variable_scope
7,101
import tensorflow as tf

constraint['sigmean'] = (5.20, 5.30)
constraint['sigwidth'] = (0.001, 1.)
constraint['argpar'] = (-100., -1.)
constraint['nsig'] = (0., 10000)
constraint['nbkg'] = (0., 10000)
constraint['mes'] = (5.20, 5.30)

# keep a variable dictionary for easy key-based access compatible with constraints
vdict = {}

pi = tf.constant(np.pi, dtype=tf.float64, name="pi")
sqrt2pi = tf.constant(np.sqrt(2 * np.pi), dtype=tf.float64, name="sqrt2pi")
two = tf.constant(2, dtype=tf.float64, name="two")
one = tf.constant(1, dtype=tf.float64, name="one")
zero = tf.constant(0, dtype=tf.float64, name="zero")


def gradsafe_sqrt(x, clip_low=1e-18, name=None):
    with tf.name_scope(name, "gradsafe_sqrt"):
        return tf.sqrt(tf.clip_by_value(x, clip_low, x))


def argus_integral_phalf(m_low, m_high, m0, c):
    """
    Only valid for argus_pdf with p=0.5! Otherwise need to do numerical integral.
    """
tensorflow.constant
7,102
import tensorflow as tf

n_batch_train = n_batch * n_gpu
n_updates_total = (n_train // n_batch_train) * n_iter

X_train = tf.placeholder(tf.int32, [n_batch_train, 2, n_ctx, 2])
M_train = tf.placeholder(tf.float32, [n_batch_train, 2, n_ctx])
X = tf.placeholder(tf.int32, [None, 2, n_ctx, 2])
M = tf.placeholder(tf.float32, [None, 2, n_ctx])

Y_train = tf.placeholder(tf.int32, [n_batch_train])
Y = tf.placeholder(tf.int32, [None])
tensorflow.placeholder
7,103
import tensorflow as tf
            step=global_step)
        tf.contrib.summary.scalar(
            'rpn_score_loss', tf.reduce_mean(rpn_score_loss), step=global_step)
        tf.contrib.summary.scalar(
            'rpn_box_loss', tf.reduce_mean(rpn_box_loss), step=global_step)
        tf.contrib.summary.scalar(
            'total_fast_rcnn_loss', tf.reduce_mean(total_fast_rcnn_loss),
            step=global_step)
        tf.contrib.summary.scalar(
            'fast_rcnn_class_loss', tf.reduce_mean(fast_rcnn_class_loss),
            step=global_step)
        tf.contrib.summary.scalar(
            'fast_rcnn_box_loss', tf.reduce_mean(fast_rcnn_box_loss),
            step=global_step)
        if params['include_mask']:
            tf.contrib.summary.scalar(
                'mask_loss', tf.reduce_mean(mask_loss), step=global_step)
        tf.contrib.summary.scalar(
            'learning_rate', tf.reduce_mean(learning_rate),
tensorflow.reduce_mean
7,104
import tensorflow as tf
    width = sequence_shape[2]

    flat_offsets = tf.reshape(
        tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1])
    flat_positions = tf.reshape(positions + flat_offsets, [-1])
    flat_sequence_tensor = tf.reshape(sequence_tensor,
                                      [batch_size * seq_length, width])
    output_tensor = tf.gather(flat_sequence_tensor, flat_positions)
    return output_tensor


def input_fn_builder(input_files,
                     max_seq_length,
                     max_predictions_per_seq,
tensorflow.gather
7,105
from tensorflow.python.framework import ops
    elif 0 <= dimension and dimension < input_shape.ndims:
        returned_shape = []
        for i, dim in enumerate(input_shape.dims):
            if i != dimension:
                returned_shape.append(dim)
        return [tensor_shape.TensorShape(returned_shape)]
    else:
        raise ValueError(
            "dimension (%d) must be in the range [0, %d), where %d is the number "
            "of dimensions in the input" %
            (dimension, input_shape.ndims, input_shape.ndims))


@ops.RegisterShape("All")
@ops.RegisterShape("Any")
@ops.RegisterShape("Max")
@ops.RegisterShape("Mean")
@ops.RegisterShape("Min")
@ops.RegisterShape("Prod")
@ops.RegisterShape("Sum")
def _ReductionShape(op):
    """Common shape function for reduction ops."""
    input_shape = op.inputs[0].get_shape()
    reduction_indices = tensor_util.ConstantValue(op.inputs[1])
    keep_dims = op.get_attr("keep_dims")
    if reduction_indices is None or input_shape.ndims is None:
        if keep_dims:
            return [tensor_shape.unknown_shape(ndims=input_shape.ndims)]
        else:
            return [tensor_shape.unknown_shape()]
tensorflow.python.framework.ops.RegisterShape
7,106
import tensorflow as tf
        It is probably a good idea since the whole session must later be restored by
        the ChiefSession.
        """
        os.makedirs(self._checkpoint_dir, exist_ok=True)
        # variables = tf.trainable_variables()
        self._saver = tf.train.Saver(variables, max_to_keep=max_to_keep,
                                     save_relative_paths=True)

    def _save_graph(self):
        writer = tf.summary.FileWriter(logdir=self._checkpoint_dir,
                                       # graph=self.sess.graph,
                                       graph=tf.get_default_graph(),
                                       filename_suffix="-graph")
        writer.flush()

    def _assemble_checkpoint_name(self, checkpoint_dir):
        path = os.path.join(checkpoint_dir, "model.ckpt")
        return path
tensorflow.get_default_graph
7,107
import tensorflow as tf
    mask = tf.equal(mask, tf.ones_like(mask))
    hidden_size = facts.get_shape().as_list()[-1]  # D value - hidden size of the RNN layer
    input_size = query.get_shape().as_list()[-1]

    # Trainable parameters
    w1 = tf.Variable(tf.random_normal([hidden_size, attention_size], stddev=0.1))
    w2 = tf.Variable(tf.random_normal([input_size, attention_size], stddev=0.1))
    b = tf.Variable(tf.random_normal([attention_size], stddev=0.1))
    v = tf.Variable(tf.random_normal([attention_size], stddev=0.1))

    with tf.name_scope('v'):
        # Applying fully connected layer with non-linear activation to each of the B*T timestamps;
        # the shape of `tmp` is (B,T,D)*(D,A)=(B,T,A), where A=attention_size
tensorflow.random_normal
7,108
import tensorflow as tf
        if use_scale:
            gamma = tf.get_variable('gamma', [channnel],
                                    initializer=tf.constant_initializer(1.0))
tensorflow.constant_initializer
7,109
from tensorflow.python.platform import gfile
import numpy as np

from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.python.platform import gfile

DBPEDIA_URL = 'https://github.com/le-scientifique/torchDatasets/blob/master/dbpedia_csv.tar.gz'


def maybe_download_dbpedia(data_dir):
    """Download if DBpedia data is not present."""
    train_path = os.path.join(data_dir, 'dbpedia_csv/train.csv')
    test_path = os.path.join(data_dir, 'dbpedia_csv/test.csv')
    if not (gfile.Exists(train_path) and gfile.Exists(test_path)):
        archive_path = base.maybe_download(
            'dbpedia_csv.tar.gz', data_dir, DBPEDIA_URL)
        tfile = tarfile.open(archive_path, 'r:*')
        tfile.extractall(data_dir)


def load_dbpedia(size='small', test_with_fake_data=False):
    """Get DBpedia datasets from CSV files."""
    if not test_with_fake_data:
        data_dir = os.path.join(os.getenv('TF_EXP_BASE_DIR', ''), 'dbpedia_data')
        maybe_download_dbpedia(data_dir)
tensorflow.python.platform.gfile.Exists
7,110
import tensorflow as tf
        self.S = tf.placeholder(tf.float32, [None, self.num_global_s], name='S')    # input Global State
        self.s = tf.placeholder(tf.float32, [None, self.num_s], name='s1')          # input state for agent1
        self.S_ = tf.placeholder(tf.float32, [None, self.num_global_s], name='S_')  # input Next Global State
        self.s_ = tf.placeholder(tf.float32, [None, self.num_s], name='s1_')        # input next state for agent1
tensorflow.placeholder
7,111
import tensorflow as tf
        tf.logging.info("  Batch size = %d", FLAGS.eval_batch_size)

        eval_input_fn = input_fn_builder(
            input_files=validation_input_files,
            max_seq_length=FLAGS.max_seq_length,
            max_predictions_per_seq=FLAGS.max_predictions_per_seq,
            is_training=False,
            batch_size=FLAGS.eval_batch_size,
            use_hvd=FLAGS.use_hvd)

        result = estimator.evaluate(input_fn=eval_input_fn, steps=FLAGS.max_eval_steps)

        output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt")
        with tf.gfile.GFile(output_eval_file, "w") as writer:
            tf.logging.info("***** Eval results *****")
            for key in sorted(result.keys()):
                tf.logging.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))

    if FLAGS.do_train_eval:
        tf.logging.info("***** Running training *****")
        tf.logging.info("  Batch size = %d", FLAGS.train_batch_size)
        train_input_fn = input_fn_builder(
            input_files=input_files,
            max_seq_length=FLAGS.max_seq_length,
            max_predictions_per_seq=FLAGS.max_predictions_per_seq,
            is_training=True,
tensorflow.gfile.GFile
7,112
import tensorflow as tf
        labels,
        logits,
        surrogate_type=surrogate_type,
        positive_weights=1.0,
        negative_weights=lambdas)

    maybe_log2 = tf.log(2.0) if surrogate_type == 'xent' else 1.0
    maybe_log2 = tf.cast(maybe_log2, logits.dtype.base_dtype)
    lambda_term = lambdas * target_rate * (1.0 - label_priors) * maybe_log2
    loss = tf.reshape(weighted_loss - lambda_term, original_shape)
    other_outputs = {
        'lambdas': lambdas_variable,
tensorflow.cast
7,113
import tensorflow as tf
    tt_cores = [None] * num_dims
    for i in range(num_dims):
        curr_core_shape = (1, shape[0][i], shape[1][i], 1)
        tt_cores[i] = tf.zeros(curr_core_shape, dtype=dtype)
    return TensorTrain(tt_cores, shape, tt_rank)
tensorflow.zeros
7,114
import tensorflow as tf
        other_grads = strategy.reduce("sum", other_grads)
        other_train_op = dense_opt.apply_gradients(zip(other_grads, other_var))
        with tf.control_dependencies([emb_train_op, other_train_op]):
            total_loss = strategy.reduce("sum", loss)
            total_loss = tf.identity(total_loss)
tensorflow.control_dependencies
7,115
import tensorflow as tf
        if config.decay is not None:
            self.var_ema = tf.train.ExponentialMovingAverage(config.decay)
tensorflow.train.ExponentialMovingAverage
7,116
import tensorflow as tf
        deconv_shape2 = image_net["pool3"].get_shape()
        W_t2 = utils.weight_variable([4, 4, deconv_shape2[3].value, deconv_shape1[3].value], name="W_t2")
        b_t2 = utils.bias_variable([deconv_shape2[3].value], name="b_t2")
        conv_t2 = utils.conv2d_transpose_strided(fuse_1, W_t2, b_t2, output_shape=tf.shape(image_net["pool3"]))
        fuse_2 = tf.add(conv_t2, image_net["pool3"], name="fuse_2")
tensorflow.shape
7,117
import tensorflow as tf


@pytest.fixture
def sqrt_diag(session_tf):
    return tf.convert_to_tensor(Datum.sqrt_diag_data)


@pytest.fixture
def K(session_tf):
    return tf.convert_to_tensor(Datum.K_data)


@pytest.fixture
def K_batch(session_tf):
    return tf.convert_to_tensor(Datum.K_batch_data)
tensorflow.convert_to_tensor
7,118
import tensorflow as tf
        fake_distribution = self._GetFakeDistribution()

        # Make a new session since we're playing with static shapes. [And below.]
        x = tf.placeholder_with_default(
            input=np.ones((6, 7, 2, 3, 5), dtype=np.float32), shape=None)
        dist = fake_distribution(batch_shape=[2, 3], event_shape=[5])
        sample_shape = tf.convert_to_tensor([6, 7], dtype=tf.int32)
        y = dist._set_sample_static_shape(x, sample_shape)
        # We use as_list since TensorShape comparison does not work correctly for
        # unknown values, ie, Dimension(None).
        self.assertAllEqual([6, 7, 2, 3, 5], y.shape.as_list())

        x = tf.placeholder_with_default(
tensorflow.convert_to_tensor
7,119
import tensorflow as tf
            average_across_timesteps=False,
            average_across_batch=True)
        self._cost = tf.reduce_sum(loss)
        self._final_state = state

        if not is_training:
            return

        self._lr = tf.Variable(0., trainable=False)
        tvars = tf.trainable_variables()
        grads, _ = tf.clip_by_global_norm(tf.gradients(self._cost, tvars),
                                          config.max_grad_norm)
        optimizer = tf.train.GradientDescentOptimizer(self._lr)
        self._train_op = optimizer.apply_gradients(
            zip(grads, tvars),
            global_step=tf.train.get_or_create_global_step())

        self._new_lr = tf.placeholder(
            tf.float32, shape=[], name='new_learning_rate')
        self._lr_update = tf.assign(self._lr, self._new_lr)
        self.saver = tf.train.Saver(tf.global_variables())

    def _get_lstm_cell(self, config, is_training):
        if config.rnn_mode == BASIC:
tensorflow.train.GradientDescentOptimizer
7,120
import tensorflow as tf
    def __init__(self, sess, model, data, config, logger):
        self.model = model
        self.config = config
        self.sess = sess
        self.data = data
        self.logger = logger
        if not self.config.pretrain:  # If not pretrain then initialize variables.
            self.init = tf.group(tf.global_variables_initializer(),
                                 tf.local_variables_initializer())
            self.sess.run(self.init)

    def train(self):
        """Train the model for the number of epochs in config.num_epochs.

        Calls validate_epoch if config.use_val is set to true and per
        config.val_per_epoch.

        Returns:
tensorflow.global_variables_initializer
7,121
import tensorflow as tf
    for d in variable.get_shape()[1:].as_list():
        dim *= d
    return tf.reshape(variable, shape=[-1, dim], name=name)
tensorflow.reshape
7,122
import tensorflow as tf
    bh = tf.get_variable("bh", [nh*4], initializer=tf.constant_initializer(0.0))
    b = tf.get_variable("b", [nh*4], initializer=tf.constant_initializer(0.0))
    gc = tf.get_variable("gc", [nh], initializer=tf.constant_initializer(1.0))
    bc = tf.get_variable("bc", [nh], initializer=tf.constant_initializer(0.0))

    c, h = tf.split(axis=1, num_or_size_splits=2, value=s)
    for idx, (x, m) in enumerate(zip(xs, ms)):
        c = c*(1-m)
        h = h*(1-m)
        z = _ln(tf.matmul(x, wx), gx, bx) + _ln(tf.matmul(h, wh), gh, bh) + b
        i, f, o, u = tf.split(axis=1, num_or_size_splits=4, value=z)
        i = tf.nn.sigmoid(i)
        f = tf.nn.sigmoid(f)
        o = tf.nn.sigmoid(o)
        u = tf.tanh(u)
        c = f*c + i*u
        h = o*tf.tanh(_ln(c, gc, bc))
        xs[idx] = h
    s = tf.concat(axis=1, values=[c, h])
tensorflow.matmul
7,123
import tensorflow as tf


@gin.configurable(module='trax.data', denylist=['dataset', 'training'])
def add_eos_to_output_features(dataset,
                               training,
                               output_features='targets',
                               eos=1):
    """Adds `EOS` to all features in `output_features`."""
    del training
    if not isinstance(output_features, (list, tuple)):
        output_features = [output_features]

    def add_eos(x):
        for output_feature in output_features:
            x[output_feature] = tf.concat([x[output_feature], [eos]], axis=0)
        return x

    return dataset.map(add_eos)


@gin.configurable(module='trax.data', denylist=['dataset', 'training'])
def generic_text_dataset_preprocess_fn(dataset,
                                       training=True,
                                       text_preprocess_fns=None,
                                       token_preprocess_fns=None,
                                       spm_path=None,
tensorflow.concat
7,124
import tensorflow as tf
        x = tf.keras.layers.AveragePooling2D(pool_size=7, strides=1,
                                             padding="valid", name="pool")(x)
        x = tf.reshape(x, shape=(-1, 1024))

        self.logits = self.__fully_connected(name="fc_nsfw",
tensorflow.reshape
7,125
import tensorflow as tf
        utils.add_gradient_summary(grad, var)
    return optimizer.apply_gradients(grads)


def main(argv=None):
    keep_probability = tf.placeholder(tf.float32, name="keep_probabilty")
    image = tf.placeholder(tf.float32, shape=[None, IMAGE_SIZE, IMAGE_SIZE, 3], name="input_image")
    # debug
    annotation = tf.placeholder(tf.int32, shape=[None, IMAGE_SIZE, IMAGE_SIZE, 1], name="annotation")
    # annotation = tf.placeholder(tf.int32, shape=[None, IMAGE_SIZE, IMAGE_SIZE, 3], name="annotation")

    pred_annotation, logits = inference(image, keep_probability)
    tf.summary.image("input_image", image, max_outputs=2)
    tf.summary.image("ground_truth", tf.cast(annotation, tf.uint8), max_outputs=2)
    tf.summary.image("pred_annotation", tf.cast(pred_annotation, tf.uint8), max_outputs=2)
    # debug
tensorflow.placeholder
7,126
from tensorflow.python.framework import ops ops.RegisterShape("Elu")(common_shapes.unchanged_shape) ops.RegisterShape("Softplus")(common_shapes.unchanged_shape) ops.RegisterShape("Softsign")(common_shapes.unchanged_shape) @ops.RegisterShape("ReluGrad") @ops.RegisterShape("Relu6Grad") @ops.RegisterShape("EluGrad") @ops.RegisterShape("SoftplusGrad") @ops.RegisterShape("SoftsignGrad") def _BinaryElementwiseShape(op): """Returns same shape as both inputs to op. Args: op: Input operation.
tensorflow.python.framework.ops.RegisterShape
7,127
import tensorflow as tf
        return tf.to_float(res)

    def embed(self, x):
        """Embedding function that takes discrete latent and returns embedding.

        Args:
            x: Input to the discretization bottleneck.

        Returns:
            Continuous embedding to be passed on to the decoder.

        Raises:
            ValueError: For unknown or missing arguments.
        """
        shape_x = common_layers.shape_list(x)
        x_flat = tf.reshape(x, [-1, 1])
        c = self.int_to_bit(x_flat, num_bits=self.hparams.z_size, base=2)
        shape = common_layers.shape_list(c)
        new_shape = shape
        new_shape.append(self.hparams.num_blocks)
        new_shape.append(int(self.hparams.z_size / self.hparams.num_blocks))
        c = tf.to_int32(tf.reshape(c, shape=new_shape))
        h1_shape = shape_x
        h1_shape.append(self.hparams.hidden_size)
        h1 = tf.zeros(dtype=tf.float32, shape=h1_shape)
        c_int = self.bit_to_int(
            c, num_bits=int(self.hparams.z_size / self.hparams.num_blocks), base=2)
        c_hot = tf.one_hot(c_int, depth=self.hparams.block_v_size, axis=-1)
        c_hot_flat = tf.reshape(
tensorflow.reshape
7,128
import tensorflow as tf
        cls = tf.sigmoid(cls)
        anchors = anchors.astype(np.float32)
        grid_shape = x_shape[1:3]
        # print(grid_shape)
        grid_h, grid_w = grid_shape[0], grid_shape[1]
        # print(grid_h, tf.range(grid_h))
        grid = tf.meshgrid(tf.range(grid_w), tf.range(grid_h))
        grid = tf.expand_dims(tf.stack(grid, axis=-1), axis=2)  # [gx, gy, 1, 2]

        box_xy = (box_xy + tf.cast(grid, dtype)) * stride
        box_wh = tf.exp(box_wh) * anchors

        box_x1y1 = box_xy - box_wh / 2.
        box_x2y2 = box_xy + box_wh / 2.
        box = tf.concat([box_x1y1, box_x2y2], axis=-1)

        boxes.append(tf.reshape(box, (x_shape[0], -1, 1, 4)))
        objects.append(tf.reshape(obj, (x_shape[0], -1, 1)))
        classes.append(tf.reshape(cls, (x_shape[0], -1, num_classes)))

    boxes = tf.concat(boxes, axis=1)
    objects = tf.concat(objects, axis=1)
tensorflow.exp
7,129
import tensorflow as tf
    hidden_size = output_layer.shape[-1].value

    output_weights = tf.get_variable(
        "output_weights", [num_labels, hidden_size],
        initializer=tf.truncated_normal_initializer(stddev=0.02))
    output_bias = tf.get_variable(
        "output_bias", [num_labels], initializer=tf.zeros_initializer())

    with tf.variable_scope("loss"):
        if is_training:
            # I.e., 0.1 dropout
            output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)

        logits = tf.matmul(output_layer, output_weights, transpose_b=True)
        logits = tf.nn.bias_add(logits, output_bias)
        probabilities = tf.nn.softmax(logits, axis=-1)
        log_probs = tf.nn.log_softmax(logits, axis=-1)

        one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)

        per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
        loss = tf.reduce_mean(per_example_loss)

        return (loss, per_example_loss, logits, probabilities)


def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate,
                     num_train_steps, num_warmup_steps, use_tpu,
tensorflow.matmul
7,130
import tensorflow as tf
    a = tf.minimum(m_bound, m0)
    x = 1 - tf.pow(a / m0, 2)
tensorflow.pow
7,131
import tensorflow as tf
    # segment.
    #
    # If you want to use the token-level output, use model_bak.get_sequence_output()
    # instead.
    output_layer = model.get_pooled_output()

    hidden_size = output_layer.shape[-1].value

    output_weights = tf.get_variable(
        "output_weights", [num_labels, hidden_size],
        initializer=tf.truncated_normal_initializer(stddev=0.02))
    output_bias = tf.get_variable(
        "output_bias", [num_labels], initializer=tf.zeros_initializer())

    with tf.variable_scope("loss"):
        if is_training:
            # I.e., 0.1 dropout
            output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)

        logits = tf.matmul(output_layer, output_weights, transpose_b=True)
        logits = tf.nn.bias_add(logits, output_bias)
        probabilities = tf.nn.softmax(logits, axis=-1)
        log_probs = tf.nn.log_softmax(logits, axis=-1)

        one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)

        per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
        loss = tf.reduce_mean(per_example_loss)
tensorflow.variable_scope
7,132
from tensorflow.core.framework import op_def_pb2 as _op_def_pb2
_ops.RegisterShape("TestStringOutput")(None)


def _InitOpDefLibrary():
    op_list = _op_def_pb2.OpList()
    _text_format.Merge(_InitOpDefLibrary.op_list_ascii, op_list)
    _op_def_registry.register_op_list(op_list)
    op_def_lib = _op_def_library.OpDefLibrary()
tensorflow.core.framework.op_def_pb2.OpList
7,133
from tensorflow.python.platform import gfile
        self.assertFalse(gfile.Exists(s1))
        self.assertFalse(gfile.Exists(save._MetaGraphFilename(s1)))
        self.assertTrue(gfile.Exists(s3))
        self.assertTrue(gfile.Exists(save._MetaGraphFilename(s3)))
tensorflow.python.platform.gfile.Exists
7,134
import tensorflow as tf
            # This is not implemented as this feature is only used for plotting purposes.
            raise NotImplementedError

        pXnew = Gaussian(Xnew_mu, Xnew_var)

        num_data = tf.shape(Xnew_mu)[0]  # number of new inputs (N)
        num_ind = tf.shape(q_mu)[0]      # number of inducing points (M)
        num_func = tf.shape(q_mu)[1]     # output dimension (D)

        q_sqrt_r = tf.matrix_band_part(q_sqrt, -1, 0)  # D x M x M

        eKuf = tf.transpose(expectation(pXnew, (kern, feat)))  # M x N (psi1)
tensorflow.shape
7,135
import tensorflow as tf


class PyOpTest(tf.test.TestCase):

    def testBasic(self):

        def my_func(x, y):
            return np.sinh(x) + np.cosh(y)

        # scalar
        with self.test_session():
            x = tf.constant(1.0, tf.float32)
            y = tf.constant(2.0, tf.float32)
            z = tf.py_func(my_func, [x, y], [tf.float32])
            self.assertEqual(z[0].eval(), my_func(1.0, 2.0).astype(np.float32))

        # array
        with self.test_session():
            x = tf.constant([1.0, 2.0], tf.float64)
            y = tf.constant([2.0, 3.0], tf.float64)
            z = tf.py_func(my_func, [x, y], [tf.float64])
            self.assertAllEqual(
                z[0].eval(),
tensorflow.constant
7,136
import tensorflow as tf


def get_sorted_full_neighbor(nodes, edge_types):
    """
    Args:
      nodes: A `Tensor` of `int64`.
      edge_types: A 1-D `Tensor` of int32. Specify edge types to filter
        outgoing edges.

    Return:
      A tuple of `SparseTensor` (neighbors, weights).
        neighbors: A `SparseTensor` of `int64`.
        weights: A `SparseTensor` of `float`.
        types: A `SparseTensor` of `int32`
    """
    sp_returns = base._LIB_OP.get_sorted_full_neighbor(nodes, edge_types)
    return tf.SparseTensor(*sp_returns[:3]), tf.SparseTensor(*sp_returns[3:6]), \
        tf.SparseTensor(*sp_returns[6:])


def sample_fanout(nodes, edge_types, counts, default_node=-1):
    """
    Sample multi-hop neighbors of nodes according to weight in graph.

    Args:
      nodes: A 1-D `Tensor` of `int64`.
      edge_types: A list of 1-D `Tensor` of int32. Specify edge types to
        filter outgoing edges in each hop.
      counts: A list of `int`. Specify the number of sampling for each node in
        each hop.
tensorflow.SparseTensor
7,137
from tensorflow.python.client import graph_util
    output_count = np.prod(output_shape.as_list())
    return ops.OpStats("flops", (output_count * filter_in_depth * filter_height *
                                 filter_width * 2))


@ops.RegisterStatistics("Conv2D", "weight_parameters")
def _calc_conv_weight_params(graph, node):
    """Calculates the on-disk size of the weights for Conv2D."""
    input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
    input_shape.assert_is_fully_defined()
    filter_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[1])
    filter_shape.assert_is_fully_defined()
    output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
    output_shape.assert_is_fully_defined()
    filter_height = int(filter_shape[0])
    filter_width = int(filter_shape[1])
    filter_in_depth = int(filter_shape[2])
    filter_out_depth = int(filter_shape[3])
    return ops.OpStats("weight_parameters", (filter_height * filter_width *
tensorflow.python.client.graph_util.tensor_shape_from_node_def_name
7,138
import tensorflow as tf
        queue_ops = []
        # For each other worker, add an entry in a queue, signaling that it can
        # finish this step.
        token = tf.constant(False)
        with tf.control_dependencies(enqueue_after_list):
            for i, q in enumerate(sync_queues):
                if i == self.task_index:
                    queue_ops.append(tf.no_op())
                else:
                    queue_ops.append(q.enqueue(token))

        # Drain tokens off queue for this worker, one for each other worker.
        queue_ops.append(
            sync_queues[self.task_index].dequeue_many(len(sync_queues) - 1))
tensorflow.no_op
7,139
import tensorflow as tf
        w_init = tf.random_normal_initializer(0., .1)
        with tf.variable_scope(scope + "_" + str(par_idx)):
            # state and target
            self.state = tf.placeholder(tf.float32, [None, num_state], "state")
            self.target = tf.placeholder(tf.float32, [None, 1], name="target")

            # layers
            l_c = tf.layers.dense(self.state, 100, tf.nn.relu6, kernel_initializer=w_init, name='lc')
            self.value_estimate = tf.layers.dense(l_c, 1, kernel_initializer=w_init, name='v')  # estimated value for state

            # loss and optimizer
            self.loss = tf.reduce_mean(tf.square(tf.subtract(self.value_estimate, self.target)))
            self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
            self.train_op = self.optimizer.minimize(
tensorflow.layers.dense
7,140
import tensorflow as tf
# multiple devices (executors) in TensorFlow.

import tensorflow as tf
from tensorflow.python.framework import ops
ops.reset_default_graph()

# To find out where placement occurs, set 'log_device_placement'
sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))

a = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[2, 3], name='a')
b = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[3, 2], name='b')
c = tf.matmul(a, b)

# Runs the op.
print(sess.run(c))

# If we load a graph and want device placement to be forgotten,
# we set a parameter in our session:
config = tf.ConfigProto()
tensorflow.constant
7,141
import tensorflow as tf
                mode=mode,
                loss=total_loss,
                train_op=train_op,
                training_hooks=[logging_hook])
        elif mode == tf.estimator.ModeKeys.EVAL:

            def metric_fn(masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids,
                          masked_lm_weights, next_sentence_example_loss,
                          next_sentence_log_probs, next_sentence_labels):
                """Computes the loss and accuracy of the model."""
                masked_lm_log_probs = tf.reshape(masked_lm_log_probs,
                                                 [-1, masked_lm_log_probs.shape[-1]])
                masked_lm_predictions = tf.argmax(
                    masked_lm_log_probs, axis=-1, output_type=tf.int32)
                masked_lm_example_loss = tf.reshape(masked_lm_example_loss, [-1])
                masked_lm_ids = tf.reshape(masked_lm_ids, [-1])
                masked_lm_weights = tf.reshape(masked_lm_weights, [-1])
                masked_lm_accuracy = tf.metrics.accuracy(
                    labels=masked_lm_ids,
                    predictions=masked_lm_predictions,
                    weights=masked_lm_weights)
                masked_lm_mean_loss = tf.metrics.mean(
                    values=masked_lm_example_loss, weights=masked_lm_weights)
tensorflow.argmax
7,142
import tensorflow as tf
    def __init__(self, name, input_dim, out_dim,
                 k_h=4, k_w=4, d_h=2, d_w=2, stddev=0.02, data_format='NCHW'):
        with tf.variable_scope(name):
            self.w = tf.get_variable('w', [k_h, k_w, out_dim, input_dim],
tensorflow.variable_scope
7,143
import tensorflow as tf
        return relu

    def avg_pool(self, bottom, kernal_size=2, stride=2, name="avg"):
        return tf.nn.avg_pool(bottom,
                              ksize=[1, kernal_size, kernal_size, 1],
                              strides=[1, stride, stride, 1],
                              padding='VALID', name=name)

    def max_pool(self, bottom, kernal_size=2, stride=2, name="max"):
        return tf.nn.max_pool(bottom,
                              ksize=[1, kernal_size, kernal_size, 1],
                              strides=[1, stride, stride, 1],
                              padding='SAME', name=name)

    def conv_layer(self, bottom, kernal_size, in_channels, out_channels, stride, name):
        with tf.variable_scope(name):
            filt, conv_biases = self.get_conv_var(kernal_size, in_channels, out_channels, name)
            conv = tf.nn.conv2d(bottom, filt, [1, stride, stride, 1], padding='SAME')
            bias = tf.nn.bias_add(conv, conv_biases)
            tf.summary.histogram('weight', filt)
            tf.summary.histogram('bias', conv_biases)
            return bias

    def conv_bn_relu(self, bottom, name, kernel_size, output_channels, initializer,
                     stride=1, bn=False, training=False, relu=True):
        input_channels = bottom.get_shape().as_list()[-1]
        with tf.variable_scope(name) as scope:
            kernel = self.variable('weights',
                                   [kernel_size, kernel_size, input_channels, output_channels],
                                   initializer,
                                   regularizer=tf.contrib.layers.l2_regularizer(0.0005))
            conv = tf.nn.conv2d(bottom, kernel, [1, stride, stride, 1], padding='SAME')
            biases = self.variable('biases', [output_channels], tf.constant_initializer(0.0))
            conv_layer = tf.nn.bias_add(conv, biases)
tensorflow.nn.bias_add
7,144
from tensorflow.python.framework import ops @ops.RegisterShape("Div") @ops.RegisterShape("Equal") @ops.RegisterShape("Greater") @ops.RegisterShape("GreaterEqual") @ops.RegisterShape("Less") @ops.RegisterShape("LessEqual") @ops.RegisterShape("LogicalAnd") @ops.RegisterShape("LogicalOr") @ops.RegisterShape("Maximum") @ops.RegisterShape("Minimum") @ops.RegisterShape("Mod") @ops.RegisterShape("Mul")
tensorflow.python.framework.ops.RegisterShape
7,145
import tensorflow as tf
def conv(inp, name, size, out_channels, strides=[1, 1, 1, 1], dilation=None,
         padding='SAME', apply_relu=True, alpha=0.0, bias=True,
         initializer=tf.contrib.layers.xavier_initializer_conv2d()):
    batch_size = inp.get_shape().as_list()[0]
    res1 = inp.get_shape().as_list()[1]
    res2 = inp.get_shape().as_list()[1]
    in_channels = inp.get_shape().as_list()[3]

    with tf.variable_scope(name):
        W = get_variable("W", shape=[size, size, in_channels, out_channels],
                         dtype=tf.float32, initializer=initializer,
                         regularizer=tf.nn.l2_loss)
        b = get_variable("b", shape=[1, 1, 1, out_channels], dtype=tf.float32,
                         initializer=tf.zeros_initializer(), trainable=bias)

        if dilation:
            assert(strides == [1, 1, 1, 1])
            out = tf.add(tf.nn.atrous_conv2d(inp, W, rate=dilation, padding=padding),
                         b, name='convolution')
            out.set_shape([batch_size, res1, res2, out_channels])
        else:
            out = tf.add(tf.nn.conv2d(inp, W, strides=strides, padding=padding),
                         b, name='convolution')

        if apply_relu:
            out = relu(out, alpha=alpha, name='relu')

    return out
tensorflow.zeros_initializer
7,146
import tensorflow as tf
    eval_config.batch_size = 1
    eval_config.num_steps = 1

    with tf.Graph().as_default():
        initializer = tf.random_uniform_initializer(-config.init_scale,
                                                    config.init_scale)

        with tf.name_scope("Train"):
            train_input = PTBInput(config=config, data=train_data, name="TrainInput")
            with tf.variable_scope("Model", reuse=None, initializer=initializer):
                m = PTBModel(is_training=True, config=config, input_=train_input)
            tf.summary.scalar("Training Loss", m.cost)
            tf.summary.scalar("Learning Rate", m.lr)

        with tf.name_scope("Valid"):
            valid_input = PTBInput(config=config, data=valid_data, name="ValidInput")
            with tf.variable_scope("Model", reuse=True, initializer=initializer):
                mvalid = PTBModel(is_training=False, config=config, input_=valid_input)
            tf.summary.scalar("Validation Loss", mvalid.cost)

        with tf.name_scope("Test"):
            test_input = PTBInput(
                config=eval_config, data=test_data, name="TestInput")
            with tf.variable_scope("Model", reuse=True, initializer=initializer):
                mtest = PTBModel(is_training=False, config=eval_config,
                                 input_=test_input)

        models = {"Train": m, "Valid": mvalid, "Test": mtest}
tensorflow.name_scope
7,147
import tensorflow as tf
    assert (len(stride_dims) == 2)  # stride height and width

    filter_h, filter_w = filter_dims
    stride_h, stride_w = stride_dims

    with tf.variable_scope(scope):
        pool = tf.nn.avg_pool(input, ksize=[1, filter_h, filter_w, 1],
                              strides=[1, stride_h, stride_w, 1],
                              padding=padding)
        return pool
tensorflow.variable_scope
7,148
import tensorflow as tf
# Some preprocessing
adj_norm = preprocess_graph(adj)

# Define placeholders
placeholders = {
    'features': tf.sparse_placeholder(tf.float32),
    'adj': tf.sparse_placeholder(tf.float32),
    'adj_orig': tf.sparse_placeholder(tf.float32),
    'dropout': tf.placeholder_with_default(0., shape=())
}
tensorflow.sparse_placeholder
7,149
import tensorflow as tf
        features={
            'image_raw': tf.FixedLenFeature([], tf.string),
            'label': tf.FixedLenFeature([], tf.int64),
        })

    if FLAGS.contrast_norm == 'areafactor':
        image = tf.decode_raw(features['image_raw'], tf.float32)
    else:
        image = tf.decode_raw(features['image_raw'], tf.uint8)
        image = tf.cast(image, tf.float32) * (1. / 255)

    image.set_shape(np.prod([FLAGS.num_scales, FLAGS.crop_size, FLAGS.crop_size]))
    image = tf.reshape(image, [FLAGS.num_scales, FLAGS.crop_size, FLAGS.crop_size, 1])
    image = image - 0.5

    # Convert label from a scalar uint8 tensor to an int32 scalar.
    label = tf.cast(features['label'], tf.int32)

    return image, label


def inputs(name, batch_size, num_epochs):
    """Reads input data num_epochs times.
tensorflow.reshape
7,150
import tensorflow as tf
            state[st_idx] = featurize_state(state[st_idx])
        feed_dict = {self.state: state, self.target: target}
        _, loss = sess.run([self.train_op, self.loss], feed_dict)
        return loss


"""
For Pendulum-v0
"""
class PolicyEstimator_Pendulum():
    def __init__(self, entropy_beta=0.01, learning_rate=0.01, par_idx=0, scope="policy_estimator"):
        w_init = tf.random_normal_initializer(0., .1)
        with tf.variable_scope(scope + "_" + str(par_idx)):
            # state, target and action
            self.state = tf.placeholder(tf.float32, [None, num_state], name="state")
            self.target = tf.placeholder(tf.float32, [None, 1], name="target")
            self.a_his = tf.placeholder(tf.float32, [None, num_action], name="action_hist")

            # layers
            l_a = tf.layers.dense(self.state, 200, tf.nn.relu6, kernel_initializer=w_init, name='la')
            self.mu = tf.layers.dense(l_a, num_action, tf.nn.tanh, kernel_initializer=w_init, name='mu')        # estimated action value
            self.sigma = tf.layers.dense(l_a, num_action, tf.nn.softplus, kernel_initializer=w_init, name='sigma')  # estimated variance

            # wrap output
            self.mu = self.mu * action_bound[1]
            self.sigma = self.sigma + 1e-4

            # get action from distribution
            self.normal_dist = tf.contrib.distributions.Normal(self.mu, self.sigma)
            self.action = tf.squeeze(self.normal_dist.sample(1), axis=0)
tensorflow.placeholder
7,151
import tensorflow as tf
    # 1. Configure the RNN
    lstm_cell = rnn.BasicLSTMCell(LSTM_SIZE, forget_bias=1.0)
    outputs, _ = rnn.static_rnn(lstm_cell, x, dtype=tf.float32)

    # Slice to keep only the last cell of the RNN
    outputs = outputs[-1]
    # print('last outputs={}'.format(outputs))

    # Output is result of linear activation of last layer of RNN
    weight = tf.Variable(tf.random_normal([LSTM_SIZE, N_OUTPUTS]))
    bias = tf.Variable(tf.random_normal([N_OUTPUTS]))
    predictions = tf.matmul(outputs, weight) + bias

    # 2. Loss function, training/eval ops
    if mode == tf.estimator.ModeKeys.TRAIN or mode == tf.estimator.ModeKeys.EVAL:
        loss = tf.losses.mean_squared_error(labels, predictions)
        train_op = tf.contrib.layers.optimize_loss(
            loss=loss,
            global_step=tf.train.get_global_step(),
            learning_rate=0.01,
            optimizer="SGD")
tensorflow.random_normal
7,152
import tensorflow as tf
        self.loss_db_sum = tf.summary.scalar("db_loss", self.D_B_loss)
        self.loss_da_sum = tf.summary.scalar("da_loss", self.D_A_loss)
        self.loss_d_sum = tf.summary.scalar("d_loss", self.discriminator_loss)

        self.db_loss_real_sum = tf.summary.scalar("db_loss_real", self.D_B_loss_real)
        self.db_loss_fake_sum = tf.summary.scalar("db_loss_fake", self.D_B_loss_fake)
        self.da_loss_real_sum = tf.summary.scalar("da_loss_real", self.D_A_loss_real)
        self.da_loss_fake_sum = tf.summary.scalar("da_loss_fake", self.D_A_loss_fake)

        self.d_sum = tf.summary.merge(
            [self.loss_da_sum, self.da_loss_real_sum, self.da_loss_fake_sum,
             self.loss_db_sum, self.db_loss_real_sum, self.db_loss_fake_sum,
             self.loss_d_sum]
        )
tensorflow.summary.scalar
7,153
import tensorflow as tf
            b, name=name)

    def get_variables(self):
        return {'w': self.w, 'b': self.b}


class DilatedConv3D(object):
    def __init__(self, name, input_dim, output_dim, k_t=2, k_h=3, k_w=3,
                 d_t=2, d_h=1, d_w=1, stddev=0.02, data_format='NDHWC'):
        with tf.variable_scope(name):
            assert(data_format == 'NDHWC')
            self.w = tf.get_variable('w',
                                     [k_t, k_h, k_w, input_dim, output_dim],
                                     initializer=tf.truncated_normal_initializer(stddev=stddev))
            self.b = tf.get_variable('b', [output_dim],
                                     initializer=tf.constant_initializer(0.0))
            self.strides = [1, 1, 1]
            self.dilates = [d_t, d_h, d_w]

    def __call__(self, input_var, name=None):
        k_t, k_h, k_w, _, _ = self.w.get_shape().as_list()
        _t = tf.pad(input_var,
                    [[0, 0], [0, 0], [k_h//2, k_h//2], [k_w//2, k_w//2], [0, 0]],
                    "SYMMETRIC")
        return tf.nn.bias_add(
            tf.nn.convolution(_t, self.w, strides=self.strides,
                              dilation_rate=self.dilates, padding='VALID'),
            self.b, name=name)
tensorflow.constant_initializer
7,154
import tensorflow as tf
        def squared_loss(y_pred, labels):
            return tf.reduce_mean((y_pred - labels)**2)

        def abs_loss(y_pred, labels):
            return tf.reduce_mean(tf.abs(y_pred - labels))

        def binary_cross_entropy_loss(logits, labels):
            return tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=labels, logits=logits))

        self.images_real = tf.placeholder(tf.float32,
                                          [None, self.image_size, self.image_size,
                                           self.input_dim + self.output_dim])
        self.image_real_A = self.images_real[:, :, :, :self.input_dim]
        self.image_real_B = self.images_real[:, :, :, self.input_dim:self.input_dim + self.output_dim]

        self.images_fake_B = self.build_generator(self.image_real_A, reuse=False, name='generator_AB')
        self.images_fake_A = self.build_generator(self.images_fake_B, reuse=False, name='generator_BA')
        self.images_fake_A_ = self.build_generator(self.image_real_B, reuse=True, name='generator_BA')
tensorflow.nn.sigmoid_cross_entropy_with_logits
7,155
import tensorflow as tf with tf.variable_scope("attention"): inputs_ = tf.nn.relu( dense(d_inputs, hidden, use_bias=False, scope="inputs")) memory_ = tf.nn.relu( dense(d_memory, hidden, use_bias=False, scope="memory")) outputs = tf.matmul(inputs_, tf.transpose( memory_, [0, 2, 1])) / (hidden ** 0.5) mask = tf.tile(tf.expand_dims(mask, axis=1), [1, JX, 1]) logits = tf.nn.softmax(softmax_mask(outputs, mask)) outputs = tf.matmul(logits, memory)
tensorflow.transpose
7,156
import tensorflow as tf
        # not use Dataset.from_generator() because that uses tf.py_func which is
        # not TPU compatible. The right way to load data is with TFRecordReader.
        d = tf.data.Dataset.from_tensor_slices({
            "input_ids":
                tf.constant(
                    all_input_ids, shape=[num_examples, seq_length],
                    dtype=tf.int32),
            "input_mask":
tensorflow.constant
7,157
import tensorflow as tf
    gen_optimizer = tf.train.RMSPropOptimizer(learning_rate=0.05)
    dis_optimizer = tf.train.RMSPropOptimizer(learning_rate=0.05)
tensorflow.train.RMSPropOptimizer
7,158
import tensorflow as tf
    del dynamics


if __name__ == "__main__":
    tf.enable_eager_execution()
    tf.test.main()
tensorflow.enable_eager_execution
7,159
import tensorflow as tf
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True

    # Train
    with tf.Session(config=config) as sess:
        try:
            summary_writer = tf.summary.FileWriter(tensorboard_dir, sess.graph)
tensorflow.Session
7,160
import tensorflow as tf
                             size=0,
                             dynamic_size=True,
                             element_shape=(facts[:, 0, :].get_shape()))
    _, output_op, _ = tf.while_loop(cond, body, [facts, output_ta, 0])
    self_attention = output_op.stack()
    self_attention = tf.transpose(self_attention, perm=[1, 0, 2])
    return self_attention


def self_all_attention(facts, ATTENTION_SIZE, mask, stag='null'):
    if len(facts.get_shape().as_list()) == 2:
        facts = tf.expand_dims(facts, 1)

    def cond(batch, output, i):
        return tf.less(i, tf.shape(batch)[1])

    def body(batch, output, i):
        self_attention_tmp = din_fcn_attention(batch[:, i, :], batch,
                                               ATTENTION_SIZE, mask,
                                               softmax_stag=1, stag=stag,
                                               mode='LIST')
        self_attention_tmp = tf.reduce_sum(self_attention_tmp, 1)
tensorflow.expand_dims
7,161
import tensorflow as tf
    if targets in [0., 1.]:
        entropy = 0.
    else:
        entropy = - targets*tf.log(targets) - (1. - targets)*tf.log(1. - targets)
    return sigmoid_ce_with_logits(logits, tf.ones_like(logits)*targets) - entropy


def gradient_difference_loss(x, y):
    x_h_diff = x[:, 1:] - x[:, :-1]
tensorflow.ones_like
7,162
import tensorflow as tf """ eps = tf.get_variable("eps", (), initializer=tf.constant_initializer(0))
tensorflow.constant_initializer
7,163
import tensorflow as tf
            self.variable_mgr.append_apply_gradients_ops(
                gradient_state, opt, clipped_grads, training_ops)
        train_op = tf.group(*(training_ops + update_ops + extra_nccl_ops))

        with tf.device(self.cpu_device):
            if self.task_index == 0 and FLAGS.summary_verbosity > 0:
                tf.summary.scalar('learning_rate', learning_rate)
                tf.summary.scalar('total_loss', total_loss)
                for grad, var in avg_grads:
                    if grad is not None:
                        tf.summary.histogram(var.op.name + '/gradients', grad)
                for var in tf.trainable_variables():
                    tf.summary.histogram(var.op.name, var)

        fetches = [train_op, total_loss] + enqueue_ops
        return (enqueue_ops, fetches)

    def add_forward_pass_and_gradients(
            self, host_images, host_labels, nclass, phase_train, device_num,
            input_data_type, data_type, input_nchan, use_synthetic_gpu_images,
            gpu_copy_stage_ops, gpu_compute_stage_ops, gpu_grad_stage_ops):
        """Add ops for forward-pass and gradient computations."""
tensorflow.summary.histogram
7,164
import tensorflow as tf
        W = tf.get_variable("W", shape=[total_filters, nb_classes],
                            initializer=tf.contrib.layers.xavier_initializer())

        # add final layer bias
tensorflow.contrib.layers.xavier_initializer
7,165
import tensorflow as tf
        tf.data.experimental.map_and_batch(
            lambda fname, label: (mapper(tf.read_file(fname)), label),
tensorflow.read_file
7,166
import tensorflow as tf
        end_logits = tf.contrib.layers.layer_norm(
            end_logits, begin_norm_axis=-1)

        end_logits = tf.layers.dense(
            end_logits,
            1,
            kernel_initializer=initializer,
tensorflow.layers.dense
7,167
import tensorflow as tf
        padding_shape = [max_num_boxes,
                         shape_utils.get_dim_as_int(tensor_shape[1])]
        padding_shapes[fields.InputDataFields.
                       groundtruth_keypoint_visibilities] = padding_shape

    padded_tensor_dict = {}
    for tensor_name in tensor_dict:
        padded_tensor_dict[tensor_name] = shape_utils.pad_or_clip_nd(
            tensor_dict[tensor_name], padding_shapes[tensor_name])

    # Make sure that the number of groundtruth boxes now reflects the
    # padded/clipped tensors.
    if fields.InputDataFields.num_groundtruth_boxes in padded_tensor_dict:
        padded_tensor_dict[fields.InputDataFields.num_groundtruth_boxes] = (
            tf.minimum(
                padded_tensor_dict[fields.InputDataFields.num_groundtruth_boxes],
                max_num_boxes))
    return padded_tensor_dict


def augment_input_data(tensor_dict, data_augmentation_options):
    """Applies data augmentation ops to input tensors.

    Args:
      tensor_dict: A dictionary of input tensors keyed by fields.InputDataFields.
      data_augmentation_options: A list of tuples, where each tuple contains a
        function and a dictionary that contains arguments and their values.
tensorflow.minimum
7,168
import tensorflow as tf
from sklearn.preprocessing import normalize
import cv2
import numpy as np
import glob
import json
from keras.layers import merge
from keras.layers.core import Lambda
from keras.models import Model
import tensorflow as tf


def make_parallel(model, gpu_count):
    def get_slice(data, idx, parts):
        shape = tf.shape(data)
        size = tf.concat(0, [shape[:1] // parts, shape[1:]])
        stride = tf.concat(0, [shape[:1] // parts, shape[1:] * 0])
        start = stride * idx
        return tf.slice(data, start, size)

    outputs_all = []
    for i in range(len(model.outputs)):
        outputs_all.append([])

    # Place a copy of the model on each GPU, each getting a slice of the batch
    for i in range(gpu_count):
        with tf.device('/gpu:%d' % i):
            with tf.name_scope('tower_%d' % i) as scope:
tensorflow.shape
7,169
import tensorflow as tf
        num_classes = 1000
        input_np = np.random.uniform(0, 1, (batch_size, height, width, 3))
        with self.test_session() as sess:
            inputs = tf.placeholder(tf.float32, shape=(batch_size, None, None, 3))
            logits, end_points = mobilenet_v1.mobilenet_v1(inputs, num_classes,
                                                           global_pool=True)
            self.assertTrue(logits.op.name.startswith('MobilenetV1/Logits'))
            self.assertListEqual(logits.get_shape().as_list(),
                                 [batch_size, num_classes])
            pre_pool = end_points['Conv2d_13_pointwise']
            feed_dict = {inputs: input_np}
            tf.global_variables_initializer().run()
            pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict)
            self.assertListEqual(list(pre_pool_out.shape), [batch_size, 8, 10, 1024])

    def testUnknowBatchSize(self):
        batch_size = 1
        height, width = 224, 224
        num_classes = 1000

        inputs = tf.placeholder(tf.float32, (None, height, width, 3))
        logits, _ = mobilenet_v1.mobilenet_v1(inputs, num_classes)
tensorflow.global_variables_initializer
7,170
import tensorflow as tf
                return {
                    "masked_lm_accuracy": masked_lm_accuracy,
                    "masked_lm_loss": masked_lm_mean_loss,
                    "next_sentence_accuracy": next_sentence_accuracy,
                    "next_sentence_loss": next_sentence_mean_loss,
                }

            eval_metrics = (metric_fn, [
                masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids,
                masked_lm_weights, next_sentence_example_loss,
                next_sentence_log_probs, next_sentence_labels
            ])
            output_spec = tf.contrib.tpu.TPUEstimatorSpec(
                mode=mode,
                loss=total_loss,
                eval_metrics=eval_metrics,
                scaffold_fn=scaffold_fn)
        else:
            raise ValueError("Only TRAIN and EVAL modes are supported: %s" % (mode))

        return output_spec

    return model_fn
tensorflow.contrib.tpu.TPUEstimatorSpec
7,171
import tensorflow as tf
            x = tf.reshape(scale_init, [1, num_units]) * (x - tf.reshape(m_init, [1, num_units]))
        else:
            V = maybe_avg(V)
            g = maybe_avg(g)
            b = maybe_avg(b)
            x = tf.matmul(x, V)
            scaler = g / tf.sqrt(tf.reduce_sum(tf.square(V), [0]))
            x = tf.reshape(scaler, [1, num_units]) * x + tf.reshape(b, [1, num_units])
        return x


def sample_from_discretized_mix_logistic(l, nr_mix):
    """
    This function is copied from
    https://github.com/openai/pixel-cnn/blob/master/pixel_cnn_pp/nn.py
    in reference to:
tensorflow.reshape
7,172
from tensorflow.python.ops import variable_scope as vs
        self._outer_graph = ops.get_default_graph()
        self._vscope = vs.get_variable_scope()
tensorflow.python.ops.variable_scope.get_variable_scope
7,173
import tensorflow as tf "targets": tf.FixedLenFeature([1], tf.int64), } data_items_to_decoders = None return (data_fields, data_items_to_decoders) def txt_line_iterator(txt_path): """Iterate through lines of file.""" with tf.gfile.Open(txt_path) as f: for line in f: yield line.strip() def text2text_txt_iterator(source_txt_path, target_txt_path): """Yield dicts for Text2TextProblem.generate_samples from lines of files.""" for inputs, targets in zip(
tensorflow.gfile.Open
7,174
import tensorflow as tf
                             activation=tf.nn.relu,
                             padding="same")
        x = tf.layers.conv2d(x, 32, (5, 5), strides=(2, 2),
                             activation=tf.nn.relu,
                             padding="same")
        flat_x = tf.layers.flatten(x)
        if self.use_epochs:
            epoch = features["epoch"] + tf.zeros([x_shape[0]], dtype=tf.int32)
            # Randomly set epoch to 0 in some cases as that's the inference value.
tensorflow.layers.flatten
7,175
import tensorflow as tf """ words = tf.sparse.to_dense(tf.strings.split([text]), default_value='')[0] num_words = tf.size(words) ids_ta = tf.TensorArray(tf.int32, 0, dynamic_size=True)
tensorflow.size
7,176
import tensorflow as tf
        self._score_summaries.update(self._anchor_targets)

        return rpn_labels

    def _proposal_target_layer(self, rois, roi_scores, name):
        with tf.variable_scope(name):
            # Here index is relative to cfg.FLAGS.batch_size = 256
            # rois (0, x1, y1, x2, y2), coming from the RPN, then reduced to 256
            # bbox_target (ndarray): N x 4K blob of regression targets
            # bbox_inside_weights (ndarray): N x 4K blob of loss weights
tensorflow.variable_scope
7,177
import tensorflow as tf
tf.flags.DEFINE_string(
tensorflow.flags.DEFINE_string
7,178
from tensorflow.python.framework import ops @ops.RegisterShape("SegmentMax") @ops.RegisterShape("SegmentMean") @ops.RegisterShape("SegmentMin") @ops.RegisterShape("SegmentProd") @ops.RegisterShape("SegmentSum") def _SegmentReductionShape(op): """Common shape function for segment reduction ops.""" data_shape = op.inputs[0].get_shape() segment_ids_shape = op.inputs[1].get_shape() segment_ids_shape.assert_has_rank(1) return [tensor_shape.TensorShape([None]).concatenate(data_shape[1:])] @ops.RegisterShape("SparseSegmentMean") @ops.RegisterShape("SparseSegmentSum") def _SparseSegmentReductionShape(op): """Common shape function for sparse segment reduction ops.""" data_shape = op.inputs[0].get_shape() indices_shape = op.inputs[1].get_shape() indices_shape.assert_has_rank(1) segment_ids_shape = op.inputs[2].get_shape() segment_ids_shape.assert_has_rank(1) indices_shape.assert_is_compatible_with(segment_ids_shape) return [tensor_shape.TensorShape([None]).concatenate(data_shape[1:])] @ops.RegisterShape("SparseSegmentMeanGrad") def _SparseSegmentMeanGradShape(op): """Shape function for the SparseSegmentMeanGrad op."""
tensorflow.python.framework.ops.RegisterShape
7,179
from tensorflow.python.framework import ops
    for m in self._monitors:
        if "session" in inspect.getargspec(m.end).args:
            m.end(session=session)
        else:
            m.end()


def _as_graph_element(obj):
    """Retrieves Graph element."""
    graph = ops.get_default_graph()
    if not isinstance(obj, six.string_types):
        if not hasattr(obj, "graph") or obj.graph != graph:
            raise ValueError("Passed %s should have graph attribute that is equal "
                             "to current graph %s." % (obj, graph))
        return obj
    if ":" in obj:
        element = graph.as_graph_element(obj)
    else:
tensorflow.python.framework.ops.get_default_graph
7,180
import tensorflow as tf
        metrics.append(tf.summary.scalar('training/pred_dist', mean_pred_error))
        metrics.append(tf.summary.scalar('training/improvement', improvement))
        metrics.append(tf.summary.scalar('training/improvement_abs', tf.nn.relu(improvement)))
        metrics.append(tf.summary.histogram('training/improvement_abs_hist', nut.nan_to_zero(improvement)))
        metrics.append(tf.summary.scalar('training/improvement_pairwise', tf.reduce_mean(pairwise_improvement_bool)))
        metrics.append(tf.summary.histogram('training/improvement_pairwise_hist', pairwise_improvement_bool))
        self.eval_summs = tf.summary.merge(metrics)

    def _build_embedding_saver(self, sess):
tensorflow.summary.histogram
7,181
import tensorflow as tf
        tf.summary.image('input', images)
        tf.summary.image('score_map', score_maps)
        tf.summary.image('score_map_pred', f_score * 255)
        tf.summary.image('geo_map_0', geo_maps[:, :, :, 0:1])
        tf.summary.image('geo_map_0_pred', f_geometry[:, :, :, 0:1])
        tf.summary.image('training_masks', training_masks)
        tf.summary.scalar('model_loss', model_loss)
        tf.summary.scalar('total_loss', total_loss)

    return total_loss, model_loss
tensorflow.summary.scalar
7,182
import tensorflow as tf
def mgpu_train(*xs):
    gpu_ops = []
    gpu_grads = []
    xs = (tf.split(x, n_gpu, 0) for x in xs)
    for i, xs in enumerate(zip(*xs)):
        do_reuse = True if i > 0 else None
        with tf.device(assign_to_gpu(i, "/gpu:0")), tf.variable_scope(tf.get_variable_scope(), reuse=do_reuse):
            clf_logits, clf_losses, lm_losses = model(*xs, train=True, reuse=do_reuse)
            if lm_coef > 0:
                train_loss = tf.reduce_mean(clf_losses) + lm_coef*tf.reduce_mean(lm_losses)
            else:
                train_loss = tf.reduce_mean(clf_losses)
tensorflow.get_variable_scope
7,183
import tensorflow as tf
        small_constant_for_finite_diff: a `float`, Small constant for finite
            difference method
        perturb_norm_length: a `float`, Norm length of adversarial perturbation
            to be optimized with validation

    Returns:
        a `float` `scalar`, KL divergence.
    """
    logits = tf.stop_gradient(logits)
    weights = _end_of_seq_mask(labels, vocab_size)

    perturbs = [_mask_by_length(tf.random_normal(shape=tf.shape(emb)), length)
                for emb in embedded]
    for _ in range(num_power_iteration):
        perturbs = [_scale_l2(d, small_constant_for_finite_diff) for d in perturbs]
        d_logits = logits_from_embedding_fn(
            [emb + d for (emb, d) in zip(embedded, perturbs)])
tensorflow.stop_gradient
7,184
import tensorflow as tf


class FeedForwardCategoricalPolicy(PolicyBase):
    """Feed-forward categorical."""

    def body(self, features):
        observations = features["inputs_raw"]
        observations = tf.cast(observations, tf.float32)
        flat_observations = tf.layers.flatten(observations)
        with tf.variable_scope("policy"):
            x = flat_observations
            for size in self.hparams.policy_layers:
                x = tf.layers.dense(x, size, activation=tf.nn.relu)
            logits = tf.layers.dense(x, self.hparams.problem.num_actions)
            logits = tf.expand_dims(logits, axis=1)
        with tf.variable_scope("value"):
            x = flat_observations
            for size in self.hparams.value_layers:
                x = tf.layers.dense(x, size, activation=tf.nn.relu)
            value = tf.layers.dense(x, 1)
        logits = clip_logits(logits, self.hparams)
        return {"target_policy": logits, "target_value": value}


@registry.register_model
class FeedForwardCnnSmallCategoricalPolicy(PolicyBase):
    """Small cnn network with categorical output."""
tensorflow.variable_scope
7,185
import tensorflow as tf
            # a_fc2_ = tf.layers.dense(a_fc1_, 128, tf.nn.relu, kernel_initializer=w_initializer,
            #                          bias_initializer=b_initializer, name='agent_fc2_t')
            # a_fc3_ = tf.layers.dense(a_fc2_, 64, tf.nn.relu, kernel_initializer=w_initializer,
            #                          bias_initializer=b_initializer, name='agent_fc3_t')
            self.q_next = tf.layers.dense(a_fc1_, self.num_a, kernel_initializer=w_initializer,
                                          bias_initializer=b_initializer, name='q_t')

        # [batch*n_agents, 1]
        self.q_selected = tf.reduce_sum(tf.multiply(self.q_eval, self.a), axis=1)

        # ------------------ build mixing_net ------------------
        with tf.variable_scope('mixing_net'):
            # [batch, n_agents]
            self.q_concat = tf.reshape(self.q_selected, [-1, self.n_agents])
            self.q_concat_ = tf.reshape(self.q_m_, [-1, self.n_agents])

            with tf.variable_scope('eval_hyper'):
                self.Q_tot = Qmix_mixer(self.q_concat, self.S, self.num_global_s, self.n_agents, 32)

            with tf.variable_scope('target_hyper'):
                self.Q_tot_ = Qmix_mixer(self.q_concat_, self.S_, self.num_global_s, self.n_agents, 32)

            # with tf.variable_scope('layer_mix_eval'):
            #     lin1 = tf.matmul(tf.reshape(self.q_concat, shape=[-1, 1, self.n_agents]), self.w1) + tf.reshape(self.b1, shape=[-1, 1, 32])
            #     a1 = tf.nn.elu(lin1, name='a1')
            #     self.Q_tot = tf.reshape(tf.matmul(a1, self.w2), shape=[-1, 1]) + self.b2

            # with tf.variable_scope('layer_mix_target'):
tensorflow.reshape
7,186
import tensorflow as tf
    def _bn(self, name, x):
        with tf.variable_scope(name):
            moving_average_decay = 0.9
            decay = moving_average_decay

            batch_mean, batch_var = tf.nn.moments(x, [0, 1, 2])

            mu = tf.get_variable('mu', batch_mean.shape, dtype=tf.float32,
                                 initializer=tf.zeros_initializer(), trainable=False)
            tf.add_to_collection(tf.GraphKeys.GLOBAL_VARIABLES, mu)
            tf.add_to_collection('mu_sigma_bn', mu)
            sigma = tf.get_variable('sigma', batch_var.shape, dtype=tf.float32,
                                    initializer=tf.ones_initializer(), trainable=False)
            tf.add_to_collection(tf.GraphKeys.GLOBAL_VARIABLES, sigma)
            tf.add_to_collection('mu_sigma_bn', sigma)
            beta = tf.get_variable('beta', batch_mean.shape, dtype=tf.float32,
                                   initializer=tf.zeros_initializer())
            gamma = tf.get_variable('gamma', batch_var.shape, dtype=tf.float32,
                                    initializer=tf.ones_initializer())

            # BN when training
            update = 1.0 - decay
            update_mu = mu.assign_sub(update * (mu - batch_mean))
            update_sigma = sigma.assign_sub(update * (sigma - batch_var))
            tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_mu)
            tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_sigma)

            mean, var = tf.cond(self.train_flag,
                                lambda: (batch_mean, batch_var),
                                lambda: (mu, sigma))
tensorflow.add_to_collection
7,187
import tensorflow as tf
        self.inputs = inputs
        with tf.variable_scope(name):
            self.embeddings = tf.get_variable(
                name='embeddings',
tensorflow.variable_scope
7,188
import tensorflow as tf
                                 unconnected_gradients=tf.UnconnectedGradients.NONE)
        train_op = optimizer.apply_gradients(zip(grads, tf_sparse_demo.trainable_variables))
        with tf.control_dependencies([train_op]):
            loss = tf.identity(loss)
            return loss, embedding_vector

    dataset = utils.tf_dataset(*random_samples,
                               batchsize=args.global_batch_size,
                               to_sparse_tensor=True,
                               repeat=1)
    train_iterator = dataset.make_initializable_iterator()
    iterator_init = train_iterator.initializer

    inputs, labels = train_iterator.get_next()
    graph_results = _train_step(inputs, labels, training=True)

    init_op = tf.group(tf.global_variables_initializer(),
                       tf.local_variables_initializer())

    restore_op = list()
    for i, embedding_weight in enumerate(tf_sparse_demo.embedding_weights):
        restore_op.append(embedding_weight.assign(tf.concat(init_tensors[i], axis=0)))

    emb_values = list()
    for embedding_weight in tf_sparse_demo.embedding_weights:
        if args.save_params:
            filepath = r"./embedding_variables/"
            utils.try_make_dirs(filepath)
            emb_values.append(embedding_weight.read_value())
        else:
            emb_values = tf.constant(1.0)
tensorflow.global_variables_initializer
7,189
import tensorflow as tf
                # Target for value fn regression
                # We update the vf towards the min of two Q-functions in order to
                # reduce overestimation bias from function approximation error.
                v_backup = tf.stop_gradient(min_qf_pi - self.ent_coef * logp_pi)
                value_loss = 0.5 * tf.reduce_mean(((value_fn - v_backup) ** 2) * self.weight_ph)
                # value_for_priority = tf.reduce_mean((value_fn - v_backup) ** 2, 1)

                regularizervf = tf.contrib.layers.l1_l2_regularizer(
                    scale_l1=0.0, scale_l2=1e-5, scope='model/values_fn')
tensorflow.stop_gradient
7,190
from tensorflow.python.ops import math_ops
    def moving_average(name, value, decay):
        moving_average_variable = vs.get_variable(
            name,
            shape=value.get_shape(),
            dtype=value.dtype,
            initializer=init_ops.zeros_initializer(),
            trainable=False)
        return moving_averages.assign_moving_average(
            moving_average_variable, value, decay, zero_debias=False)

    # quicker adaptation at the beginning
    if global_step is not None:
        n = math_ops.cast(global_step, dtypes.float32)
        decay = math_ops.minimum(decay, n / (n + 1.))

    # update averages
    mean = moving_average("mean", log_norm, decay)
    sq_mean = moving_average("sq_mean", math_ops.square(log_norm), decay)

    variance = sq_mean - math_ops.square(mean)
    std = math_ops.sqrt(math_ops.maximum(epsilon, variance))
    max_norms = math_ops.exp(mean + std_factor * std)
    return max_norms, mean


def adaptive_clipping_fn(std_factor=2.,
                         decay=0.95,
tensorflow.python.ops.math_ops.minimum
7,191
import tensorflow as tf
    test_pre = tf.reshape(test_inf, [testnum, classnum])
    correct_prediction = tf.equal(tf.argmax(test_inf, 1), tf.argmax(test_labels, 1))
tensorflow.argmax
7,192
import tensorflow as tf
        if is_training and config.keep_prob < 1:
            inputs = tf.nn.dropout(inputs, config.keep_prob)

        output, state = self._build_rnn_graph(inputs, config, is_training)

        softmax_w = tf.get_variable(
            'softmax_w', [hidden_size, vocab_size], dtype=tf.float32)
        softmax_b = tf.get_variable('softmax_b', [vocab_size], dtype=tf.float32)
        logits = tf.nn.xw_plus_b(output, softmax_w, softmax_b)
        logits = tf.reshape(logits, [self.batch_size, self.num_steps, vocab_size])

        loss = tf.contrib.seq2seq.sequence_loss(
            logits,
            input_.targets,
            tf.ones([self.batch_size, self.num_steps], dtype=tf.float32),
            average_across_timesteps=False,
tensorflow.nn.xw_plus_b
7,193
import tensorflow as tf
    'resnet_size', 50, 'The size of the ResNet model to use.')
tf.app.flags.DEFINE_integer(
    'train_epochs', None, 'The number of epochs to use for training.')
tf.app.flags.DEFINE_integer(
    'batch_size', 12, 'Batch size for training and evaluation.')
tf.app.flags.DEFINE_string(
    'data_format', 'channels_first',  # 'channels_first' or 'channels_last'
    'A flag to override the data format used in the model. channels_first '
    'provides a performance boost on GPU but is not always compatible '
    'with CPU. If left unspecified, the data format will be chosen '
    'automatically based on whether TensorFlow was built for CPU or GPU.')
tf.app.flags.DEFINE_float(
    'negative_ratio', 3., 'Negative ratio in the loss function.')
tf.app.flags.DEFINE_float(
    'match_threshold', 0.56, 'Matching threshold in the loss function.')
tf.app.flags.DEFINE_float(
    'neg_threshold', 0.4,
    'Matching threshold for the negative examples in the loss function.')
# optimizer related configuration
tf.app.flags.DEFINE_float(
    'weight_decay', 0.0005, 'The weight decay on the model weights.')
tf.app.flags.DEFINE_float(
    'momentum', 0.9,
    'The momentum for the MomentumOptimizer and RMSPropOptimizer.')
tf.app.flags.DEFINE_float('learning_rate', 0.001, 'Initial learning rate.')
tf.app.flags.DEFINE_float(
tensorflow.app.flags.DEFINE_float
7,194
import tensorflow as tf
        # Get conditioning status
        self.local_condition, self.global_condition = self._check_conditions()

        with tf.device('/cpu:0'):
            # Create placeholders for inputs and targets. Don't specify batch size because we want
            # to be able to feed different batch sizes at eval time.
            if is_scalar_input(hparams.input_type):
                input_placeholder = tf.placeholder(tf.float32, shape=(None, 1, None), name='audio_inputs')
                target_placeholder = tf.placeholder(tf.float32, shape=(None, None, 1), name='audio_targets')
                target_type = tf.float32
            else:
                input_placeholder = tf.placeholder(tf.float32,
                                                   shape=(None, hparams.quantize_channels, None),
                                                   name='audio_inputs')
                target_placeholder = tf.placeholder(tf.int32, shape=(None, None, 1), name='audio_targets')
                target_type = tf.int32
tensorflow.placeholder
7,195
import tensorflow as tf
        offset: (height, width)

    Returns:
    """
    with tf.variable_scope('anchor_generator'):
        if offset is None:
            offset = [stride[0]/2, stride[1]/2]

        features_width = tf.cast(features_width, tf.int32)
        features_height = tf.cast(features_height, tf.int32)
        scales = tf.convert_to_tensor(scales, dtype=tf.float32)
        ratios = tf.convert_to_tensor(ratios, dtype=tf.float32)
        offset = tf.convert_to_tensor(offset, dtype=tf.float32)

        scales_grid, ratios_grid = tf.meshgrid(scales, ratios)
        scales_grid = tf.reshape(scales_grid, [-1, 1])
        ratios_grid = tf.reshape(ratios_grid, [-1, 1])

        ratio_sqrts = tf.sqrt(ratios_grid)
        heights = scales_grid / ratio_sqrts * base_size[1]
        widths = scales_grid * ratio_sqrts * base_size[0]

        x_centers = tf.cast(tf.range(features_width), tf.float32)
tensorflow.convert_to_tensor
7,196
import tensorflow as tf
    geq = tf.cast((tgt1 - tgt2) > 0, tf.bool)
    tgt_larg = tf.where(geq, tgt1, tgt2)
    tgt_small = tf.where(geq, tgt2, tgt1)
    pred_larg = tf.where(geq, pred1, pred2)
    pred_small = tf.where(geq, pred2, pred1)

    loss = tf.maximum(0.0, (tgt_larg - tgt_small) - (pred_larg - pred_small))
    # loss = tf.maximum(0.0, tf.math.abs(tgt_larg - pred_larg) - tf.math.abs(tgt_small - pred_small))
    loss = tf.reduce_mean(loss)
tensorflow.where
7,197
import tensorflow as tf
    input_tensor_dict = {
        fields.InputDataFields.image:
            tf.placeholder(tf.float32, [None, None, 3]),
        fields.InputDataFields.image_additional_channels:
tensorflow.placeholder
7,198
import tensorflow as tf
        # X shape (p_s * b_s * i_s)
        net_input = tf.einsum('pbi,pio->pbo', X, self.model_DMW) + self.model_tiled_b
tensorflow.einsum
7,199