seed: string (25 to 2.89k characters)
seed_api: string (14 to 102 characters)
index: int64 (0 to 14.8k)
import tensorflow as tf YDistr = tf.contrib.distributions.MultivariateNormalDiag(loc=tf.zeros(D_int, tf.float32), scale_diag=tf.ones(D_int, tf.float32)) Y = YDistr.sample(N_int) T = 1.0/(2.0*N*tf.sqrt(m.pi*y)) A0 = euclidean_norm_squared(tf.subtract(tf.expand_dims(X, 0), tf.expand_dims(X, 1)), axis=2) A = tf.reduce_sum(phi_sampling(A0/(4*y), D)) B0 = euclidean_norm_squared(tf.subtract(tf.expand_dims(Y, 0), tf.expand_dims(Y, 1)), axis=2) B = tf.reduce_sum(phi_sampling(B0/(4*y), D))
tensorflow.expand_dims
5,000
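For quick reference, a minimal usage sketch of tf.expand_dims with illustrative values (not taken from any seed row):
import tensorflow as tf
x = tf.constant([[1, 2], [3, 4]])   # shape (2, 2)
a = tf.expand_dims(x, axis=0)       # shape (1, 2, 2)
b = tf.expand_dims(x, axis=-1)      # shape (2, 2, 1)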
import tensorflow as tf """Computes the loss and accuracy of the model.""" masked_lm_log_probs = tf.reshape(masked_lm_log_probs, [-1, masked_lm_log_probs.shape[-1]]) masked_lm_predictions = tf.argmax( masked_lm_log_probs, axis=-1, output_type=tf.int32) masked_lm_example_loss = tf.reshape(masked_lm_example_loss, [-1])
tensorflow.argmax
5,001
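A minimal sketch of tf.argmax as used for picking predictions from logits (illustrative values):
import tensorflow as tf
logits = tf.constant([[0.1, 2.0, 0.3], [1.5, 0.2, 0.4]])
preds = tf.argmax(logits, axis=-1, output_type=tf.int32)  # -> [1, 0]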
import tensorflow as tf
with tf.variable_scope('discriminator') as scope:
    gradients = tf.gradients(self.discriminator(interpolates, reuse=True, scope=scope, **disc_kwargs)[1], [interpolates])[0]
    # Reduce over all but the first dimension
    slopes = tf.sqrt(tf.reduce_sum(tf.square(gradients), reduction_indices=list(range(1, ndims))))
    gradient_penalty = tf.reduce_mean((slopes - 1.) ** 2)
    self.loss_d_rot += lam * gradient_penalty
train_vars = tf.trainable_variables()
d_params = [v for v in train_vars if v.name.startswith(name + '/discriminator/')]
g_params = [v for v in train_vars if v.name.startswith(name + '/generator/')]
tensorflow.reduce_mean
5,002
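A minimal sketch of tf.reduce_mean over different axes (illustrative values):
import tensorflow as tf
x = tf.constant([[1.0, 2.0], [3.0, 4.0]])
m_all = tf.reduce_mean(x)                          # 2.5
m_col = tf.reduce_mean(x, axis=0)                  # [2.0, 3.0]
m_keep = tf.reduce_mean(x, axis=1, keepdims=True)  # shape (2, 1)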
import tensorflow as tf
def get_params(self):
    """See base class."""
    return {}, {}

def encode(self, x, encode_params):
    """See base class."""
    del encode_params  # Unused.
    return {self.ENCODED_VALUES_KEY: tf.reduce_mean(x, keepdims=True)}

def decode(self, encoded_tensors, decode_params, num_summands=None, shape=None):
    """See base class."""
tensorflow.reduce_mean
5,003
import tensorflow as tf res = tf.sparse_to_dense( indices_input, [n_elem, n_indices], 1., 0., name="flat_one_hot") res = tf.reshape(res, [elem for elem in shape] + [n_indices])
tensorflow.reshape
5,004
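A minimal sketch of tf.reshape, including the -1 wildcard dimension (illustrative values):
import tensorflow as tf
x = tf.range(12)           # shape (12,)
a = tf.reshape(x, [3, 4])  # shape (3, 4)
b = tf.reshape(a, [-1])    # shape (12,), -1 infers the size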
import tensorflow as tf
    :return:
    """
    # Inverse of a softplus function, so that the value of the standard deviation
    # will be equal to what the user specifies, but we can still enforce positivity
    # by wrapping the standard deviation in the softplus function.
    # standard_dev = tf.log(tf.exp(standard_dev) - 1.0) * tf.ones(shape)
    # it's important to initialize variances with care, otherwise the model takes too long to converge
    sigma_min = 1-1/10
    sigma_max = 1+1/10
    rho_max_init = tf.log(tf.exp(sigma_max) - 1.0)
    rho_min_init = tf.log(tf.exp(sigma_min) - 1.0)
    std_init = tf.random_uniform_initializer(rho_min_init, rho_max_init)
    # Initialize the mean
    mean = tf.get_variable(name + "_mean", shape, dtype=dtype)
    # Initialize the standard deviation
    pre_sigma = tf.get_variable(name + "_standard_deviation", shape, initializer=std_init, dtype=dtype)
    standard_deviation = tf.nn.softplus(pre_sigma) + 1e-5
tensorflow.exp
5,005
import tensorflow as tf
tensorflow.placeholder
5,006
from tensorflow.python.framework import ops
# allowing instances of the class to be used as tensors.
def _tensor_conversion(var, dtype=None, name=None, as_ref=False):
    return var._dense_var_to_tensor(dtype=dtype, name=name, as_ref=as_ref)  # pylint: disable=protected-access

ops.register_tensor_conversion_function(ReplicatedVariable, _tensor_conversion)
if not TF_23:
    ops.register_dense_tensor_like_type(ReplicatedVariable)
tensorflow.python.framework.ops.register_tensor_conversion_function
5,007
import tensorflow as tf eval_batch_size=FLAGS.eval_batch_size, ) else: estimator = tf.estimator.Estimator( model_fn=model_fn, config=run_config, params={ "batch_size": FLAGS.train_batch_size if FLAGS.do_train else FLAGS.eval_batch_size, }, ) if FLAGS.do_train and FLAGS.do_eval: tf.logging.info("***** Running training *****") tf.logging.info(" Training batch size = %d", FLAGS.train_batch_size) train_input_fn = input_fn_builder( input_files=input_files, max_seq_length=FLAGS.max_seq_length, max_predictions_per_seq=FLAGS.max_predictions_per_seq, is_training=True, ) eval_input_fn = input_fn_builder( input_files=input_files, max_seq_length=FLAGS.max_seq_length, max_predictions_per_seq=FLAGS.max_predictions_per_seq, is_training=False,
tensorflow.logging.info
5,008
from tensorflow.contrib.rnn import BasicLSTMCell, RNNCell, DropoutWrapper, MultiRNNCell if encoder.cell_type.lower() == 'lstm': cell = CellWrapper(BasicLSTMCell(encoder.cell_size, reuse=reuse))
tensorflow.contrib.rnn.BasicLSTMCell
5,009
import tensorflow as tf ''' import keras.backend as K import tensorflow as tf from nvidia_info import get_memory_info memory_info = get_memory_info(0) total_memory = memory_info[1] memory_limit = int(fraction*total_memory) print(memory_info) if tf.version.VERSION[0]=="2": gpus = tf.config.experimental.list_physical_devices('GPU') tf.config.experimental.set_memory_growth(gpus[0], True) tf.config.experimental.set_virtual_device_configuration(gpus[0], [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=memory_limit)]) else: gpu_options = tf.GPUOptions(allow_growth=allow_growth, per_process_gpu_memory_fraction=fraction) config = tf.ConfigProto(gpu_options=gpu_options) session = tf.Session(config=config) K.set_session(session)
tensorflow.config.experimental.list_physical_devices
5,010
import tensorflow as tf def maybe_avg(v): if ema is not None and not init: v = tf.cond(training, lambda: v, lambda: ema.average(v)) return v if init: x = tf.matmul(x, tf.nn.l2_normalize(V.initialized_value(), 0)) init_scale = .01 m_init, v_init = tf.nn.moments(x, [0]) scale_init = init_scale / tf.sqrt(v_init + 1e-10) with tf.control_dependencies([g.assign(g * scale_init), b.assign_add(-m_init * scale_init)]): x = tf.reshape(scale_init, [1, num_units]) * (x - tf.reshape(m_init, [1, num_units])) else: V = maybe_avg(V) g = maybe_avg(g) b = maybe_avg(b)
tensorflow.nn.moments
5,011
import tensorflow as tf if train and pdrop > 0: x = tf.nn.dropout(x, 1-pdrop) return x def mask_attn_weights(w): n = shape_list(w)[-1] b = tf.matrix_band_part(tf.ones([n, n]), -1, 0) b = tf.reshape(b, [1, 1, n, n]) w = w*b + -1e9*(1-b) return w def _attn(q, k, v, train=False, scale=False):
tensorflow.ones
5,012
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
self._hidden_units = hidden_units
self._feature_columns = tuple(feature_columns or [])
self._enable_centered_bias = enable_centered_bias
self._estimator = estimator.Estimator(
    model_fn=_dnn_model_fn,
    model_dir=model_dir,
    config=config,
    params={
        "head": head_lib._multi_class_head(  # pylint: disable=protected-access
            n_classes,
            weight_column_name=weight_column_name,
            enable_centered_bias=enable_centered_bias),
        "hidden_units": hidden_units,
        "feature_columns": self._feature_columns,
        "optimizer":
tensorflow.contrib.learn.python.learn.estimators.head._multi_class_head
5,013
import tensorflow as tf print('\ncholesky(D):') print(sess.run(tf.cholesky(identity_matrix))) print('\nselfAdjointEig(D):') print(sess.run(tf.self_adjoint_eig(D))) print(sess.run(tf.div(13, 4))) print(sess.run(tf.truediv(13, 4))) print(sess.run(tf.floordiv(13, 4))) print(sess.run(tf.mod(13.2, 4))) print(sess.run(tf.cross([1, 0, 0], [0, 1, 0]))) print(sess.run(tf.square([1, 2, 3])))
tensorflow.div
5,014
import tensorflow as tf return x def lnlstm(xs, ms, s, scope, nh, init_scale=1.0): nbatch, nin = [v.value for v in xs[0].get_shape()] with tf.variable_scope(scope): wx = tf.get_variable("wx", [nin, nh*4], initializer=ortho_init(init_scale)) gx = tf.get_variable("gx", [nh*4], initializer=tf.constant_initializer(1.0)) bx = tf.get_variable("bx", [nh*4], initializer=tf.constant_initializer(0.0)) wh = tf.get_variable("wh", [nh, nh*4], initializer=ortho_init(init_scale)) gh = tf.get_variable("gh", [nh*4], initializer=tf.constant_initializer(1.0)) bh = tf.get_variable("bh", [nh*4], initializer=tf.constant_initializer(0.0)) b = tf.get_variable("b", [nh*4], initializer=tf.constant_initializer(0.0)) gc = tf.get_variable("gc", [nh], initializer=tf.constant_initializer(1.0)) bc = tf.get_variable("bc", [nh], initializer=tf.constant_initializer(0.0)) c, h = tf.split(axis=1, num_or_size_splits=2, value=s) for idx, (x, m) in enumerate(zip(xs, ms)): c = c*(1-m) h = h*(1-m) z = _ln(tf.matmul(x, wx), gx, bx) + _ln(tf.matmul(h, wh), gh, bh) + b i, f, o, u = tf.split(axis=1, num_or_size_splits=4, value=z) i = tf.nn.sigmoid(i) f = tf.nn.sigmoid(f) o = tf.nn.sigmoid(o) u = tf.tanh(u) c = f*c + i*u h = o*tf.tanh(_ln(c, gc, bc))
tensorflow.constant_initializer
5,015
import tensorflow as tf
states_tiled = tf.tile(states[:, None], [1, num_tasks, 1])  # B x B x D
states_tiled = tf.reshape(states_tiled, [batch_size * num_tasks, obs_dim])  # B*B x D
actions_tiled = tf.tile(actions[:, None], [1, num_tasks, 1])  # B x B x D
actions_tiled = tf.reshape(actions_tiled, [batch_size * num_tasks, action_dim])  # B*B x D
tasks_tiled = tf.tile(tasks[None], [batch_size, 1, 1])  # B x B x D
tasks_tiled = tf.reshape(tasks_tiled, [batch_size * num_tasks, task_dim])  # B*B x D
next_states_tiled = tf.tile(next_states[:, None], [1, num_tasks, 1])
next_states_tiled = tf.reshape(next_states_tiled,
tensorflow.tile
5,016
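A minimal sketch of the expand-then-tile-then-reshape pattern used in the seed above, with illustrative shapes:
import tensorflow as tf
x = tf.constant([[1, 2], [3, 4]])             # shape (2, 2)
t = tf.tile(tf.expand_dims(x, 1), [1, 3, 1])  # shape (2, 3, 2)
flat = tf.reshape(t, [2 * 3, 2])              # shape (6, 2)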
import tensorflow as tf ldj = tf.where(x2 > self.epsilon, ldj, tf.zeros_like(ldj)) return z2, tf.math.reduce_sum(ldj, axis=[1,2,3]) def _inverse(self, x1, z2, **kwargs): params = self.parameterizer(x1) mus, log_sigmas = params[:,:,:,0::2], params[:,:,:,1::2] x2, ldj = log_gaussianize(z2, mus, log_sigmas, inverse=tf.constant(True)) x2 = tf.where(z2 > self.epsilon, x2, z2) ldj = tf.where(z2 > self.epsilon, ldj, tf.zeros_like(ldj)) return x2, tf.math.reduce_sum(ldj, axis=[1,2,3]) def half_gaussianize(x, log_sigmas, inverse=tf.constant(False)): if inverse: z = tf.math.exp(log_sigmas)*x ldj = tf.math.reduce_sum(log_sigmas, axis=[1,2,3]) else: z = x*tf.math.exp(-log_sigmas) ldj = -tf.math.reduce_sum(log_sigmas, axis=[1,2,3]) return z, ldj class HalfGaussianize(Parameterize): """
tensorflow.constant
5,017
import tensorflow as tf
with self.test_session() as sess:
    # Build a graph with 2 parameter nodes, and Save and
    # Restore nodes for them.
    v0 = tf.Variable(10.0, name="v0")
    v1 = tf.Variable(20.0, name="v1")
    save = tf.train.Saver({"save_prefix/v0": v0, "save_prefix/v1": v1})
    tf.initialize_all_variables().run()
    # Check that the parameter nodes have been initialized.
    self.assertEqual(10.0, v0.eval())
tensorflow.train.Saver
5,018
import tensorflow as tf """ if key is None: raise ValueError('A key is required for _min_and_max_per_key') if not reduce_instance_dims: raise NotImplementedError('Per-key elementwise reduction not supported') with tf.compat.v1.name_scope(name, 'min_and_max_per_key'): output_dtype = x.dtype if (not reduce_instance_dims and isinstance(x, (tf.SparseTensor, tf.RaggedTensor)) and x.dtype.is_floating): combine_fn = np.nanmax default_accumulator_value = (np.nan if x.dtype.is_floating else
tensorflow.compat.v1.name_scope
5,019
import tensorflow as tf
lstm_cell_bw = tf.contrib.rnn.LSTMBlockFusedCell(self.params['lstm_size'])
lstm_cell_bw = tf.contrib.rnn.TimeReversedFusedRNN(lstm_cell_bw)
output_fw, _ = lstm_cell_fw(t, dtype=tf.float32, sequence_length=nwords)
output_bw, _ = lstm_cell_bw(t, dtype=tf.float32, sequence_length=nwords)
output = tf.concat([output_fw, output_bw], axis=-1)
# transpose it back
output = tf.transpose(output, perm=[1, 0, 2])
return output
tensorflow.concat
5,020
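A minimal sketch of tf.concat along different axes (illustrative values):
import tensorflow as tf
a = tf.ones([2, 3])
b = tf.zeros([2, 3])
c = tf.concat([a, b], axis=-1)  # shape (2, 6)
d = tf.concat([a, b], axis=0)   # shape (4, 3)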
import tensorflow as tf assert var.name == var_name_n_prune_ratio[0], \ 'unmatched variable names: %s vs. %s' % (var.name, var_name_n_prune_ratio[0]) prune_ratio = self.__calc_prune_ratio_dyn(var_name_n_prune_ratio[1]) # create a mask and non-masked backup for each variable name = var.name.replace(':0', '_mask') mask = tf.get_variable(name, initializer=tf.ones(var.shape), trainable=False) name = var.name.replace(':0', '_var_bkup') var_bkup = tf.get_variable(name, initializer=var.initialized_value(), trainable=False) # create update operations var_bkup_update_op = var_bkup.assign(tf.where(mask > 0.5, var, var_bkup)) with tf.control_dependencies([var_bkup_update_op]): mask_thres = tf.contrib.distributions.percentile(tf.abs(var_bkup), prune_ratio * 100) mask_update_op = mask.assign(tf.cast(tf.abs(var_bkup) > mask_thres, tf.float32)) with tf.control_dependencies([mask_update_op]): prune_op = var.assign(var_bkup * mask) # record pruning masks & operations masks += [mask] prune_ops += [prune_op] return masks, tf.group(prune_ops) def __calc_prune_ratio_dyn(self, prune_ratio_fnl): """Calculate the dynamic pruning ratio. Args: * prune_ratio_fnl: final pruning ratio
tensorflow.abs
5,021
import tensorflow as tf
tf.app.flags.DEFINE_integer('num_readers', 16, '')
tf.app.flags.DEFINE_float('learning_rate', 0.0001, '')
tf.app.flags.DEFINE_integer('max_steps', 100000, '')
tf.app.flags.DEFINE_integer('loss_scale', 1024, '')
tf.app.flags.DEFINE_float('moving_average_decay', 0.997, '')
tf.app.flags.DEFINE_string('gpu_list', '1', '')
tf.app.flags.DEFINE_string('checkpoint_path', '/tmp/east_resnet_v1_50_rbox/', '')
tf.app.flags.DEFINE_boolean('restore', False, 'whether to restore from checkpoint')
tf.app.flags.DEFINE_integer('save_checkpoint_steps', 1000, '')
tf.app.flags.DEFINE_integer('save_summary_steps', 100, '')
tf.app.flags.DEFINE_string('pretrained_model_path', None, '')
tf.app.flags.DEFINE_boolean('allow_mix_precision', False, 'whether to allow mix precision')
tf.app.flags.DEFINE_boolean('auto_tune', False, 'whether to autotune')
tf.app.flags.DEFINE_boolean('use_processed_data', False, 'whether to use processed data')
tf.app.flags.DEFINE_string('processed_data', './processed_dataset/', 'where to save preprocessed datasets')
tensorflow.app.flags.DEFINE_integer
5,022
import tensorflow as tf
with tf.variable_scope(name, reuse=reuse):
    layer_a1 = tf.layers.dense(state_in, 512, tf.nn.relu, kernel_regularizer=reg)
    layer_a2 = tf.layers.dense(layer_a1, 256, tf.nn.relu, kernel_regularizer=reg)
    mu = tf.layers.dense(layer_a2, self.a_dim, tf.nn.tanh, kernel_regularizer=reg)
    sigma = tf.layers.dense(layer_a2, self.a_dim, tf.nn.softplus, kernel_regularizer=reg)
    # sigma = tf.get_variable(name='pi_sigma', shape=self.a_dim, initializer=tf.constant_initializer(0.5))
    sigma = tf.clip_by_value(sigma, 0.0, 1.0)
    norm_dist = tf.distributions.Normal(loc=mu * self.a_bound, scale=sigma)
params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=name)
return norm_dist, params

class PPO_LSTM(Base):
    def __init__(self, env, summary_dir='./', gpu=False):
tensorflow.distributions.Normal
5,023
import tensorflow as tf
U = tf.reshape(flat_logits, [-1, num_acts, self.k])
# Calculate w
cut_g = tf.stop_gradient(self.g)
cut_g = tf.expand_dims(cut_g, [1])
gstack = tf.concat([self.prev_g, cut_g], axis=1)
tensorflow.stop_gradient
5,024
import tensorflow as tf
        lang2_resfile.write("\n")
    return filename

class TranslateDistillProblem(TranslateProblem):
    """Base class for translation problems."""

    def is_generate_per_split(self):
        return True

    def example_reading_spec(self):
        data_fields = {"dist_targets": tf.VarLenFeature(tf.int64)}
        if self.has_inputs:
            data_fields["inputs"] = tf.VarLenFeature(tf.int64)
        # hack: ignoring true targets and putting dist_targets in targets
        data_items_to_decoders = {
            "inputs": tf.contrib.slim.tfexample_decoder.Tensor("inputs"),
            "targets": tf.contrib.slim.tfexample_decoder.Tensor("dist_targets"),
        }
        return (data_fields, data_items_to_decoders)
tensorflow.VarLenFeature
5,025
import tensorflow as tf
l2_loss += tf.nn.l2_loss(W)
l2_loss += tf.nn.l2_loss(b)
# do logit = W*X+b
logit = tf.nn.xw_plus_b(H_drop, W, b, name="scores")
predictions = tf.nn.softmax(logit, name="predictions")
# calculate loss and optimizer
with tf.variable_scope("FCoptimize", reuse=None):
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logit, labels=Y) + l2_reg_lambda * l2_loss)
    optimizer = tf.train.AdamOptimizer(learning_rate).minimize(loss)
# calculate accuracy
correct_predictions = tf.equal(tf.argmax(predictions, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
tensorflow.variable_scope
5,026
import tensorflow as tf ValueError: if num_predictions_per_location is not 1. """ if num_predictions_per_location != 1: raise ValueError('Currently RfcnBoxPredictor only supports ' 'predicting a single box per class per location.') batch_size = tf.shape(proposal_boxes)[0] num_boxes = tf.shape(proposal_boxes)[1] def get_box_indices(proposals): proposals_shape = proposals.get_shape().as_list() if any(dim is None for dim in proposals_shape): proposals_shape = tf.shape(proposals)
tensorflow.shape
5,027
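A minimal sketch contrasting the dynamic tf.shape op with the static shape, the distinction the seed above relies on (illustrative values):
import tensorflow as tf
x = tf.ones([4, 5, 3])
dynamic = tf.shape(x)              # int32 tensor [4, 5, 3], available at run time
static = x.get_shape().as_list()   # Python list [4, 5, 3], known at graph-build time
batch = tf.shape(x)[0]             # works even when the static batch dim is None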
import tensorflow as tf with tf.variable_scope(name): u = conv3d(x, [k, k, k, in_channels, out_channels*pow(r, 3)], 'conv', bias=True, stride=1) h = subpixel_conv3d(u, r, out_channels) return h def minibatch_discrimination(x, n_kernels, dim_per_kernel, name): with tf.variable_scope(name): batch_size, nf = x.get_shape().as_list() h = linear(x, [nf, n_kernels*dim_per_kernel], 'h1') activation = tf.reshape(h, (batch_size, n_kernels, dim_per_kernel)) big = tf.eye(batch_size) big = tf.expand_dims(big, 1) abs_dif = tf.reduce_sum(tf.abs(tf.expand_dims(activation, 3) - tf.expand_dims(tf.transpose(activation, [1, 2, 0]), 0)), 2) mask = 1. - big masked = tf.exp(-abs_dif) * mask
tensorflow.reshape
5,028
import tensorflow as tf
        output /= tf.reduce_sum(
            output, axis=len(output.get_shape()) - 1, keep_dims=True)
        # manual computation of crossentropy
        epsilon = _to_tensor(_EPSILON, output.dtype.base_dtype)
        output = tf.clip_by_value(output, epsilon, 1. - epsilon)
        return -tf.reduce_sum(
            target * tf.log(output), axis=len(output.get_shape()) - 1)
    else:
        try:
            return tf.nn.softmax_cross_entropy_with_logits(
                labels=target, logits=output)
        except TypeError:
tensorflow.log
5,029
import tensorflow as tf if target_modality.top_is_pointwise: cur_sample = samples[:, -1, :, :] else: cur_sample = samples[:, common_layers.shape_list(recent_output)[1], :, :] cur_sample = tf.to_int64(tf.expand_dims(cur_sample, axis=1)) samples = tf.concat([recent_output, cur_sample], axis=1) if not context.in_eager_mode(): samples.set_shape([None, None, None, 1])
tensorflow.expand_dims
5,030
import tensorflow as tf
'''Prediction, loss, and optimization'''
with tf.variable_scope('softmax'):
    W = tf.get_variable('W', [state_size, num_classes])
    b = tf.get_variable('b', [num_classes], initializer=tf.constant_initializer(0.0))
tensorflow.get_variable
5,031
from tensorflow.python.framework import ops ops.RegisterShape("Log")(common_shapes.unchanged_shape) ops.RegisterShape("LogicalNot")(common_shapes.unchanged_shape) ops.RegisterShape("Neg")(common_shapes.unchanged_shape) ops.RegisterShape("Real")(common_shapes.unchanged_shape) ops.RegisterShape("Rsqrt")(common_shapes.unchanged_shape) ops.RegisterShape("Sign")(common_shapes.unchanged_shape) ops.RegisterShape("Sin")(common_shapes.unchanged_shape) ops.RegisterShape("Sqrt")(common_shapes.unchanged_shape) ops.RegisterShape("Square")(common_shapes.unchanged_shape) ops.RegisterShape("Sigmoid")(common_shapes.unchanged_shape) ops.RegisterShape("Tanh")(common_shapes.unchanged_shape) ops.RegisterShape("Cast")(common_shapes.unchanged_shape) ops.RegisterShape("ComplexAbs")(common_shapes.unchanged_shape) @ops.RegisterShape("Add")
tensorflow.python.framework.ops.RegisterShape
5,032
import tensorflow as tf
candidate_start_sentence_indices = tf.gather(flattened_sentence_indices, candidate_starts)  # [num_words, max_span_width]
candidate_end_sentence_indices = tf.gather(flattened_sentence_indices, tf.minimum(candidate_ends, num_words - 1))  # [num_words, max_span_width]
candidate_mask = tf.logical_and(candidate_ends < num_words, tf.equal(candidate_start_sentence_indices, candidate_end_sentence_indices))  # [num_words, max_span_width]
flattened_candidate_mask = tf.reshape(candidate_mask, [-1])  # [num_words * max_span_width]
tensorflow.equal
5,033
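A minimal sketch of tf.equal producing a boolean mask (illustrative values):
import tensorflow as tf
a = tf.constant([1, 2, 3, 2])
mask = tf.equal(a, 2)               # [False, True, False, True]
both = tf.logical_and(mask, a < 3)  # elementwise AND with another condition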
import tensorflow as tf
from google.protobuf import text_format
import unittest
from tensorflow_metadata.proto.v0 import schema_pb2

def _make_tensors_with_override():
    x = tf.compat.v1.placeholder(tf.int64, (None,))
    schema_inference.set_tensor_schema_override(x, tf.constant(5), tf.constant(6))
    return {'x': x}

class SchemaInferenceTest(test_case.TransformTestCase):
    # pylint: disable=g-long-lambda
    @test_case.named_parameters(
tensorflow.constant
5,034
import tensorflow as tf per_images_iou = [] for batch_idx in range(batch_size): box_mask_for_image = tf.equal(proposal_boxes[:, 0], batch_idx) single_image_boxes = tf.boolean_mask(proposal_boxes, box_mask_for_image) single_image_boxes = single_image_boxes[:, 1:] single_image_gt_boxes = gt_boxes[batch_idx, 0:orig_gt_counts[batch_idx], :] single_image_iou = pairwise_iou(single_image_boxes, single_image_gt_boxes) per_images_iou.append(single_image_iou)
tensorflow.boolean_mask
5,035
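A minimal sketch of tf.boolean_mask selecting rows by a per-row condition, similar to the seed above (illustrative values):
import tensorflow as tf
boxes = tf.constant([[0., 1., 2.], [1., 3., 4.], [0., 5., 6.]])
keep = tf.equal(boxes[:, 0], 0.)       # mask over the first axis
picked = tf.boolean_mask(boxes, keep)  # shape (2, 3): rows 0 and 2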
import tensorflow as tf
        original_shape: Shape of `labels` and `logits` before reshape.

    Raises:
        ValueError: If `labels` and `logits` do not have the same shape.
    """
    # Convert `labels` and `logits` to Tensors and standardize dtypes.
    logits = tf.convert_to_tensor(logits, name='logits')
    labels = losses_utils.convert_and_cast(labels, 'labels', logits.dtype.base_dtype)
    weights = losses_utils.convert_and_cast(weights, 'weights', logits.dtype.base_dtype)
    try:
tensorflow.convert_to_tensor
5,036
import tensorflow as tf
    kind = name[:4]
    if kind == 'conv':
        kernels, bias = weights[i][0][0][0][0]
        # matconvnet: weights are [width, height, in_channels, out_channels]
        # tensorflow: weights are [height, width, in_channels, out_channels]
        kernels = utils.get_variable(np.transpose(kernels, (1, 0, 2, 3)), name=name + "_w")
        bias = utils.get_variable(bias.reshape(-1), name=name + "_b")
        current = utils.conv2d_basic(current, kernels, bias)
    elif kind == 'relu':
        current = tf.nn.relu(current, name=name)
        if FLAGS.debug:
            utils.add_activation_summary(current)
    elif kind == 'pool':
        current = utils.avg_pool_2x2(current)
    net[name] = current
return net
tensorflow.nn.relu
5,037
import tensorflow as tf decoder_output = tf.random_normal([batch, rows, cols, hparams.hidden_size]) targets = tf.random_uniform([batch, height, width, channels], minval=-1., maxval=1.) output = common_image_attention.create_output( decoder_output, rows, cols, targets, hparams) if hparams.likelihood == common_image_attention.DistributionType.CAT: self.assertEqual(output.shape, (batch, height, width, channels, depth)) else: self.assertEqual(output.shape, (batch, height, width, depth)) if __name__ == "__main__": tf.test.main()
tensorflow.test.main
5,038
from tensorflow.python.ops import state_ops
g_t = tf.div(m_t_hat, tf.sqrt(v_t_hat) + eps)
g_t_1 = self.get_slot(var, "g")
g_t = g_t_1.assign(g_t)
var_update = state_ops.assign_sub(var, 2. * lr_t * g_t - lr_t * g_t_1)  # Adam would be lr_t * g_t
return control_flow_ops.group(*[var_update, m_t, v_t, g_t])
tensorflow.python.ops.state_ops.assign_sub
5,039
import tensorflow as tf fields.InputDataFields.groundtruth_keypoint_visibilities: tf.constant([True, False, True]), fields.InputDataFields.groundtruth_instance_masks: tf.constant(np.random.rand(3, 4, 4).astype(np.float32)), fields.InputDataFields.groundtruth_is_crowd: tf.constant([False, True, False]), fields.InputDataFields.groundtruth_difficult: tf.constant(np.array([0, 0, 1], np.int32)) }
tensorflow.constant
5,040
import tensorflow as tf
input_image = tf.constant(input_image, dtype=tf.float32)
output = MODEL.build_server_graph(FLAGS, input_image)
output = (output + 1.) * 127.5
output = tf.reverse(output, [-1])
output = tf.saturate_cast(output, tf.uint8)
# load pretrained model
vars_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
assign_ops = []
tensorflow.saturate_cast
5,041
import tensorflow as tf if is_dynamic_rnn: lstm_input = tf.transpose(x, perm=[1, 0, 2]) outputs, _ = tf.lite.experimental.nn.dynamic_rnn( lstm_layer, lstm_input, dtype="float32") outputs = tf.unstack(outputs, axis=0) else: lstm_input = tf.unstack(x, self.time_steps, 1) outputs, _ = tf.nn.static_rnn(lstm_layer, lstm_input, dtype="float32")
tensorflow.unstack
5,042
import tensorflow as tf with tf.variable_scope('initial_lstm'): features_mean = tf.reduce_mean(features, 1)
tensorflow.reduce_mean
5,043
import tensorflow as tf decoder.lstm_proj_size, decoder.layers) assert not decoder.pred_maxout_layer or cell_output_size % 2 == 0, 'cell size must be a multiple of 2' if decoder.use_lstm is False: decoder.cell_type = 'GRU' embedding_shape = [decoder.vocab_size, decoder.embedding_size] weight_scale = decoder.embedding_weight_scale or decoder.weight_scale if weight_scale is None: initializer = None # FIXME elif decoder.embedding_initializer == 'uniform' or (decoder.embedding_initializer is None and decoder.initializer == 'uniform'): initializer = tf.random_uniform_initializer(minval=-weight_scale, maxval=weight_scale) else: initializer = tf.random_normal_initializer(stddev=weight_scale) with tf.device('/cpu:0'): embedding = get_variable('embedding_{}'.format(decoder.name), shape=embedding_shape, initializer=initializer) input_shape = tf.shape(decoder_inputs) batch_size = input_shape[0] time_steps = input_shape[1] scope_name = 'decoder_{}'.format(decoder.name) scope_name += '/' + '_'.join(encoder.name for encoder in encoders)
tensorflow.random_uniform_initializer
5,044
import tensorflow as tf tgtimg_h2 = lrelu(conv2d(tgtimg_h1, self.df_dim*4, name='h2_conv')) tgtimg_h3 = lrelu(conv2d(tgtimg_h2, self.df_dim*8, name='h3_conv')) tgtimg_h4 = lrelu(linear(tf.reshape(tgtimg_h3, [self.batch_size, -1]), featsize, 'h4_lin')) tgtimg_z = lrelu(linear(tgtimg_h4, featsize, 'hz_lin'))
tensorflow.reshape
5,045
import tensorflow as tf
def __init__(self, shape):
    """Creates a LinearModel object."""
    x = tf.placeholder(tf.float32, [None, shape[0]])
    w = tf.Variable(tf.zeros(shape))
    b = tf.Variable(tf.zeros(shape[1]))
    self.x = x
    self.w = w
    self.b = b
    y = tf.nn.softmax(tf.matmul(x, w) + b)
    y_ = tf.placeholder(tf.float32, [None, shape[1]])
    self.y_ = y_
    cross_entropy = tf.reduce_mean(
        -tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1])
    )
    self.cross_entropy = cross_entropy
    self.cross_entropy_grads = tf.gradients(cross_entropy, [w, b])
    self.sess = tf.Session()
    # In order to get and set the weights, we pass in the loss function to
    # Ray's TensorFlowVariables to automatically create methods to modify
    # the weights.
    self.variables = ray.experimental.tf_utils.TensorFlowVariables(
        cross_entropy, self.sess
    )
tensorflow.log
5,046
import tensorflow as tf perturbs = [_mask_by_length(tf.random_normal(shape=tf.shape(emb)), length) for emb in embedded] for _ in range(num_power_iteration): perturbs = [_scale_l2(d, small_constant_for_finite_diff) for d in perturbs] d_logits = logits_from_embedding_fn([emb + d for (emb, d) in zip(embedded, perturbs)]) kl = _kl_divergence_with_logits(logits, d_logits, weights, num_classes) perturbs = tf.gradients( kl, perturbs, aggregation_method=tf.AggregationMethod.EXPERIMENTAL_ACCUMULATE_N) perturbs = [tf.stop_gradient(d) for d in perturbs] perturbs = [_scale_l2(_mask_by_length(d, length), perturb_norm_length) for d in perturbs] vadv_logits = logits_from_embedding_fn([emb + d for (emb, d) in zip(embedded, perturbs)]) return _kl_divergence_with_logits(logits, vadv_logits, weights, num_classes)
tensorflow.stop_gradient
5,047
import tensorflow as tf degree_l.append(degree) order_m.append(order) return (tf.convert_to_tensor(value=degree_l), tf.convert_to_tensor(value=order_m)) def _evaluate_legendre_polynomial_pmm_eval(m, x): pmm = tf.pow(1.0 - tf.pow(x, 2.0), tf.cast(m, dtype=x.dtype) / 2.0) ones = tf.ones_like(m) pmm *= tf.cast( tf.pow(-ones, m) * double_factorial(2 * m - 1), dtype=pmm.dtype) return pmm def _evaluate_legendre_polynomial_loop_cond(x, n, l, m, pmm, pmm1): return tf.cast(tf.math.count_nonzero(n <= l), tf.bool) def _evaluate_legendre_polynomial_loop_body(x, n, l, m, pmm, pmm1):
tensorflow.pow
5,048
import tensorflow as tf
    Returns:
        tuple (final output, loss)
    '''
    y_pairs = tf.reshape(output, [-1, 2])  # fold: 1 x n -> [n/2 x 2]
    pos_scores, neg_scores = tf.split(1, 2, y_pairs)  # separate pairs
    hinge_losses = tf.nn.relu(margin - pos_scores + neg_scores)
    total_hinge_loss = tf.reduce_sum(hinge_losses)
    return output, total_hinge_loss
tensorflow.nn.relu
5,049
from tensorflow.python.ops import state_ops new_value = array_ops.zeros(next_shape, dtype=values.dtype) old_value = array.value() assign_op = state_ops.assign(array, new_value, validate_shape=False) with ops.control_dependencies([assign_op]):
tensorflow.python.ops.state_ops.assign
5,050
import tensorflow as tf tf.flags.DEFINE_string(
tensorflow.flags.DEFINE_string
5,051
import tensorflow as tf mask_fw = dropout(tf.ones([1, batch_size, input_size_], dtype=tf.float32), keep_prob=keep_prob, is_train=is_train, mode=None) mask_bw = dropout(tf.ones([1, batch_size, input_size_], dtype=tf.float32), keep_prob=keep_prob, is_train=is_train, mode=None) self.grus.append((gru_fw, gru_bw, )) self.inits.append((init_fw, init_bw, )) self.dropout_mask.append((mask_fw, mask_bw, )) def __call__(self, inputs, seq_len, keep_prob=1.0, is_train=None, concat_layers=True): outputs = [tf.transpose(inputs, [1, 0, 2])] for layer in range(self.num_layers): gru_fw, gru_bw = self.grus[layer] init_fw, init_bw = self.inits[layer] mask_fw, mask_bw = self.dropout_mask[layer] with tf.variable_scope("fw_{}".format(layer)): out_fw, _ = gru_fw( outputs[-1] * mask_fw, initial_state=(init_fw, )) with tf.variable_scope("bw_{}".format(layer)):
tensorflow.transpose
5,052
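A minimal sketch of tf.transpose with an explicit permutation, e.g. batch-major to time-major as in the seed above (illustrative shapes):
import tensorflow as tf
x = tf.ones([8, 10, 64])             # [batch, time, features]
y = tf.transpose(x, perm=[1, 0, 2])  # shape (10, 8, 64), time-major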
import tensorflow as tf Returns ------- act: (tf.Variable, bool, float) -> tf.Variable function to select and action given observation. ` See the top of the file for details. """ with tf.variable_scope(scope, reuse=reuse): observations_ph = U.ensure_tf_input(make_obs_ph("observation")) stochastic_ph = tf.placeholder(tf.bool, (), name="stochastic") update_eps_ph = tf.placeholder(tf.float32, (), name="update_eps") eps = tf.get_variable("eps", (), initializer=tf.constant_initializer(0)) q_values = q_func(observations_ph.get(), num_actions, scope="q_func") deterministic_actions = tf.argmax(q_values, axis=1) batch_size = tf.shape(observations_ph.get())[0] random_actions = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=num_actions, dtype=tf.int64)
tensorflow.placeholder
5,053
import tensorflow as tf
# Concatenation of above layers, followed by FC layer
concat = tf.concat([flat1b, loc_layer2], 1)  # goal_layer2
h1 = Dense(units=RNN_SIZE)(concat)
h2 = Dense(units=RNN_SIZE)(h1)
self.h3 = tf.nn.relu(h2 + concat)

# Recurrent network for temporal dependencies
lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(RNN_SIZE, state_is_tuple=True)
c_init = np.zeros((1, lstm_cell.state_size.c), np.float32)
h_init = np.zeros((1, lstm_cell.state_size.h), np.float32)
state_init = [c_init, h_init]
c_in = tf.placeholder(tf.float32, [1, lstm_cell.state_size.c])
h_in = tf.placeholder(tf.float32, [1, lstm_cell.state_size.h])
state_in = (c_in, h_in)
rnn_in = tf.expand_dims(self.h3, [0])
step_size = tf.shape(inputs)[:1]
state_in = tf.nn.rnn_cell.LSTMStateTuple(c_in, h_in)
lstm_outputs, lstm_state = tf.nn.dynamic_rnn(
    lstm_cell, rnn_in, initial_state=state_in, sequence_length=step_size,
    time_major=False)
lstm_c, lstm_h = lstm_state
state_out = (lstm_c[:1, :], lstm_h[:1, :])
self.rnn_out = tf.reshape(lstm_outputs, [-1, RNN_SIZE])
'''
CHANGES
- removed blocking layer
- edited out stop_gradient lines (don't need them)
'''
# Value FC
tensorflow.shape
5,054
import tensorflow as tf conv_initializer = tf.contrib.layers.xavier_initializer_conv2d(dtype=tf.float32) return safe_get(name, list(shape), initializer=conv_initializer, dtype=tf.float32) def init_fc_weights_snn(shape, name=None): weights = np.random.normal(scale=np.sqrt(1.0/shape[0]), size=shape).astype('f') return safe_get(name, list(shape), initializer=tf.constant_initializer(weights), dtype=tf.float32) def init_conv_weights_snn(shape, name=None): weights = np.random.normal(scale=np.sqrt(1.0/(shape[0]*shape[1]*shape[2])), size=shape).astype('f') return safe_get(name, list(shape), initializer=tf.constant_initializer(weights), dtype=tf.float32) def batched_matrix_vector_multiply(vector, matrix): """ computes x^T A in mini-batches. """ vector_batch_as_matricies = tf.expand_dims(vector, [1]) mult_result = tf.matmul(vector_batch_as_matricies, matrix) squeezed_result = tf.squeeze(mult_result, [1]) return squeezed_result
tensorflow.constant_initializer
5,055
import tensorflow as tf loss_class_idx_rank = tf.argsort(loss_class_idx, axis=1) mask_pos_per_batch = tf.reshape(mask_pos, [num_batch, num_prior]) num_pos_per_batch = tf.reduce_sum( tf.cast(mask_pos_per_batch, tf.float32), 1, keepdims=True) num_pos_per_batch = tf.maximum(num_pos_per_batch, 1) num_neg_per_batch = tf.minimum(neg_pos_ratio * num_pos_per_batch, tf.cast(num_prior, tf.float32) - 1) mask_hard_neg = tf.reshape( tf.cast(loss_class_idx_rank, tf.float32) < num_neg_per_batch, [num_batch * num_prior, 1]) # 3. classification loss including positive and negative examples loss_class_mask = tf.logical_or(mask_pos, mask_hard_neg) loss_class_mask_b = tf.broadcast_to(loss_class_mask, tf.shape(class_pred)) filter_class_true = tf.boolean_mask(tf.cast(mask_pos, tf.float32), loss_class_mask) filter_class_pred = tf.boolean_mask(class_pred, loss_class_mask_b) filter_class_pred = tf.reshape(filter_class_pred, [-1, num_class]) loss_class = tf.keras.losses.sparse_categorical_crossentropy( y_true=filter_class_true, y_pred=filter_class_pred) loss_class = tf.reduce_mean(loss_class) return loss_loc, loss_landm, loss_class
tensorflow.logical_or
5,056
import tensorflow as tf X_MIDDLE = X # ===============================================================================DECODER with tf.variable_scope(decoderscope) as scope: if reuse_decoder: scope.reuse_variables() # print('vnet scope', is_train, reuse_unet) # print('VNET Latent:', X.get_shape().as_list()) with tf.variable_scope('decoder'): X = decoder_conf('d3', X, 512, F, 1, norm, reuse_decoder, is_train, self.args.dropout) # 12 > 14 if self.args.skip_connections: X = tf.concat((X, X2), axis=-1) X = decoder_conf('u4', X, 256, F, 2, norm, reuse_decoder, is_train, self.args.dropout) # 14 > 28 X = decoder_conf('d4', X, 256, F, 1, norm, reuse_decoder, is_train, self.args.dropout) # 28 > 30 if self.args.skip_connections: X = tf.concat((X, X1), axis=-1) X = decoder_conf('u5', X, 128, F, 2, norm, reuse_decoder, is_train, self.args.dropout) # 30 > 60 X_LATE = X
tensorflow.variable_scope
5,057
import tensorflow as tf BATCH_SIZE = 30 def __init__(self, player_name, build_training_model): self.player_name = player_name self._build_training_model = build_training_model def _build_data_pipeline(self): def normalize(image, label): image = tf.cast(image, tf.float32) / 255.0 return image, label dataset = tf.data.TFRecordDataset(["./data/train.tfrecord"]) dataset = dataset.map(decode) dataset = dataset.map(normalize) dataset = dataset.repeat() dataset = dataset.batch(self.BATCH_SIZE) iterator = dataset.make_one_shot_iterator()
tensorflow.cast
5,058
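A minimal sketch of tf.cast for the uint8-to-float image normalization shown in the seed above (illustrative values):
import tensorflow as tf
image = tf.constant([[0, 128, 255]], dtype=tf.uint8)
scaled = tf.cast(image, tf.float32) / 255.0   # float32 values in [0, 1]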
import tensorflow as tf xlnet_config = xlnet.XLNetConfig(json_path=FLAGS.model_config_path) run_config = xlnet.create_run_config(is_training, True, FLAGS) xlnet_model = xlnet.XLNetModel( xlnet_config=xlnet_config, run_config=run_config, input_ids=inp, seg_ids=seg_id, input_mask=inp_mask) summary = xlnet_model.get_pooled_out(FLAGS.summary_type, FLAGS.use_summ_proj) with tf.variable_scope("logits"): logits = tf.layers.dense(summary, 1, kernel_initializer=xlnet_model.get_initializer()) logits = tf.reshape(logits, [bsz_per_core, 4]) one_hot_target = tf.one_hot(label, 4) per_example_loss = -tf.reduce_sum( tf.nn.log_softmax(logits) * one_hot_target, -1) total_loss = tf.reduce_mean(per_example_loss) return total_loss, per_example_loss, logits
tensorflow.variable_scope
5,059
import tensorflow as tf if i == num_gpu - 1: regularization_losses = tf.get_collection(
tensorflow.get_collection
5,060
import tensorflow as tf
# Jump to that directory until this test is done.
with self.tempWorkingDir(tempdir):
    # Save training snapshots to a relative path.
    traindir = "train/"
    os.mkdir(traindir)
    filename = "snapshot"
    filepath = os.path.join(traindir, filename)
    with self.test_session() as sess:
        # Build a simple graph.
        v0 = tf.Variable(0.0)
        inc = v0.assign_add(1.0)
        save = tf.train.Saver({"v0": v0})
        # Record a short training history.
        tf.initialize_all_variables().run()
        save.save(sess, filepath, global_step=0)
        inc.eval()
        save.save(sess, filepath, global_step=1)
        inc.eval()
        save.save(sess, filepath, global_step=2)
tensorflow.Variable
5,061
import tensorflow as tf name='Categorical_distribution') manual_decoder_input = tf.placeholder(dtype=tf.float32, shape=[1, z_dim + n_labels], name='Decoder_input')
tensorflow.placeholder
5,062
import tensorflow as tf self.assertAllClose(res1, res2) self.assertAllClose(res1, res3) def testSequenceLoss(self): with self.test_session() as sess: logits = [tf.constant(i + 0.5, shape=[2, 5]) for i in range(3)] targets = [tf.constant(i, tf.int32, shape=[2]) for i in range(3)] weights = [tf.constant(1.0, shape=[2]) for i in range(3)] average_loss_per_example = tf.nn.seq2seq.sequence_loss( logits, targets, weights, average_across_timesteps=True, average_across_batch=True) res = sess.run(average_loss_per_example) self.assertAllClose(1.60944, res) average_loss_per_sequence = tf.nn.seq2seq.sequence_loss( logits, targets, weights,
tensorflow.nn.seq2seq.sequence_loss
5,063
import tensorflow as tf
    'batch_size', 10, 'Batch size for training and evaluation.')
tf.app.flags.DEFINE_integer(
    'xt_batch_size', 10, 'Batch size for training and evaluation.')
tf.app.flags.DEFINE_boolean(
    'use_ohkm', True, 'Whether we will use the ohkm for hard keypoints.')
tf.app.flags.DEFINE_string(
    'data_format', 'channels_first',  # 'channels_first' or 'channels_last'
tensorflow.app.flags.DEFINE_boolean
5,064
import tensorflow as tf from scipy.misc import imsave FLAGS = tf.app.flags.FLAGS tf.app.flags.DEFINE_string('dataset', '', 'cifar10 or cifar100.') tf.app.flags.DEFINE_string('mode', 'train', 'train or eval.') tf.app.flags.DEFINE_string('train_data_path', '',
tensorflow.app.flags.DEFINE_string
5,065
import tensorflow as tf that of `labels`. surrogate_type: Either 'xent' or 'hinge', specifying which upper bound should be used for indicator functions. Returns: A `Tensor` of shape [num_labels] or [num_labels, num_anchors]. """ maybe_log2 = tf.log(2.0) if surrogate_type == 'xent' else 1.0 maybe_log2 = tf.cast(maybe_log2, logits.dtype.base_dtype) if logits.get_shape().ndims == 3 and labels.get_shape().ndims < 3: labels = tf.expand_dims(labels, 2) loss_on_positives = losses_utils.weighted_surrogate_loss( labels, logits, surrogate_type, negative_weights=0.0) / maybe_log2 return tf.reduce_sum(weights * (labels - loss_on_positives), 0) def false_positives_upper_bound(labels, logits, weights, surrogate_type): """Calculate an upper bound on the number of false positives. This upper bound on the number of false positives given `logits` and `labels`
tensorflow.expand_dims
5,066
import tensorflow as tf with tf.variable_scope("conv_context") as scope: tgtctx_h0 = lrelu(conv2d(tgtctx, self.df_dim, name='h0_conv')) tgtctx_h1 = lrelu(conv2d(tgtctx_h0, self.df_dim*2, name='h1_conv')) tgtctx_h2 = lrelu(conv2d(tgtctx_h1, self.df_dim*4, name='h2_conv')) tgtctx_h3 = lrelu(conv2d(tgtctx_h2, self.df_dim*8, name='h3_conv')) tgtctx_h4 = lrelu(linear(tf.reshape(tgtctx_h3, [self.batch_size, -1]), featsize, 'h4_lin')) tgtctx_z = linear(tgtctx_h4, featsize, 'hz_lin') with tf.variable_scope("conv") as scope: srcimg_h0 = lrelu(conv2d(srcimg, self.df_dim, name='h0_conv')) srcimg_h1 = lrelu(conv2d(srcimg_h0, self.df_dim*2, name='h1_conv')) srcimg_h2 = lrelu(conv2d(srcimg_h1, self.df_dim*4, name='h2_conv')) srcimg_h3 = lrelu(conv2d(srcimg_h2, self.df_dim*8, name='h3_conv')) print(srcimg_h3.get_shape()) srcimg_h4 = lrelu(linear(tf.reshape(srcimg_h3, [self.batch_size, -1]), featsize, 'h4_lin')) srcimg_z = lrelu(linear(srcimg_h4, featsize, 'hz_lin'))
tensorflow.variable_scope
5,067
from tensorflow.python.ops import math_ops predictions, labels = tensor_util.remove_squeezable_dimensions( predictions, labels) predictions.get_shape().assert_is_compatible_with(labels.get_shape()) value_tensor, update_op = streaming_mean_squared_error( predictions, labels, weights, None, None, name or 'root_mean_squared_error') root_mean_squared_error = math_ops.sqrt(value_tensor) with ops.control_dependencies([update_op]): update_op = math_ops.sqrt(update_op) if metrics_collections: ops.add_to_collections(metrics_collections, root_mean_squared_error) if updates_collections: ops.add_to_collections(updates_collections, update_op) return root_mean_squared_error, update_op
tensorflow.python.ops.math_ops.sqrt
5,068
import tensorflow as tf
    tf.float32,      # data_type for input fed to the graph
    train=False,     # doing inference
    resize_method='crop')
images, labels = preprocessor.minibatch(dataset, subset='train')
graph = load_graph(model_file)
input_tensor = graph.get_tensor_by_name(input_layer + ":0")
output_tensor = graph.get_tensor_by_name(output_layer + ":0")
config = tf.compat.v1.ConfigProto()
config.inter_op_parallelism_threads = num_inter_threads
config.intra_op_parallelism_threads = num_intra_threads
total_accuracy1, total_accuracy5 = (0.0, 0.0)
num_processed_images = 0
num_remaining_images = 5000
top1 = 0
tensorflow.compat.v1.ConfigProto
5,069
import tensorflow as tf self.assertAllEqual(nms_masks2.numpy(), nms_masks_expected2.numpy()) self.assertAllClose(nms_scores2.numpy(), nms_scores_expected2.numpy()) self.assertAllEqual(nms_classes2.numpy(), nms_classes_expected2.numpy()) def test_instance_non_maximum_suppression_2d_scores(self): mask0 = tf.constant([[1, 0], [0, 1]], dtype=tf.float32) mask1 = tf.constant([[1, 1], [0, 1]], dtype=tf.float32) mask2 = tf.constant([[1, 0],
tensorflow.constant
5,070
import tensorflow as tf
# outputs = activation_fn(outputs)
outputs = tf.nn.leaky_relu(outputs, alpha=0.2)
tensorflow.nn.leaky_relu
5,071
import tensorflow as tf
    Parameters
    ----
    logPath: string, path where the logs are stored

    Returns
    ----
    summaryWriter: FileWriter, the log writer
    """
    if tf.gfile.Exists(logPath):
        tf.gfile.DeleteRecursively(logPath)
    summaryWriter = tf.summary.FileWriter(logPath, graph=tf.get_default_graph())
    return summaryWriter
tensorflow.get_default_graph
5,072
import tensorflow as tf
    numerator = -tf.pow((idx - pos), tf.convert_to_tensor(2, dtype=tf.float32))
    div = tf.truediv(numerator, 2 * sigma ** 2)
    weights *= tf.exp(div)  # result of the truncated normal distribution
    # normalize to keep a probability distribution
    # weights /= (tf.reduce_sum(weights, axis=1, keep_dims=True) + 10e-12)
    weighted_average = tf.reduce_sum(tf.expand_dims(weights, axis=2) * hidden_states, axis=1)
    return weighted_average, weights

def attention(encoder, scope=None, **kwargs):
    attention_functions = {
tensorflow.expand_dims
5,073
import tensorflow as tf
with tf.variable_scope(tf.get_variable_scope(), reuse=reuse_variables):
    f_score, f_geometry = model.model(images, is_training=True)
model_loss = model.loss(score_maps, f_score, geo_maps, f_geometry, training_masks)
total_loss = tf.add_n([model_loss] + tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
# add summary
if reuse_variables is None:
    tf.summary.image('input', images)
    tf.summary.image('score_map', score_maps)
tensorflow.get_collection
5,074
import tensorflow as tf self.assertFalse(tf.contrib.util.constant_value(mvn.is_scalar_batch())) # We now test every codepath within the underlying is_scalar_helper # function. # Test case 1, 2. x = tf.placeholder_with_default(input=1, shape=[]) # None would fire an exception were it actually executed. self.assertTrue(normal._is_scalar_helper(x.shape, lambda: None)) self.assertTrue( normal._is_scalar_helper(tf.TensorShape(None), lambda: tf.shape(x))) x = tf.placeholder_with_default(input=[1], shape=[1]) # None would fire an exception were it actually executed. self.assertFalse(normal._is_scalar_helper(x.shape, lambda: None)) self.assertFalse( normal._is_scalar_helper(tf.TensorShape(None), lambda: tf.shape(x))) # There's no notion of partially known shapes in eager mode, so exit # early.
tensorflow.shape
5,075
import tensorflow as tf log("Checkpoint path: {}".format(checkpoint_fpath)) log("Loading training data from: {}".format(metadat_fpath)) log("Using model: Tacotron") log(hparams_debug_string()) # Start by setting a seed for repeatability tf.set_random_seed(hparams.tacotron_random_seed) # Set up data feeder coord = tf.train.Coordinator() with tf.variable_scope("datafeeder") as scope: feeder = Feeder(coord, metadat_fpath, hparams) # Set up model: global_step = tf.Variable(0, name="global_step", trainable=False) model, stats = model_train_mode(args, feeder, hparams, global_step) eval_model = model_test_mode(args, feeder, hparams, global_step) # Embeddings metadata
tensorflow.train.Coordinator
5,076
from tensorflow.python.ops import math_ops predictions_idx=top_k_idx, labels=labels, k=k, class_id=class_id, weights=weights) fp, fp_update = _streaming_sparse_false_positive_at_k( predictions_idx=top_k_idx, labels=labels, k=k, class_id=class_id, weights=weights) metric = math_ops.div(tp, math_ops.add(tp, fp), name=name) update = math_ops.div( tp_update, math_ops.add(tp_update, fp_update), name='update') if metrics_collections: ops.add_to_collections(metrics_collections, metric) if updates_collections: ops.add_to_collections(updates_collections, update) return metric, update
tensorflow.python.ops.math_ops.add
5,077
import tensorflow as tf W = tf.gather(W, unused_indices, axis=0) W = tf.reshape(W, (1, 1, num_out_blocks * block_ch, block_ch)) X = tf.reshape(out_blocks, (-1, w, h, num_out_blocks * block_ch)) X = tf.nn.relu(X)
tensorflow.reshape
5,078
from tensorflow.python.ops import array_ops array_ops.size(tensor.shape) + dim, [1]) else: expand_dims = [dim] expanded_shape = array_ops.concat( 0, (array_ops.slice(tensor.shape, [0], expand_dims), [1], array_ops.slice(tensor.shape, expand_dims, [-1])), name='expanded_shape') expanded = sparse_ops.sparse_reshape(
tensorflow.python.ops.array_ops.slice
5,079
import tensorflow as tf out = img_in with tf.variable_scope("convnet"):
tensorflow.variable_scope
5,080
import tensorflow as tf """Decodes the image and set its static shape.""" image = tf.io.decode_image(parsed_tensors['image/encoded'], channels=3) image.set_shape([None, None, 3]) return image def _decode_boxes(self, parsed_tensors): """Concat box coordinates in the format of [ymin, xmin, ymax, xmax].""" xmin = parsed_tensors['image/object/bbox/xmin'] xmax = parsed_tensors['image/object/bbox/xmax'] ymin = parsed_tensors['image/object/bbox/ymin'] ymax = parsed_tensors['image/object/bbox/ymax'] return tf.stack([ymin, xmin, ymax, xmax], axis=-1) def _decode_masks(self, parsed_tensors): """Decode a set of PNG masks to the tf.float32 tensors.""" def _decode_png_mask(png_bytes): mask = tf.squeeze( tf.io.decode_png(png_bytes, channels=1, dtype=tf.uint8), axis=-1) mask = tf.cast(mask, dtype=tf.float32) mask.set_shape([None, None]) return mask
tensorflow.stack
5,081
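A minimal sketch of tf.stack packing coordinate vectors into boxes, as in the seed above (illustrative values):
import tensorflow as tf
ymin = tf.constant([0.1, 0.2])
xmin = tf.constant([0.3, 0.4])
ymax = tf.constant([0.5, 0.6])
xmax = tf.constant([0.7, 0.8])
boxes = tf.stack([ymin, xmin, ymax, xmax], axis=-1)   # shape (2, 4)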
import tensorflow as tf tf.reduce_all(tf.less(tf.abs(tf.reduce_sum(tf.square(predictions), [1]) - 1), 1e-4)), ['The l2 norm of each prediction quaternion vector should be 1.'])) assertions.append( tf.Assert( tf.reduce_all(tf.less(tf.abs(tf.reduce_sum(tf.square(labels), [1]) - 1), 1e-4)), ['The l2 norm of each label quaternion vector should be 1.'])) with tf.name_scope(name): with tf.control_dependencies(assertions): product = tf.multiply(predictions, labels) internal_dot_products = tf.reduce_sum(product, [1]) logcost = tf.log(1e-4 + 1 - tf.abs(internal_dot_products)) return logcost
tensorflow.name_scope
5,082
import tensorflow as tf
    return biases

# Define a conv layer: scope `name`, input `x`, kernel `W`, stride `stride`, bias `bias`; activation defaults to relu
def conv2d(self, name, x, W, stride, bias):
    with tf.variable_scope(name) as scope:
        conv = tf.nn.conv2d(x, W, [1, stride, stride, 1], padding='SAME')
        pre_activation = tf.nn.bias_add(conv, bias)
        output = tf.nn.relu(pre_activation, name=scope.name)
        return output

# Define a pooling layer, max pooling by default
tensorflow.nn.bias_add
5,083
import tensorflow as tf else: start_loss = focal_loss(tf.nn.softmax(self.logits1, -1), start_label) end_loss = focal_loss(tf.nn.softmax(self.logits2, -1), end_label) self.loss = tf.reduce_mean(start_loss + end_loss) self.logger.info("loss type %s" % self.config.loss_type) self.all_params = tf.trainable_variables() if self.config.l2_norm is not None: self.logger.info("applying l2 loss") variables = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES) l2_loss = tf.contrib.layers.apply_regularization(regularizer, variables)
tensorflow.trainable_variables
5,084
import tensorflow as tf FLAGS = tf.app.flags.FLAGS tf.app.flags.DEFINE_string('dataset', '', 'cifar10 or cifar100.') tf.app.flags.DEFINE_string('mode', 'train', 'train or eval.') tf.app.flags.DEFINE_string('train_data_path', '', 'Filepattern for training data.') tf.app.flags.DEFINE_string('eval_data_path', '', 'Filepattern for eval data') tf.app.flags.DEFINE_string('train_dir', '', 'Directory to keep training outputs.') tf.app.flags.DEFINE_string('eval_dir', '', 'Directory to keep eval outputs.') tf.app.flags.DEFINE_integer('eval_batch_count', 10, 'Number of batches to eval.') tf.app.flags.DEFINE_bool('eval_once', False, 'Whether evaluate the model only once.') tf.app.flags.DEFINE_string('log_root', '', 'Directory to keep the checkpoints. Should be a ' 'parent directory of FLAGS.train_dir/eval_dir.') tf.app.flags.DEFINE_integer('num_gpus', 0, 'Number of gpus used for training. (0 or 1)') tf.app.flags.DEFINE_integer('num_residual_units', 5, 'num of residual units')
tensorflow.app.flags.DEFINE_integer
5,085
import tensorflow as tf :param x: input to the decoder :param reuse: True -> Reuse the decoder variables, False -> Create or search of variables before creating :return: tensor which should ideally be the input given to the encoder. """ if reuse: tf.get_variable_scope().reuse_variables() with tf.name_scope('Decoder'): d_dense_1 = tf.nn.relu(dense(x, z_dim + n_labels, n_l2, 'd_dense_1')) d_dense_2 = tf.nn.relu(dense(d_dense_1, n_l2, n_l1, 'd_dense_2')) output = tf.nn.sigmoid(dense(d_dense_2, n_l1, input_dim, 'd_output')) return output
tensorflow.name_scope
5,086
from tensorflow.python.platform import gfile # s1 should still be here, we are Not checking now to reduce time # variance in the test. # We now have 2 'last_checkpoints': [s2, s3], and s1 on disk. The next # call to Save(), will delete s2, because max_to_keep is 2, and because # we already kept the old s1. s2 is very close in time to s1 so it gets # deleted. s4 = save.save(sess, os.path.join(save_dir, "s4")) self.assertEqual([s3, s4], save.last_checkpoints) # Check that s1 is still here, but s2 is gone. self.assertTrue(gfile.Exists(s1)) self.assertFalse(gfile.Exists(s2)) self.assertTrue(gfile.Exists(s3)) self.assertTrue(gfile.Exists(s4)) class SaveRestoreWithVariableNameMap(tf.test.TestCase): def testNonReshape(self): save_path = os.path.join(self.get_temp_dir(), "basics")
tensorflow.python.platform.gfile.Exists
5,087
import tensorflow as tf @classmethod def nonlinearity_grad_override(cls, op, grad): input = op.inputs[0] return tf.zeros_like(input) """ Saliency maps https://arxiv.org/abs/1312.6034 """ class Saliency(GradientBasedMethod): def get_symbolic_attribution(self): return [tf.abs(g) for g in tf.gradients(ys=self.T, xs=self.X)] """ Gradient * Input https://arxiv.org/pdf/1704.02685.pdf - https://arxiv.org/abs/1611.07270 """ class GradientXInput(GradientBasedMethod): def get_symbolic_attribution(self): return [g * x for g, x in zip( tf.gradients(ys=self.T, xs=self.X), self.X if self.has_multiple_inputs else [self.X])]
tensorflow.abs
5,088
import tensorflow as tf
    'log_every_n_steps', 10, 'The frequency with which logs are printed.')
tf.app.flags.DEFINE_integer(
    'save_summary_steps', 500, 'The frequency with which summaries are saved, in seconds.')
tf.app.flags.DEFINE_integer(
    'save_checkpoints_secs', 7200, 'The frequency with which the model is saved, in seconds.')
# model related configuration
tf.app.flags.DEFINE_integer(
tensorflow.app.flags.DEFINE_integer
5,089
import tensorflow as tf train_data, valid_data, test_data, _ = raw_data config = get_config() eval_config = get_config() eval_config.batch_size = 1 eval_config.num_steps = 1 with tf.Graph().as_default(): initializer = tf.random_uniform_initializer(-config.init_scale, config.init_scale) with tf.name_scope("Train"): train_input = PTBInput(config=config, data=train_data, name="TrainInput") with tf.variable_scope("Model", reuse=None, initializer=initializer): m = PTBModel(is_training=True, config=config, input_=train_input) tf.summary.scalar("Training Loss", m.cost) tf.summary.scalar("Learning Rate", m.lr) with tf.name_scope("Valid"): valid_input = PTBInput(config=config, data=valid_data, name="ValidInput") with tf.variable_scope("Model", reuse=True, initializer=initializer): mvalid = PTBModel(is_training=False, config=config, input_=valid_input) tf.summary.scalar("Validation Loss", mvalid.cost) with tf.name_scope("Test"): test_input = PTBInput(
tensorflow.variable_scope
5,090
import tensorflow as tf
valid_true = tf.argmax(valid_labels, 1)
target_names = ['class sg', 'class bm', 'class wd', 'class wt', 'class wj', 'class wo',
                'class ym', 'class shq', 'class shj', 'class no', 'class yh', 'class fb']
init = tf.initialize_all_variables()
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
# init = tf.initialize_all_variables()

def train(train_num=64, test_num=32, lr=1e-4, loop_count=10000, report_step=100, save_step=1000, restore=False):
    with tf.Session(config=config) as sess:
        sess.run(init)
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)
        if restore:
            tf.train.Saver().restore(sess, path)
        feed_dict = {
            testnum: test_num,
            trainnum: train_num,
            learnrate: lr
        }
        for i in range(loop_count):
            loss_np, _, label_np, image_np, inf_np = sess.run(
                [loss, opti, batch_label, batch_image, inf], feed_dict=feed_dict)
            if i > 0 and i % report_step == 0:
tensorflow.train.Coordinator
5,091
import tensorflow as tf self._initializers = util.check_initializers( initializers, self.POSSIBLE_INITIALIZER_KEYS) def _set_default_initializer(self, var_name): """Sets up a default initializer for a variable if one doesn't exist. For the offset (beta), a zeros initializer is used by default. For the scale (gamma), a ones initializer is used by default. Args: var_name: name of variable as a string. """ if var_name not in self._initializers: if var_name == self.GAMMA: self._initializers[self.GAMMA] = tf.ones_initializer() elif var_name == self.BETA: self._initializers[self.BETA] = tf.zeros_initializer def _build_statistics_variance(self, input_batch, reduction_indices, use_batch_stats): """Builds the statistics part of the graph when using moving variance. Args: input_batch: Input batch Tensor. reduction_indices: Indices of `input_batch` to reduce over. use_batch_stats: Boolean to indicate if batch statistics should be calculated, otherwise moving averages are returned.
tensorflow.ones_initializer
5,092
import tensorflow as tf
## End new version
if self._normalize_cols:
    logits_vec = logits_vec - tf.math.reduce_logsumexp(logits_vec, axis=0)[None]
relabel_indices = tf.random.categorical(logits=logits_vec, num_samples=1)

### Metrics
global_step = tf.compat.v1.train.get_or_create_global_step()
orig_indices = tf.range(self._sample_batch_size, dtype=relabel_indices.dtype)
with tf.name_scope("relabelling"):
    # How often are the originally commanded goals most optimal?
    opt_indices = tf.argmax(logits_vec, axis=1)
    orig_is_opt = opt_indices == orig_indices
    orig_opt_frac = tf.reduce_mean(tf.cast(orig_is_opt, tf.float32))
    tf.compat.v2.summary.scalar(
tensorflow.range
5,093
import tensorflow as tf Args: input_batch: Input batch Tensor. reduction_indices: Indices of `input_batch` to reduce over. use_batch_stats: Boolean to indicate if batch statistics should be calculated, otherwise moving averages are returned. Returns: Tuple of (mean, variance). """ # Set up our moving statistics. When connecting in parallel, this is shared. self._moving_mean = tf.get_variable( "moving_mean", shape=self._mean_shape, collections=[tf.GraphKeys.MOVING_AVERAGE_VARIABLES, tf.GraphKeys.GLOBAL_VARIABLES], initializer=tf.zeros_initializer(), trainable=False) self._moving_variance = tf.get_variable( "moving_variance", shape=self._mean_shape, collections=[tf.GraphKeys.MOVING_AVERAGE_VARIABLES, tf.GraphKeys.GLOBAL_VARIABLES], initializer=tf.ones_initializer(), trainable=False) def build_batch_stats(): """Builds the batch statistics calculation ops.""" # We use the moving mean as an estimate of the mean in order to perform
tensorflow.zeros_initializer
5,094
import tensorflow as tf if tf.test.is_built_with_cuda(): with tf.device('/cpu:0'):
tensorflow.device
5,095
import tensorflow as tf def characters(filename, batch_size, sequence_size): """Returns a dataset of characters from the given file.""" def _to_chars(line): """string scalar -> Dataset of characters (string scalars).""" chars, = tf.py_func(_split_string, [line + "\n"], [tf.string]) chars.set_shape([None]) return tf.data.Dataset.from_tensor_slices(chars) return (tf.data.TextLineDataset([filename]) .flat_map(_to_chars) .repeat() .batch(tf.to_int64(sequence_size)) .shuffle(1000) .batch(tf.to_int64(batch_size)))
tensorflow.data.Dataset.from_tensor_slices
5,096
import tensorflow as tf tf.flags.DEFINE_boolean('distortions', True, """Enable/disable distortions during image preprocessing. These include bbox and color distortions.""") tf.flags.DEFINE_string('local_parameter_device', 'gpu', """Device to use as parameter server: cpu or gpu. For distributed training, it can affect where caching of variables happens.""") tf.flags.DEFINE_string('device', 'gpu', """Device to use for computation: cpu or gpu""") #tf.flags.DEFINE_string('data_format', 'NCHW', tf.flags.DEFINE_string('data_format', 'NHWC', """Data layout to use: NHWC (TF native) or NCHW (cuDNN native).""") tf.flags.DEFINE_integer('num_intra_threads', 1, """Number of threads to use for intra-op parallelism. If set to 0, the system will pick an appropriate number.""") tf.flags.DEFINE_integer('num_inter_threads', 0, """Number of threads to use for inter-op parallelism. If set to 0, the system will pick an appropriate number.""") tf.flags.DEFINE_string('trace_file', None, """Enable TensorFlow tracing and write trace to this file.""") tf.flags.DEFINE_string('graph_file', None, """Write the model's graph definition to this file. Defaults to binary format unless filename ends
tensorflow.flags.DEFINE_integer
5,097
import tensorflow as tf def testEmbeddingRNNSeq2Seq(self): with self.test_session() as sess: with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)): enc_inp = [tf.constant(1, tf.int32, shape=[2]) for i in range(2)] dec_inp = [tf.constant(i, tf.int32, shape=[2]) for i in range(3)] cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=True) dec, mem = tf.nn.seq2seq.embedding_rnn_seq2seq( enc_inp, dec_inp, cell, num_encoder_symbols=2, num_decoder_symbols=5, embedding_size=2) sess.run([tf.global_variables_initializer()]) res = sess.run(dec) self.assertEqual(3, len(res)) self.assertEqual((2, 5), res[0].shape)
tensorflow.nn.seq2seq.embedding_rnn_seq2seq
5,098
import tensorflow as tf c = tf.zeros([config.num_layers, self.batch_size, config.hidden_size], tf.float32) h = tf.zeros([config.num_layers, self.batch_size, config.hidden_size], tf.float32) self._initial_state = (tf.contrib.rnn.LSTMStateTuple(h=h, c=c),) outputs, h, c = self._cell(inputs, h, c, self._rnn_params, is_training) outputs = tf.transpose(outputs, [1, 0, 2]) outputs = tf.reshape(outputs, [-1, config.hidden_size]) return outputs, (tf.contrib.rnn.LSTMStateTuple(h=h, c=c),) def _get_lstm_cell(self, config, is_training): #if config.rnn_mode == BASIC: # return tf.contrib.rnn.BasicLSTMCell( # config.hidden_size, forget_bias=0.0, state_is_tuple=True, # reuse=not is_training) #if config.rnn_mode == BLOCK:
tensorflow.contrib.rnn.LSTMStateTuple
5,099