seed (string, length 25-2.89k) | seed_api (string, length 14-102) | index (int64, 0-14.8k) |
---|---|---|
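Each row in the preview below pairs a `seed` code snippet with the fully qualified TensorFlow symbol it exercises (`seed_api`) and a row id (`index`); the snippet text plus the trailing `| seed_api | index |` marker make up one record. As a minimal sketch of how such records could be represented and filtered once parsed, the `SeedRow` dataclass and `rows_using` helper below are illustrative names introduced here, not part of the dataset itself.

```python
from dataclasses import dataclass
from typing import Iterable, Iterator


@dataclass
class SeedRow:
    seed: str      # code snippet containing at least one call to the target API
    seed_api: str  # fully qualified symbol, e.g. "tensorflow.nn.dropout"
    index: int     # row id (0 to ~14.8k in this split)


def rows_using(rows: Iterable[SeedRow], api_prefix: str) -> Iterator[SeedRow]:
    """Yield rows whose seed_api falls under the given namespace prefix."""
    for row in rows:
        if row.seed_api.startswith(api_prefix):
            yield row


# Usage, assuming `rows` was loaded elsewhere (e.g. from the dataset's files):
# nn_rows = list(rows_using(rows, "tensorflow.nn"))
```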
import tensorflow as tf
encoder_inputs_ = tf.nn.dropout(encoder_inputs_, keep_prob=encoder.word_keep_prob,
noise_shape=noise_shape)
size = tf.shape(encoder_inputs_)[2]
noise_shape = [1, 1, size] if encoder.pervasive_dropout else [batch_size, time_steps, size]
encoder_inputs_ = tf.nn.dropout(encoder_inputs_, keep_prob=encoder.embedding_keep_prob,
noise_shape=noise_shape)
if encoder.input_layers:
for j, layer_size in enumerate(encoder.input_layers):
| tensorflow.nn.dropout | 4,300 |
import tensorflow as tf
# Apply 1 x 1 convolution to each half separately
W_half_1 = self._make_var('W_half_1', (1, 1, in_ch, out_ch >> 1))
X_half_1 = tf.nn.conv2d(half_1, W_half_1, (1, 1, 1, 1), padding='VALID')
W_half_2 = self._make_var('W_half_2', (1, 1, in_ch, out_ch >> 1))
| tensorflow.nn.conv2d | 4,301 |
import tensorflow as tf
tf.global_variables(),
sharded=True,
max_to_keep=10,
keep_checkpoint_every_n_hours=2,
defer_build=False,
save_relative_paths=True)
tf.add_to_collection(tf.GraphKeys.SAVERS, saver)
saver_listener = mtf.MtfCheckpointSaverListener(lowering)
saver_hook = tf.train.CheckpointSaverHook(
hparams.model_dir,
save_steps=1000,
saver=saver,
listeners=[saver_listener])
# EVAL mode
if mode == tf.estimator.ModeKeys.EVAL:
| tensorflow.train.CheckpointSaverHook | 4,302 |
import tensorflow as tf
loss: A tensor for triplet loss. Shape = [].
summaries: A dictionary for loss and batch statistics summaries.
"""
def maybe_expand_sample_dim(embeddings):
if len(embeddings.shape.as_list()) == 2:
return tf.expand_dims(embeddings, axis=-2)
return embeddings
anchor_embeddings = maybe_expand_sample_dim(anchor_embeddings)
positive_embeddings = maybe_expand_sample_dim(positive_embeddings)
match_embeddings = maybe_expand_sample_dim(match_embeddings)
| tensorflow.expand_dims | 4,303 |
import tensorflow as tf
def train(self, iterations, validation_interval=100, output_dir=None,
save_interval=None, checkpoint_path=None, keep_checkpoints=1):
assert 'training' in self.datasets, 'Training dataset is required.'
if output_dir is not None:
train_writer = tf.summary.FileWriter(output_dir)
if not hasattr(self, 'saver'):
with tf.device('/cpu:0'):
self.saver = tf.train.Saver(save_relative_paths=True,
max_to_keep=keep_checkpoints)
| tensorflow.summary.FileWriter | 4,304 |
import tensorflow as tf
tf.constant(config.global_norm_lvalues, dtype=tf.int64)), 0)
rnorm_table = tf.contrib.lookup.HashTable(tf.contrib.lookup.KeyValueTensorInitializer(tf.constant(config.global_norm_key, dtype=tf.int64),
tf.constant(config.global_norm_rvalues, dtype=tf.int64)), 1)
else:
| tensorflow.constant | 4,305 |
import tensorflow as tf
if gpu_idx == 0:
update = tf.assign(num_error_rate, num_error_rate + 1.)
with tf.control_dependencies([update]):
tc = tf.maximum(.01, 1. / num_error_rate)
update = tf.assign(avg_error_rate, (1. - tc) * avg_error_rate + tc * self.error_rate)
with tf.control_dependencies([update]):
self.d_loss_class = tf.identity(self.d_loss_class)
self.d_loss_fake = tf.nn.sigmoid_cross_entropy_with_logits(
logits=self.end_points_D['D_on_G_logits'],
labels=tf.zeros_like(self.end_points_D['D_on_G_logits']))
self.d_loss_class = tf.reduce_mean(self.d_loss_class)
self.d_loss_real = tf.reduce_mean(self.d_loss_real)
self.d_loss_fake = tf.reduce_mean(self.d_loss_fake)
if is_fm_loss:
global_pool_head = self.end_points_D['global_pool']
real_data_features = tf.slice(global_pool_head, [0, 0], [batch_size_train, num_classes])
fake_data_features = tf.slice(global_pool_head, [batch_size_train, 0],
[batch_size_train, num_classes])
self.g_loss = self._feature_matching_loss(real_data_features, fake_data_features)
else:
generator_target_prob = self.cnf['generator_target_prob'] # 0.75 / 2.0
self.g_loss = self._sigmoid_kl_with_logits(self.end_points_D['D_on_G_logits'],
generator_target_prob)
| tensorflow.reduce_mean | 4,306 |
import tensorflow as tf
inputs=embed_inputs,
dtype=tf.float32,
sequence_length=self.seq_len,
) ## (batch_size, seq_len, num_hidden)
# rnn_outputs = tf.transpose(rnn_outputs, perm=[1,0,2]) ## (seq_len, batch_size, num_hidden) NOT NEEDED ANY MORE
last_outputs = self.last_relevant(rnn_outputs, self.seq_len) ## (batch_size, num_hidden)
with tf.variable_scope('output', reuse=forward_only):
with tf.variable_scope('softmax'):
W = tf.get_variable('W', [self.num_hidden, self.num_classes],
# initializer=tf.random_uniform_initializer(-0.003, 0.003))
initializer=tf.contrib.layers.xavier_initializer())
# initializer=tf.truncated_normal_initializer(stddev=0.1))
b = tf.get_variable('b', [self.num_classes], initializer=tf.constant_initializer(0.1))
logits = tf.matmul(last_outputs, W) + b
self.embed_inputs = embed_inputs
return logits
def loss(self, logits, forward_only=None):
cost = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=tf.cast(self.y, tf.float32))
mean_cost = tf.reduce_mean(cost)
y_pred = tf.argmax(logits, 1)
| tensorflow.contrib.layers.xavier_initializer | 4,307 |
import tensorflow as tf
# Create a saver that will keep the last 2 checkpoints plus one every 0.7
# seconds.
start_time = time.time()
save = tf.train.Saver({"v": v}, max_to_keep=2,
keep_checkpoint_every_n_hours=0.7 / 3600)
self.assertEqual([], save.last_checkpoints)
| tensorflow.train.Saver | 4,308 |
import tensorflow as tf
| tensorflow.variable_scope | 4,309 |
import tensorflow as tf
trans_z = linear(trans_h0, featsize, 'trans_z')
self.translated_z = trans_z
with tf.variable_scope("deconv") as scope:
s_h, s_w = self.output_height, self.output_width
s_h2, s_h4, s_h8, s_h16 = \
| tensorflow.variable_scope | 4,310 |
import tensorflow as tf
target_train = [x for ix, x in enumerate(target) if ix in train_indices]
target_test = [x for ix, x in enumerate(target) if ix in test_indices]
# Setup Index Matrix for one-hot-encoding
identity_mat = tf.diag(tf.ones(shape=[embedding_size]))
# Create variables for logistic regression
A = tf.Variable(tf.random_normal(shape=[embedding_size,1]))
b = tf.Variable(tf.random_normal(shape=[1,1]))
# Initialize placeholders
x_data = tf.placeholder(shape=[sentence_size], dtype=tf.int32)
y_target = tf.placeholder(shape=[1, 1], dtype=tf.float32)
# Text-Vocab Embedding
x_embed = tf.nn.embedding_lookup(identity_mat, x_data)
x_col_sums = tf.reduce_sum(x_embed, 0)
# Declare model operations
x_col_sums_2D = tf.expand_dims(x_col_sums, 0)
model_output = tf.add(tf.matmul(x_col_sums_2D, A), b)
# Declare loss function (Cross Entropy loss)
loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=model_output, labels=y_target))
# Prediction operation
prediction = tf.sigmoid(model_output)
# Declare optimizer
my_opt = tf.train.GradientDescentOptimizer(0.001)
| tensorflow.nn.embedding_lookup | 4,311 |
import tensorflow as tf
tf.tile(tf.reshape(tf.range(0, num_ratings), [num_ratings, 1]), [1, num_ratings]))
repeat_op_sq = tf.square((repeat_op - tf.transpose(repeat_op)))
weights = repeat_op_sq / tf.to_float((num_ratings - 1)**2)
pred_ = predictions**y_pow
try:
pred_norm = pred_ / \
(eps + tf.reshape(tf.reduce_sum(pred_, 1), [-1, 1]))
except Exception:
pred_norm = pred_ / \
(eps + tf.reshape(tf.reduce_sum(pred_, 1), [batch_size, 1]))
hist_rater_a = tf.reduce_sum(pred_norm, 0)
hist_rater_b = tf.reduce_sum(labels, 0)
conf_mat = tf.matmul(tf.transpose(pred_norm), labels)
nom = tf.reduce_sum(weights * conf_mat)
denom = tf.reduce_sum(weights * tf.matmul(
tf.reshape(hist_rater_a, [num_ratings, 1]), tf.reshape(hist_rater_b, [1, num_ratings])) /
| tensorflow.reduce_sum | 4,312 |
import tensorflow as tf
def input_fn_builder(
input_files, max_seq_length, max_predictions_per_seq, is_training, num_cpu_threads=4
):
"""Creates an `input_fn` closure to be passed to TPUEstimator."""
def input_fn(params):
"""The actual input function."""
batch_size = params["batch_size"]
name_to_features = {
"input_ids": tf.FixedLenFeature([max_seq_length], tf.int64),
"input_mask": tf.FixedLenFeature([max_seq_length], tf.int64),
"segment_ids": tf.FixedLenFeature([max_seq_length], tf.int64),
"masked_lm_positions": tf.FixedLenFeature(
[max_predictions_per_seq], tf.int64
),
"masked_lm_ids": tf.FixedLenFeature([max_predictions_per_seq], tf.int64),
"masked_lm_weights": tf.FixedLenFeature(
[max_predictions_per_seq], tf.float32
),
"next_sentence_labels": tf.FixedLenFeature([1], tf.int64),
}
# For training, we want a lot of parallel reading and shuffling.
# For eval, we want no shuffling and parallel reading doesn't matter.
if is_training:
d = tf.data.Dataset.from_tensor_slices(tf.constant(input_files))
| tensorflow.FixedLenFeature | 4,313 |
import tensorflow as tf
tf.scalar_summary(loss.op.name, loss)
| tensorflow.scalar_summary | 4,314 |
import tensorflow as tf
def keep_prob(dropout, train):
return tf.cond(train, lambda: tf.constant(dropout), lambda: tf.constant(1.))
| tensorflow.constant | 4,315 |
import tensorflow as tf
log_probs = tf.reduce_sum(log_probs, 3) + \
| tensorflow.reduce_sum | 4,316 |
import tensorflow as tf
dtype=tf.float32,
initializer=tf.constant_initializer(
self.vocab.char_embeddings[:2],
dtype=tf.float32),
trainable=True)
self.char_mat = tf.concat([self.char_pad_unk_mat, self.pretrained_char_mat], axis=0)
else:
self.word_mat = tf.get_variable(
'word_embeddings',
shape=[self.vocab.word_size(), self.vocab.word_embed_dim],
| tensorflow.concat | 4,317 |
import tensorflow as tf
with tf.name_scope("softmax_linear"):
weights = tf.Variable(
tf.truncated_normal([32, 10],
stddev=1.0 / math.sqrt(float(32))),
name="weights")
biases = tf.Variable(tf.zeros([10]),
name="biases")
logits = tf.matmul(hidden2, weights) + biases
tf.add_to_collection("logits", logits)
# Runs to logit.
tf.initialize_all_variables().run()
sess.run(logits)
# Creates a saver.
saver0 = tf.train.Saver()
saver0.save(sess, saver0_ckpt)
# Generates MetaGraphDef.
saver0.export_meta_graph(filename)
def _testGraphExtensionRestore(self):
test_dir = os.path.join(self.get_temp_dir(), "graph_extension")
filename = os.path.join(test_dir, "metafile")
| tensorflow.initialize_all_variables | 4,318 |
import tensorflow as tf
class TransposedConv2d(object):
def __init__(self,name,input_dim,out_dim,
k_h=4,k_w=4,d_h=2,d_w=2,stddev=0.02,data_format='NCHW') :
with tf.variable_scope(name) :
self.w = tf.get_variable('w', [k_h, k_w, out_dim, input_dim],
initializer=tf.random_normal_initializer(stddev=stddev))
self.b = tf.get_variable('b',[out_dim], initializer=tf.constant_initializer(0.0))
self.data_format = data_format
if( data_format =='NCHW' ):
self.strides = [1, 1, d_h, d_w]
else:
| tensorflow.constant_initializer | 4,319 |
import tensorflow as tf
for use_tensor in [True, False]:
with self.test_session() as sess:
var = tf.Variable(1.0, name="var0")
save = tf.train.Saver({var.op.name: var})
var.initializer.run()
if use_tensor:
global_step = tf.constant(global_step_int)
val = save.save(sess, save_path, global_step=global_step)
else:
val = save.save(sess, save_path, global_step=global_step_int)
expected_save_path = "%s-%d" % (save_path, global_step_int)
self.assertEqual(expected_save_path, val)
| tensorflow.constant | 4,320 |
import tensorflow as tf
self.mixed_label = tf.gather(self.mixed_label, mixed_idx)
self.mixed_pred, mixed_end_points = self.get_pred(self.mixed_pc)
self.mixed_loss = self.get_loss(self.mixed_pred, self.mixed_label, mixed_end_points)
with tf.variable_scope('discriminator') as scope:
self.real_prob, self.real_logit = self.discriminator(self.real_pc_rotated, scope=scope, **disc_kwargs)
self.synthetic_prob, self.synthetic_logit = self.discriminator(self.gen_out_rotated, reuse=True, scope=scope, **disc_kwargs)
# Compute WGAN losses
self.loss_d = tf.reduce_mean(self.synthetic_logit) - tf.reduce_mean(self.real_logit) # comparing rotated fake and real images
self.loss_g = -tf.reduce_mean(self.synthetic_logit)
# Add rotation loss
if self.ms_task:
self.g_ms_loss = tf.abs(self.gen_out_rot_loss - self.real_pc_rot_loss, name = 'abs')
self.d_ms_loss = self.mixed_loss
self.loss_d_rot = self.loss_d + self.weight_rotation_loss_d * self.d_ms_loss
self.loss_g_rot = self.loss_g + self.weight_rotation_loss_g * self.g_ms_loss
else:
self.loss_d_rot = self.loss_d + self.weight_rotation_loss_d * self.real_pc_rot_loss
self.loss_g_rot = self.loss_g + self.weight_rotation_loss_g * self.gen_out_rot_loss
# Compute gradient penalty at interpolated points
ndims = self.real_pc.get_shape().ndims #(1024, 3)
alpha = tf.random_uniform(shape=[self.batch_size] + [1] * (ndims - 1), minval=0., maxval=1.)
differences = self.generator_out - self.real_pc
interpolates = self.real_pc + (alpha * differences)
with tf.variable_scope('discriminator') as scope:
| tensorflow.abs | 4,321 |
from tensorflow.python.framework import ops
@ops.RegisterShape("SegmentMean")
@ops.RegisterShape("SegmentMin")
@ops.RegisterShape("SegmentProd")
@ops.RegisterShape("SegmentSum")
def _SegmentReductionShape(op):
"""Common shape function for segment reduction ops."""
data_shape = op.inputs[0].get_shape()
segment_ids_shape = op.inputs[1].get_shape()
segment_ids_shape.assert_has_rank(1)
return [tensor_shape.TensorShape([None]).concatenate(data_shape[1:])]
@ops.RegisterShape("SparseSegmentMean")
@ops.RegisterShape("SparseSegmentSum")
def _SparseSegmentReductionShape(op):
"""Common shape function for sparse segment reduction ops."""
data_shape = op.inputs[0].get_shape()
indices_shape = op.inputs[1].get_shape()
indices_shape.assert_has_rank(1)
segment_ids_shape = op.inputs[2].get_shape()
segment_ids_shape.assert_has_rank(1)
indices_shape.assert_is_compatible_with(segment_ids_shape)
return [tensor_shape.TensorShape([None]).concatenate(data_shape[1:])]
| tensorflow.python.framework.ops.RegisterShape | 4,322 |
from tensorflow.python.ops import init_ops
return self._num_units
def __call__(self, inputs, state, att_score):
return self.call(inputs, state, att_score)
def call(self, inputs, state, att_score=None):
"""Gated recurrent unit (GRU) with nunits cells."""
if self._gate_linear is None:
bias_ones = self._bias_initializer
if self._bias_initializer is None:
bias_ones = init_ops.constant_initializer(1.0, dtype=inputs.dtype)
with vs.variable_scope("gates"): # Reset gate and update gate.
self._gate_linear = _Linear(
[inputs, state],
2 * self._num_units,
True,
bias_initializer=bias_ones,
kernel_initializer=self._kernel_initializer)
| tensorflow.python.ops.init_ops.constant_initializer | 4,323 |
import tensorflow as tf
@staticmethod
def last_relevant(outputs, length):
# Borrowed from: https://gist.github.com/rockt/f4f9df5674f3da6a32786bcf9fbb6a88
batch_size, max_length, hidden_size = tf.unstack(tf.shape(outputs))
index = tf.range(0, batch_size) * max_length + (tf.cast(length, tf.int32) - 1)
flat = tf.reshape(outputs, [-1, hidden_size])
relevant = tf.gather(flat, index)
return relevant
| tensorflow.gather | 4,324 |
import tensorflow as tf
self.Y = tf.placeholder(tf.float32, (None, None, n_mels * resampled))
self.decoder_inputs = tf.concat((tf.zeros_like(self.Y[:, :1, :]), self.Y[:, :-1, :]), 1)
self.decoder_inputs = self.decoder_inputs[:, :, -n_mels:]
self.Z = tf.placeholder(tf.float32, (None, None, fourier_window_size // 2 + 1))
batch_size = tf.shape(self.X)[0]
seq_lens = tf.count_nonzero(tf.reduce_sum(self.decoder_inputs, -1), 1, dtype=tf.int32) + 1
def cells(reuse=False):
return tf.contrib.rnn.DropoutWrapper(
tf.nn.rnn_cell.LSTMCell(
size_layers, initializer=tf.orthogonal_initializer(), reuse=reuse
| tensorflow.reduce_sum | 4,325 |
import tensorflow as tf
file_based_convert_examples_to_features(
train_examples, label_list, FLAGS.max_seq_length, tokenizer, train_file)
tf.logging.info("***** Running training *****")
tf.logging.info(" Num examples = %d", len(train_examples))
tf.logging.info(" Batch size = %d", FLAGS.train_batch_size)
tf.logging.info(" Num steps = %d", num_train_steps)
train_input_fn = file_based_input_fn_builder(
input_file=train_file,
seq_length=FLAGS.max_seq_length,
is_training=True,
| tensorflow.logging.info | 4,326 |
import tensorflow as tf
output_data = tf.nn.bias_add(tf.matmul(output_data, expand_W), expand_b)
output_data = tf.nn.elu(output_data)
current_size = output_sizes[i]
#expand_W = tf.get_variable("final_W", [current_size, 1])
#expand_b = tf.get_variable("final_b" , [1])
#output_data = tf.nn.bias_add(tf.matmul(output_data, expand_W), expand_b)
return output_data
output_propensity_list = []
for i in range(list_size):
# Add position information (one-hot vector)
click_feature = [tf.expand_dims(tf.zeros_like(self.labels[i]) , -1) for _ in range(4*list_size)]
click_feature[i] = tf.expand_dims(tf.ones_like(self.labels[i]) , -1)
# click_feature[list_size:]=[tf.expand_dims(tf.zeros_like(self.labels[i]) , -1) for _ in range(3*list_size)]
click_feature[list_size:list_size+i] =[tf.expand_dims(self.labels[k] , -1) for k in range(i-1,-1,-1)]
click_feature[2*list_size:2*list_size+i+1]=[tf.expand_dims(self.types[k] , -1) for k in range(i,-1,-1)]
click_feature[3*list_size:3*list_size+list_size-i-1]=[tf.expand_dims(self.types[k] , -1) for k in range(i+1,list_size)]
# Predict propensity with a simple network
output_propensity_list.append(propensity_network(tf.concat(click_feature, 1), i))
self.click_show=[click_feature[h][0] for h in range(4*list_size)]
return tf.concat(output_propensity_list,1)
def step(self, session, input_feed, forward_only):
"""Run a step of the model feeding the given inputs.
| tensorflow.ones_like | 4,327 |
import tensorflow as tf
# of examples must be a multiple of the batch size, or else examples
# will get dropped. So we pad with fake examples which are ignored
# later on.
while len(predict_examples) % FLAGS.predict_batch_size != 0:
predict_examples.append(PaddingInputExample())
predict_file = os.path.join(FLAGS.output_dir, "predict.tf_record")
file_based_convert_examples_to_features(predict_examples, label_list,
FLAGS.max_seq_length, tokenizer,
predict_file)
tf.logging.info("***** Running prediction*****")
tf.logging.info(" Num examples = %d (%d actual, %d padding)",
len(predict_examples), num_actual_predict_examples,
len(predict_examples) - num_actual_predict_examples)
tf.logging.info(" Batch size = %d", FLAGS.predict_batch_size)
predict_drop_remainder = True if FLAGS.use_tpu else False
predict_input_fn = file_based_input_fn_builder(
input_file=predict_file,
seq_length=FLAGS.max_seq_length,
is_training=False,
drop_remainder=predict_drop_remainder)
result = estimator.predict(input_fn=predict_input_fn)
output_predict_file = os.path.join(FLAGS.output_dir, "test_results.tsv")
with tf.gfile.GFile(output_predict_file, "w") as writer:
num_written_lines = 0
tf.logging.info("***** Predict results *****")
| tensorflow.logging.info | 4,328 |
import tensorflow as tf
# compute the error (potentially clipped)
td_error = q_t_selected - tf.stop_gradient(q_t_selected_target)
errors = tf_util.huber_loss(td_error)
weighted_error = tf.reduce_mean(importance_weights_ph * errors)
tf.summary.scalar("td_error", tf.reduce_mean(td_error))
tf.summary.scalar("loss", weighted_error)
| tensorflow.reduce_mean | 4,329 |
from tensorflow.contrib.learn.python.learn import ops
from tensorflow.python.platform import test
class OpsTest(test.TestCase):
"""Ops tests."""
def test_softmax_classifier(self):
with self.cached_session() as session:
features = array_ops.placeholder(dtypes.float32, [None, 3])
labels = array_ops.placeholder(dtypes.float32, [None, 2])
weights = constant_op.constant([[0.1, 0.1], [0.1, 0.1], [0.1, 0.1]])
biases = constant_op.constant([0.2, 0.3])
class_weight = constant_op.constant([0.1, 0.9])
prediction, loss = ops.softmax_classifier(features, labels, weights,
biases, class_weight)
self.assertEqual(prediction.get_shape()[1], 2)
self.assertEqual(loss.get_shape(), [])
value = session.run(loss, {features: [[0.2, 0.3, 0.2]], labels: [[0, 1]]})
self.assertAllClose(value, 0.55180627)
def test_embedding_lookup(self):
d_embed = 5
n_embed = 10
ids_shape = (2, 3, 4)
embeds = np.random.randn(n_embed, d_embed)
ids = np.random.randint(0, n_embed, ids_shape)
| tensorflow.contrib.learn.python.learn.ops.softmax_classifier | 4,330 |
import tensorflow as tf
flags.DEFINE_string("optimizer", "adamw", "Optimizer to use")
tf.flags.DEFINE_string(
"tpu_name", None,
"The Cloud TPU to use for training. This should be either the name "
| tensorflow.flags.DEFINE_string | 4,331 |
import tensorflow as tf
pi_dropout_mask_phs = pi_dropout_mask_generator.generate_dropout_mask_placeholders()
pi, pi_reg = mlp_variational(x, pi_dropout_mask_phs, list(hidden_sizes) + [act_dim], activation, output_activation, dropout_rate)
pi = act_limit * pi
with tf.variable_scope('q1'):
q1_in_ph = tf.concat([x, a], axis=-1)
q1_in_dim = q1_in_ph.shape.as_list()[1]
q1_dropout_mask_generator = DropoutMaskGenerator(q1_in_dim, hidden_sizes, model_prob=1.0 - dropout_rate)
q1_dropout_mask_phs = q1_dropout_mask_generator.generate_dropout_mask_placeholders()
| tensorflow.concat | 4,332 |
import tensorflow as tf
with self.assertRaisesWithPredicateMatch(
IOError, lambda e: "does not exist"):
tf.train.import_meta_graph(filename)
def testSliceVariable(self):
test_dir = self._TestDir("slice_saver")
filename = os.path.join(test_dir, "metafile")
with self.test_session():
v1 = tf.Variable([20.0], name="v1")
v2 = tf.Variable([20.0], name="v2")
v2._set_save_slice_info(tf.Variable.SaveSliceInfo("v1", [1], [0], [1]))
# The names are different and will work.
slice_saver = tf.train.Saver({"first": v1, "second": v2})
tf.initialize_all_variables().run()
# Exports to meta_graph
meta_graph_def = slice_saver.export_meta_graph(filename)
with tf.Graph().as_default():
# Restores from MetaGraphDef.
| tensorflow.Variable.SaveSliceInfo | 4,333 |
import tensorflow as tf
lambda: tf.no_op())
with tf.control_dependencies([p]):
return tf.reduce_mean(loss)
loss = tf.map_fn(fn=lambda inp: sample_compute(inp), elems=tf.range(resample), dtype=tf.float32,
parallel_iterations=32)
final_loss = tf.reduce_mean(loss)
return final_loss
def contra_traj_lossV1(pred, tgt, temp=10.0):
# Trajectory-wise contrastive loss
traj_pred = tf.reduce_mean(pred, axis=1)
traj_tgt = tf.reduce_mean(tgt, axis=1)
p1, p2 = tf.split(traj_pred, 2, axis=0)
t1, t2 = tf.split(traj_tgt, 2, axis=0)
soft_sign = tf.tanh((t1 - t2) * temp)
loss = tf.maximum(0.0, soft_sign * ((t1 - t2) - (p1 - p2)))
loss = tf.reduce_mean(loss)
return loss
def horizon_sumV1(input, horizon=12):
bs, epi_len = input.shape[:2]
new_w = epi_len - horizon + 1
| tensorflow.reduce_mean | 4,334 |
from tensorflow.contrib.eager.python.examples.linear_regression import linear_regression
it.next()
def testLinearRegression(self):
true_w = [[1.0], [-0.5], [2.0]]
true_b = [1.0]
model = linear_regression.LinearModel()
dataset = linear_regression.synthetic_dataset(
true_w, true_b, noise_level=0., batch_size=64, num_batches=40)
with tf.device(device()):
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
linear_regression.fit(model, dataset, optimizer, logdir=self._tmp_logdir)
self.assertAllClose(true_w, model.variables[0].numpy(), rtol=1e-2)
self.assertAllClose(true_b, model.variables[1].numpy(), rtol=1e-2)
self.assertTrue(glob.glob(os.path.join(self._tmp_logdir, "events.out.*")))
class EagerLinearRegressionBenchmark(tf.test.Benchmark):
def benchmarkEagerLinearRegression(self):
num_epochs = 10
num_batches = 200
| tensorflow.contrib.eager.python.examples.linear_regression.linear_regression.fit | 4,335 |
import tensorflow as tf
net = InputLayer(inputs, name='observation')
net = DenseLayer(net, n_units=4, act=tf.identity,
W_init=tf.random_uniform_initializer(0, 0.01), b_init=None, name='q_a_s')
y = net.outputs # action-value / rewards of 4 actions
predict = tf.argmax(y, 1) # choose action greedily with reward. In Q-Learning, policy is greedy, so we use "max" to select the next action.
## Below we obtain the loss by taking the sum of squares difference between the target and prediction Q values.
nextQ = tf.placeholder(shape=[1, 4], dtype=tf.float32)
loss = tl.cost.mean_squared_error(nextQ, y, is_mean=False) # tf.reduce_sum(tf.square(nextQ - y))
train_op = tf.train.GradientDescentOptimizer(learning_rate=0.1).minimize(loss)
## Set learning parameters
lambd = .99 # decay factor
e = 0.1 # e-Greedy Exploration, the larger the more random
num_episodes = 10000
with tf.Session() as sess:
tl.layers.initialize_global_variables(sess)
for i in range(num_episodes):
| tensorflow.train.GradientDescentOptimizer | 4,336 |
import tensorflow as tf
wrong = tf.to_float(tf.logical_not(tf.nn.in_top_k(logits, label, 1)), name='incorrect_vector')
summary.add_moving_summary(tf.reduce_mean(wrong, name='train_error'))
wd_cost = tf.multiply(1e-5, regularize_cost('fc.*/W', tf.nn.l2_loss),
name='regularize_loss')
summary.add_moving_summary(cost, wd_cost)
self.cost = tf.add_n([wd_cost, cost], name='cost')
def _get_optimizer(self):
lr = tf.get_variable('learning_rate', initializer=5e-4, trainable=False)
opt = tf.train.AdamOptimizer(lr, epsilon=1e-3)
return optimizer.apply_grad_processors(
opt, [
gradproc.ScaleGradient(('STN.*', 0.1)),
gradproc.SummaryGradient()])
def get_data(isTrain):
| tensorflow.get_variable | 4,337 |
import tensorflow as tf
start_string = request.data.decode().lower()
n_words = 5
hidden = [tf.zeros((1, units))]
for i in range(n_words):
start_words = start_string.split()
input_eval = [word2idx[i] for i in start_words]
input_eval = tf.expand_dims(input_eval, 0)
predictions, hidden = model(input_eval, hidden)
predicted_id = tf.argmax(predictions[-1]).numpy()
start_string += " " + idx2word[predicted_id]
| tensorflow.expand_dims | 4,338 |
import tensorflow as tf
for i in range(len(propensity_list)):
pw_i = propensity_list[0] / propensity_list[i]
pw_list.append(pw_i)
propensity_weights = tf.stack(pw_list, axis=1)
if self.hparams.max_propensity_weight > 0:
propensity_weights = tf.clip_by_value(propensity_weights, clip_value_min=0, clip_value_max=self.hparams.max_propensity_weight)
| tensorflow.stack | 4,339 |
import tensorflow as tf
# Final (unnormalized) layer
with tf.name_scope("output"):
W = tf.get_variable("W",
shape=[total_filters, nb_classes],
initializer=tf.contrib.layers.xavier_initializer())
# add final layer bias
b = tf.Variable(tf.constant(0.1, shape=[nb_classes]), name="b")
# calc l2 losses
l2_loss += tf.nn.l2_loss(W)
l2_loss += tf.nn.l2_loss(b)
# do logit = W*X+b
logit = tf.nn.xw_plus_b(H_drop, W, b, name="scores")
predictions = tf.nn.softmax(logit, name="predictions")
# calculate loss and optimizer
with tf.variable_scope("FCoptimize", reuse=None):
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits= logit, labels=Y)
+ l2_reg_lambda * l2_loss)
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(loss)
# calculate accuracy
correct_predictions = tf.equal(tf.argmax(predictions, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
print ("done...")
| tensorflow.nn.softmax | 4,340 |
import tensorflow as tf
moving_mean = self._make_var('moving_mean', (in_ch,), trainable=False, init_constant=0)
moving_variance = self._make_var('moving_variance', (in_ch,), trainable=False, init_constant=1)
if is_train:
# For training, do batch norm with batch mean & variance
# Update moving averages if training
(X, mean, variance) = tf.nn.fused_batch_norm(X, scale, offset, epsilon=epsilon, is_training=True)
update_mean = moving_averages.assign_moving_average(moving_mean, mean, decay)
update_variance = moving_averages.assign_moving_average(moving_variance, variance, decay)
with tf.control_dependencies([update_mean, update_variance]):
X = tf.identity(X)
else:
# For prediction, do batch norm with computed moving mean & variance from training
# Don't update moving averages if predicting
(X, _, _) = tf.nn.fused_batch_norm(X, scale, offset, mean=moving_mean, variance=moving_variance,
epsilon=epsilon, is_training=False)
else:
(X, _, _) = tf.nn.fused_batch_norm(X, scale, offset, epsilon=epsilon, is_training=True)
| tensorflow.control_dependencies | 4,341 |
import tensorflow as tf
self.build_placeholders()
self.build_perception()
self.build_manager()
self.build_worker()
self.build_loss()
self.var_list = tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES, tf.get_variable_scope().name)
# for v in self.var_list:
# print v.name
self.state_in = [self.worker_lstm.state_in[0],
self.worker_lstm.state_in[1],
| tensorflow.get_variable_scope | 4,342 |
import tensorflow as tf
dtype=_x.dtype, initializer=tf.constant_initializer(0.1))
return tf.maximum(0.0, _x) + _alpha * tf.minimum(0.0, _x)
| tensorflow.minimum | 4,343 |
import tensorflow as tf
returns the variable list after applying the filter rule.
Returns:
tpu_spec: the TPUEstimatorSpec to run training, evaluation, or prediction.
"""
if params['transpose_input'] and mode == tf.estimator.ModeKeys.TRAIN:
features['images'] = tf.transpose(features['images'], [3, 0, 1, 2])
image_size = (params['image_size'], params['image_size'])
all_anchors = anchors.Anchors(params['min_level'], params['max_level'],
params['num_scales'], params['aspect_ratios'],
params['anchor_scale'], image_size)
| tensorflow.transpose | 4,344 |
import tensorflow as tf
tgt2 = tf.slice(batch2, [0, 1], [num_sam, 1])
return pred1, pred2, tgt1, tgt2
return uniform
def contra_traj_lossV5(pred, tgt, horizon=12, resample=1, hard_ratio=1.0):
horizon_pred = horizon_sumV1(pred, horizon)
horizon_tgt = horizon_sumV1(tgt, horizon)
pred_flat = tf.reshape(horizon_pred, [-1])
tgt_flat = tf.reshape(horizon_tgt, [-1])
batch = tf.stack([pred_flat, tgt_flat], 1)
sample_func = sample_pair(batch)
def sample_compute(_):
pairs = sample_func()
loss = compute_contra_loss(*pairs, hard_ratio=hard_ratio)
pct = tf.math.count_nonzero(loss, dtype=tf.float32) / tf.size(loss, out_type=tf.float32)
| tensorflow.reshape | 4,345 |
import tensorflow as tf
embedding = config.embeddings.add()
# Specify the embedding variable and the metadata
embedding.tensor_name = embedding_name
embedding.metadata_path = path_to_meta
# Project the embeddings to space dimensions for visualization
tf.contrib.tensorboard.plugins.projector.visualize_embeddings(summary_writer, config)
def add_train_stats(model, hparams):
with tf.variable_scope("stats") as scope:
for i in range(hparams.tacotron_num_gpus):
tf.summary.histogram("mel_outputs %d" % i, model.tower_mel_outputs[i])
tf.summary.histogram("mel_targets %d" % i, model.tower_mel_targets[i])
tf.summary.scalar("before_loss", model.before_loss)
tf.summary.scalar("after_loss", model.after_loss)
if hparams.predict_linear:
tf.summary.scalar("linear_loss", model.linear_loss)
for i in range(hparams.tacotron_num_gpus):
tf.summary.histogram("mel_outputs %d" % i, model.tower_linear_outputs[i])
tf.summary.histogram("mel_targets %d" % i, model.tower_linear_targets[i])
tf.summary.scalar("regularization_loss", model.regularization_loss)
| tensorflow.summary.histogram | 4,346 |
import tensorflow as tf
def main(_):
tf.logging.set_verbosity(tf.logging.INFO)
if not FLAGS.do_train and not FLAGS.do_eval:
raise ValueError("At least one of `do_train` or `do_eval` must be True.")
bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)
tf.gfile.MakeDirs(FLAGS.output_dir)
input_files = []
for input_pattern in FLAGS.input_file.split(","):
input_files.extend(tf.gfile.Glob(input_pattern))
tf.logging.info("*** Input Files ***")
for input_file in input_files:
| tensorflow.gfile.MakeDirs | 4,347 |
import tensorflow as tf
tf.contrib.tensorboard.plugins.projector.visualize_embeddings(summary_writer, config)
def add_train_stats(model, hparams):
with tf.variable_scope("stats") as scope:
for i in range(hparams.tacotron_num_gpus):
tf.summary.histogram("mel_outputs %d" % i, model.tower_mel_outputs[i])
tf.summary.histogram("mel_targets %d" % i, model.tower_mel_targets[i])
tf.summary.scalar("before_loss", model.before_loss)
tf.summary.scalar("after_loss", model.after_loss)
if hparams.predict_linear:
tf.summary.scalar("linear_loss", model.linear_loss)
for i in range(hparams.tacotron_num_gpus):
| tensorflow.summary.histogram | 4,348 |
import tensorflow as tf
def _input_fn():
# Provide the ability to decode a CSV
def decode_csv(line):
# all_data is a list of scalar tensors
all_data = tf.decode_csv(line, record_defaults = DEFAULTS)
inputs = all_data[:len(all_data) - N_OUTPUTS] # first N_INPUTS values
labels = all_data[len(all_data) - N_OUTPUTS:] # last N_OUTPUTS values
# Convert each list of rank R tensors to one rank R+1 tensor
inputs = tf.stack(inputs, axis = 0)
labels = tf.stack(labels, axis = 0)
# Convert input R+1 tensor into a feature dictionary of one R+1 tensor
features = {TIMESERIES_COL: inputs}
return features, labels
# Create list of files that match pattern
file_list = tf.gfile.Glob(filename)
| tensorflow.stack | 4,349 |
import tensorflow as tf
"output_bias", shape=[2], initializer=tf.zeros_initializer()
)
logits = tf.matmul(input_tensor, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
log_probs = tf.nn.log_softmax(logits, axis=-1)
labels = tf.reshape(labels, [-1])
one_hot_labels = tf.one_hot(labels, depth=2, dtype=tf.float32)
| tensorflow.nn.bias_add | 4,350 |
from tensorflow.python.ops import math_ops
Returns:
A [D1, ... DN] `Tensor` of false negative counts.
"""
with ops.name_scope(None, 'false_negatives', (predictions_idx, labels)):
labels, predictions_idx = _maybe_select_class_id(labels,
predictions_idx,
class_id)
fn = set_ops.set_size(set_ops.set_difference(predictions_idx,
labels,
aminusb=False))
fn = math_ops.to_double(fn)
if weights is not None:
weights = math_ops.to_double(weights)
fn = math_ops.mul(fn, weights)
return fn
def _streaming_sparse_false_negative_at_k(predictions_idx,
labels,
k,
class_id=None,
weights=None,
name=None):
"""Calculates weighted per step false negatives for recall@k.
| tensorflow.python.ops.math_ops.to_double | 4,351 |
import tensorflow as tf
pass
'''
def create_custom_regularizers(self):
# should not be an empty list
return [0.]
'''
# save in self.regularizers the regularizers of the model
def create_regularizers_and_updates(self):
wb_regularizers = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
# see keras_utils.py: activity_and_contractive_regularizers
ac_regularizers = tf.get_collection(AC_REGULARIZATION)
# if (not wb_regularizers) and (not ac_regularizers):
# wb_regularizers = [tf.constant(0.)]
#import pdb;pdb.set_trace()
if len(wb_regularizers)>0:
self.regularizers += wb_regularizers
if len(ac_regularizers)>0:
self.regularizers += ac_regularizers
# self.regularizers += ([self.custom_regularizers[r] for r in self._opts["regularizers"].keys() if len(self.custom_regularizers[r])>0])
# we need to flatten the list if we have both custom regularizers and another type of regularizers
# (weight/bias or contractive)
| tensorflow.get_collection | 4,352 |
import tensorflow as tf
import random
import tensorflow as tf
FLAGS = flags.FLAGS
# augmentation functions
# augment
def random_crop_and_resize(images, ratio=0.8):
b, h, w, c = images.get_shape().as_list()
ch, cw = map(lambda x: int(x * ratio), (h, w))
crop = tf.random_crop(images, size=[b, ch, cw, 3])
crop = tf.image.resize(crop, [h, w])
return crop
def random_apply(fn, image, prob=1.):
b, *_ = image.get_shape().as_list()
chance = tf.less(tf.random_uniform([b], 0, 1.0), prob)
return tf.where(chance, fn(image), tf.identity(image))
def color_distortion(image, s=1.0):
lower, upper, x = (1 - 0.8 * s), (1 + 0.8 * s), image
x = tf.image.random_brightness(x, max_delta=0.8*s)
| tensorflow.random_crop | 4,353 |
from tensorflow.python.platform import gfile
train_path = os.path.join(data_dir, 'dbpedia_csv/train.csv')
test_path = os.path.join(data_dir, 'dbpedia_csv/test.csv')
if not (gfile.Exists(train_path) and gfile.Exists(test_path)):
archive_path = base.maybe_download(
| tensorflow.python.platform.gfile.Exists | 4,354 |
import tensorflow as tf
for w, filter_size in enumerate(encoder.convolutions, 1):
filter_ = get_variable('filter_{}'.format(w), [w, encoder.embedding_size, filter_size])
if w > 1:
right = (w - 1) // 2
left = (w - 1) - right
pad_right = tf.tile(pad, [1, right, 1])
pad_left = tf.tile(pad, [1, left, 1])
inputs_ = tf.concat([pad_left, encoder_inputs_, pad_right], axis=1)
else:
inputs_ = encoder_inputs_
inputs_ = tf.nn.convolution(inputs_, filter=filter_, padding='VALID')
inputs.append(inputs_)
| tensorflow.tile | 4,355 |
import tensorflow as tf
# Use indices to lookup pixels in the flat image and restore
# channels dim
im_flat = tf.reshape(im, tf.stack([-1, channels]))
im_flat = tf.to_float(im_flat)
i_z0_y0_x0 = tf.gather(im_flat, idx_z0_y0_x0)
i_z0_y0_x1 = tf.gather(im_flat, idx_z0_y0_x1)
i_z0_y1_x0 = tf.gather(im_flat, idx_z0_y1_x0)
i_z0_y1_x1 = tf.gather(im_flat, idx_z0_y1_x1)
i_z1_y0_x0 = tf.gather(im_flat, idx_z1_y0_x0)
i_z1_y0_x1 = tf.gather(im_flat, idx_z1_y0_x1)
i_z1_y1_x0 = tf.gather(im_flat, idx_z1_y1_x0)
i_z1_y1_x1 = tf.gather(im_flat, idx_z1_y1_x1)
# Finally calculate interpolated values.
x0_f = tf.to_float(x0)
x1_f = tf.to_float(x1)
y0_f = tf.to_float(y0)
| tensorflow.gather | 4,356 |
import tensorflow as tf
prediction_dict: a dictionary holding prediction tensors to be
passed to the Loss or Postprocess functions.
"""
flattened_inputs = tf.contrib.layers.flatten(preprocessed_inputs)
class_prediction = tf.contrib.layers.fully_connected(
flattened_inputs, self._num_classes)
| tensorflow.contrib.layers.flatten | 4,357 |
import tensorflow as tf
"Cannot use sequence length %d because the model "
"was only trained up to sequence length %d" %
(FLAGS.max_seq_length, config.max_position_embeddings))
else:
config = None # Get the config from TF-Hub.
tf.gfile.MakeDirs(FLAGS.output_dir)
task_name = FLAGS.task_name.lower()
if task_name not in processors:
raise ValueError("Task not found: %s" % (task_name))
| tensorflow.gfile.MakeDirs | 4,358 |
import tensorflow as tf
def test_two(self):
with self.test_session() as session:
@dynamic_batching.batch_fn
def f(a, b):
batch_size = tf.shape(a)[0]
return a + b, tf.tile([batch_size], [batch_size])
output0 = f(tf.constant([1]), tf.constant([2]))
output1 = f(tf.constant([2]), tf.constant([3]))
tp = pool.ThreadPool(2)
f0 = tp.apply_async(session.run, [output0])
f1 = tp.apply_async(session.run, [output1])
# Make sure both inputs are in the batcher before starting it.
| tensorflow.constant | 4,359 |
import tensorflow as tf
"root", initializer=tf.constant_initializer(0.5)):
cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=True)
cell = tf.nn.rnn_cell.MultiRNNCell(cells=[cell] * 2,
state_is_tuple=True)
inp = tf.constant(0.5, shape=[2, 2, 2])
enc_outputs, enc_state = tf.nn.rnn(cell, inp, dtype=tf.float32)
attn_states = tf.concat(1, [tf.reshape(e, [-1, 1, cell.output_size])
for e in enc_outputs])
dec_inp = [tf.constant(0.4, shape=[2, 2])] * 3
dec, mem = tf.nn.seq2seq.attention_decoder(
dec_inp, enc_state,
attn_states, cell, output_size=4)
sess.run([tf.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 4), res[0].shape)
| tensorflow.constant | 4,360 |
import tensorflow as tf
if FLAGS.contrast_norm == 'areafactor':
image = tf.decode_raw(features['image_raw'], tf.float32)
else:
| tensorflow.decode_raw | 4,361 |
import tensorflow as tf
assert tf.get_variable_scope().reuse is False
epsilon = 1e-5
mean, var = tf.nn.moments(x, [1, 2], keep_dims=True)
scale = tf.get_variable('scale',[x.get_shape()[-1]],
initializer=tf.truncated_normal_initializer(mean=1.0, stddev=0.02))
offset = tf.get_variable('offset',[x.get_shape()[-1]],initializer=tf.constant_initializer(0.0))
out = scale*tf.div(x-mean, tf.sqrt(var+epsilon)) + offset
return out
def common_conv2d(layer_input,filters,f_size=4,stride=2,padding='SAME',norm=True,name='common_conv2d'):
"""Layers used during downsampling"""
with tf.variable_scope(name):
if reuse:
tf.get_variable_scope().reuse_variables()
else:
assert tf.get_variable_scope().reuse is False
d = tf.contrib.layers.conv2d(layer_input,filters,kernel_size=f_size,stride=stride,padding=padding)
if norm:
d = tf.contrib.layers.batch_norm(d)
d = lrelu(d,alpha=0.2)
return d
#def common_deconv2d(layer_input,skip_input, filters,f_size=4,stride=2,dropout_rate=0,name='common_deconv2d'):
| tensorflow.variable_scope | 4,362 |
import tensorflow as tf
blk_shape[0], q_shape[1] * strides[1], q_shape[2] * strides[2], 3
], strides)
blk_indices_crop = blk_indices_crop // tf.stack([1, strides[1], strides[2]])
return blk_indices_crop
def _strides_one():
# Calculate output indices when strides = 1.
return blk_indices[:, :q_shape[1], :q_shape[2], :]
strides_gt_one = tf.logical_or(tf.greater(strides[1], 1), tf.greater(strides[2], 1))
blk_indices_crop = tf.cond(strides_gt_one, _strides_gt_one, _strides_one)
y = tf.scatter_nd(blk_indices_crop, q, out_shape)
return y
return tf.cond(
tf.equal(tf.size(blk_indices_), 0), lambda: tf.zeros(out_shape, dtype=x.dtype),
_conv_nonzero)
| tensorflow.greater | 4,363 |
import tensorflow as tf
encoder_inputs_ = tf.concat(inputs, axis=2)
# if encoder.convolution_activation.lower() == 'relu':
encoder_inputs_ = tf.nn.relu(encoder_inputs_)
if encoder.maxout_stride:
| tensorflow.nn.relu | 4,364 |
import tensorflow as tf
'input_node': input_node,
'hidden_layers_node': hidden_layers_node,
'output_node': output_node,
'learning_rate': learning_rate,
'learning_rate_decay': learning_rate_decay,
'activation': activation,
'L1_reg': L1_reg,
'L2_reg': L2_reg,
'optimizer': optimizer,
'dropout': dropout_keep_prob
}
# Set random state
tf.set_random_seed(seed)
# create new Session for the DeepSurv Class
self.sess = tf.Session(graph=G)
# Initialize all global variables
self.sess.run(init_op)
def train(self, num_epoch=5000, iteration=-1,
plot_train_loss=False, plot_train_CI=False):
"""
Training DeepSurv network.
Parameters:
| tensorflow.set_random_seed | 4,365 |
import tensorflow as tf
def metric_fn(per_example_loss, label_ids, logits):
predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
precision = tf_metrics.precision(label_ids,predictions,11,[1,2,4,5,6,7,8,9],average="macro")
| tensorflow.argmax | 4,366 |
import tensorflow as tf
b = tf.get_variable("b", [nh*4], initializer=tf.constant_initializer(0.0))
gc = tf.get_variable("gc", [nh], initializer=tf.constant_initializer(1.0))
bc = tf.get_variable("bc", [nh], initializer=tf.constant_initializer(0.0))
c, h = tf.split(axis=1, num_or_size_splits=2, value=s)
for idx, (x, m) in enumerate(zip(xs, ms)):
c = c*(1-m)
h = h*(1-m)
z = _ln(tf.matmul(x, wx), gx, bx) + _ln(tf.matmul(h, wh), gh, bh) + b
i, f, o, u = tf.split(axis=1, num_or_size_splits=4, value=z)
i = tf.nn.sigmoid(i)
f = tf.nn.sigmoid(f)
o = tf.nn.sigmoid(o)
u = tf.tanh(u)
c = f*c + i*u
h = o*tf.tanh(_ln(c, gc, bc))
xs[idx] = h
s = tf.concat(axis=1, values=[c, h])
| tensorflow.matmul | 4,367 |
import tensorflow as tf
initializer=initialization.xavier_initializer(
shape=ff_dhw + [lower_feats, higher_feats],
dtype=self.dtype,
uniform=self.normal_initializer),
trainable=True))
setattr(
self,
'ff_bias_%s' % idx,
tf.get_variable(
name='%s_ff_bias_%s' % (self.layer_name, idx),
dtype=self.dtype,
initializer=tf.ones([higher_feats], dtype=self.dtype),
trainable=True))
lower_feats = higher_feats
# HGRU KERNELS
for idx, layer in enumerate(self.hgru_ids):
with tf.variable_scope(
'%s_hgru_weights_%s' % (self.layer_name, layer)):
setattr(
self,
'horizontal_kernels_%s' % layer,
| tensorflow.ones | 4,368 |
import tensorflow as tf
return x
def lnlstm(xs, ms, s, scope, nh, init_scale=1.0):
nbatch, nin = [v.value for v in xs[0].get_shape()]
with tf.variable_scope(scope):
wx = tf.get_variable("wx", [nin, nh*4], initializer=ortho_init(init_scale))
gx = tf.get_variable("gx", [nh*4], initializer=tf.constant_initializer(1.0))
bx = tf.get_variable("bx", [nh*4], initializer=tf.constant_initializer(0.0))
| tensorflow.variable_scope | 4,369 |
import tensorflow.contrib as contrib
if cross_stitch_enabled:
with tf.variable_scope("cross_stitch_3"):
stitch3_1, stitch3_2 = apply_cross_stitch(fc3_1, fc3_2)
else:
stitch3_1, stitch3_2 = fc3_1, fc3_2
dropout3_1 = contrib.layers.dropout(stitch3_1, keep_prob=keep_prob, is_training=is_training,
scope="dropout3_1")
dropout3_2 = contrib.layers.dropout(stitch3_2, keep_prob=keep_prob, is_training=is_training,
scope="dropout3_2")
output_1 = contrib.layers.fully_connected(dropout3_1, n_output_1, activation_fn=None, scope="output_1")
output_2 = contrib.layers.fully_connected(dropout3_2, n_output_2, activation_fn=None, scope="output_2")
with tf.variable_scope("loss"):
loss_base_1 = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_1, logits=output_1))
loss_base_2 = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_2, logits=output_2))
reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
loss_total = loss_base_1 + loss_base_2 + tf.reduce_sum(reg_losses)
with tf.variable_scope("evaluation"):
accuracy_1 = tf.reduce_mean(tf.cast(tf.equal(
| tensorflow.contrib.layers.fully_connected | 4,370 |
import tensorflow as tf
argus_t_ge_1 = m * tf.pow(u, p) * tf.exp(c * u)
return tf.maximum(tf.zeros_like(m), argus_t_ge_1, name="argus_pdf")
# // --- Construct signal+background PDF ---
# RooRealVar nsig("nsig","#signal events",200,0.,10000) ;
# RooRealVar nbkg("nbkg","#background events",800,0.,10000) ;
nsig = tf.Variable(200, name="nsig", dtype=tf.float64)
nbkg = tf.Variable(800, name="nbkg", dtype=tf.float64)
vdict['nsig'] = nsig
vdict['nbkg'] = nbkg
# RooAddPdf sum("sum","g+a",RooArgList(gauss,argus),RooArgList(nsig,nbkg)) ;
# // --- Generate a toyMC sample from composite PDF ---
| tensorflow.Variable | 4,371 |
import tensorflow as tf
expected_shape)
def testOutputStride8BuildAndCheckAllEndPointsUptoConv2d_13(self):
batch_size = 5
height, width = 224, 224
output_stride = 8
inputs = tf.random_uniform((batch_size, height, width, 3))
with slim.arg_scope([slim.conv2d, slim.separable_conv2d],
normalizer_fn=slim.batch_norm):
_, end_points = mobilenet_v1.mobilenet_v1_base(
inputs, output_stride=output_stride,
final_endpoint='Conv2d_13_pointwise')
_, explicit_padding_end_points = mobilenet_v1.mobilenet_v1_base(
| tensorflow.random_uniform | 4,372 |
import tensorflow as tf
# DropNeuro
# P_o = cost.lo_regularizer(0.03)(
# self.train_params[0]) # + cost.lo_regularizer(0.5)(self.train_params[2]) # <haodong>: if add lo on decoder, no neuron will be broken
# P_i = cost.li_regularizer(0.03)(self.train_params[0]) # + cost.li_regularizer(0.001)(self.train_params[2])
# L1 of activation outputs
activation_out = self.all_layers[-2]
L1_a = 0.001 * tf.reduce_mean(activation_out) # <haodong>: theano: T.mean( self.a[i] ) # some neuron are broken, white and black
# L1_a = 0.001 * tf.reduce_mean( tf.reduce_sum(activation_out, 0) ) # <haodong>: some neuron are broken, white and black
# L1_a = 0.001 * 100 * tf.reduce_mean( tf.reduce_sum(activation_out, 1) ) # <haodong>: some neuron are broken, white and black
# KL Divergence
beta = 4
rho = 0.15
p_hat = tf.reduce_mean(activation_out, 0) # theano: p_hat = T.mean( self.a[i], axis=0 )
| tensorflow.reduce_mean | 4,373 |
from tensorflow.python.ops import math_ops
grad = ops.IndexedSlices(grad_values, grad.indices, grad.dense_shape)
else:
grad *= math_ops.cast(multiplier, grad.dtype)
multiplied_grads_and_vars.append((grad, var))
| tensorflow.python.ops.math_ops.cast | 4,374 |
import tensorflow as tf
def call(self, embeddings, nwords):
raise NotImplementedError
def __call__(self):
with tf.variable_scope("task_independent"):
indices, num_tags, word_ids, nwords = self.input_layer()
embeddings = self.embedding_layer(word_ids)
| tensorflow.variable_scope | 4,375 |
import tensorflow as tf
if self.dale_ratio:
new_output = tf.matmul(
tf.nn.relu(new_state),
tf.matmul(
tf.abs(self.W_out) * self.output_Connectivity,
self.Dale_out,
name="in_2"),
transpose_b=True, name="3")\
+ self.b_out
| tensorflow.abs | 4,376 |
import tensorflow as tf
tf.gfile.DeleteRecursively(FLAGS.train_dir)
tf.gfile.MakeDirs(FLAGS.train_dir)
| tensorflow.gfile.MakeDirs | 4,377 |
import tensorflow as tf
def variable(self, name, shape, initializer,regularizer=None):
with tf.device('/cpu:0'):
return tf.get_variable(name, shape, initializer=initializer, regularizer=regularizer, trainable=True)
def fc_layer(self, bottom, in_size, out_size, name):
with tf.variable_scope(name):
weights, biases = self.get_fc_var(in_size, out_size, name)
x = tf.reshape(bottom, [-1, in_size])
fc = tf.nn.bias_add(tf.matmul(x, weights), biases)
tf.summary.histogram('weight', weights)
tf.summary.histogram('bias', biases)
return fc
def get_conv_var(self, filter_size, in_channels, out_channels, name):
initial_value = tf.truncated_normal([filter_size, filter_size, in_channels, out_channels], 0.0, stddev = 1 / math.sqrt(float(filter_size * filter_size)))
| tensorflow.reshape | 4,378 |
from tensorflow.python.framework import ops
# Ensures each output is a Tensor.
outputs = [ops.convert_to_tensor(_) for _ in outputs]
| tensorflow.python.framework.ops.convert_to_tensor | 4,379 |
import tensorflow as tf
decoder=decoders[1], training=training, encoders=decoders[:1]
)
target_weights = get_weights(targets[1][:, 1:], utils.EOS_ID, include_first_eos=True)
xent_loss += reconstruction_weight * sequence_loss(logits=reconstructed_outputs, targets=targets[1][:, 1:],
weights=target_weights)
max_src_len = tf.shape(reconstructed_weights)[1]
batch_size = tf.shape(reconstructed_weights)[0]
attn_loss = tf.matmul(reconstructed_weights, attention_weights) - tf.eye(max_src_len)
src_mask = tf.sequence_mask(encoder_input_length[0], maxlen=max_src_len, dtype=tf.float32)
src_mask = tf.einsum('ij,ik->ijk', src_mask, src_mask)
attn_loss *= tf.to_float(src_mask) # don't take padding words into account
| tensorflow.shape | 4,380 |
from tensorflow.python.ops import math_ops
def compute_mean_iou(name):
"""Compute the mean intersection-over-union via the confusion matrix."""
sum_over_row = math_ops.to_float(math_ops.reduce_sum(total_cm, 0))
sum_over_col = math_ops.to_float(math_ops.reduce_sum(total_cm, 1))
cm_diag = math_ops.to_float(array_ops.diag_part(total_cm))
denominator = sum_over_row + sum_over_col - cm_diag
# If the value of the denominator is 0, set it to 1 to avoid
# zero division.
denominator = math_ops.select(
math_ops.greater(denominator, 0),
denominator,
array_ops.ones_like(denominator))
iou = math_ops.div(cm_diag, denominator)
return math_ops.reduce_mean(iou, name=name)
mean_iou = compute_mean_iou('mean_iou')
if metrics_collections:
ops.add_to_collections(metrics_collections, mean_iou)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return mean_iou, update_op
| tensorflow.python.ops.math_ops.div | 4,381 |
import tensorflow as tf
'mesh_name': _bytes_feature(d['mesh_name']),
'near_surface_samples': _float_feature(d['near_surface_samples']),
'grid': _float_feature(d['grid']),
'world2grid': _float_feature(d['world2grid']),
'surface_point_samples': _float_feature(d['surface_point_samples'])
}
example_proto = tf.train.Example(features=tf.train.Features(feature=feature))
return example_proto.SerializeToString()
def full_featurespec():
return {
| tensorflow.train.Features | 4,382 |
import tensorflow as tf
img_name_batch, img_batch, gtboxes_and_label_batch, num_objects_batch, img_h_batch, img_w_batch = \
self.reader.next_batch(dataset_name=cfgs.DATASET_NAME,
batch_size=cfgs.BATCH_SIZE * num_gpu,
shortside_len=shortside_len,
is_training=True)
# data processing
inputs_list = []
for i in range(num_gpu):
img = tf.expand_dims(img_batch[i], axis=0)
pretrain_zoo = PretrainModelZoo()
if self.cfgs.NET_NAME in pretrain_zoo.pth_zoo or self.cfgs.NET_NAME in pretrain_zoo.mxnet_zoo:
img = img / tf.constant([cfgs.PIXEL_STD])
gtboxes_and_label_r = tf.py_func(backward_convert,
inp=[gtboxes_and_label_batch[i]],
Tout=tf.float32)
gtboxes_and_label_r = tf.reshape(gtboxes_and_label_r, [-1, 6])
| tensorflow.expand_dims | 4,383 |
import tensorflow as tf
loss = mse_loss + params['weight_decay'] * tf.add_n([tf.nn.l2_loss(v) for v in tf.trainable_variables() if 'batch_normalization' not in v.name])
total_loss = tf.identity(loss, name='total_loss')
tf.summary.scalar('loss', total_loss)
if mode == tf.estimator.ModeKeys.EVAL:
return tf.estimator.EstimatorSpec(mode=mode, loss=loss, predictions=predictions, eval_metric_ops=metrics)
if mode == tf.estimator.ModeKeys.TRAIN:
global_step = tf.train.get_or_create_global_step()
lr_values = [params['warmup_learning_rate']] + [base_learning_rate * decay for decay in params['lr_decay_factors']]
learning_rate = tf.train.piecewise_constant(tf.cast(global_step, tf.int32),
[params['warmup_steps']] + [int(float(ep)*params['steps_per_epoch']) for ep in params['decay_boundaries']],
lr_values)
truncated_learning_rate = tf.maximum(learning_rate, tf.constant(params['end_learning_rate'], dtype=learning_rate.dtype), name='learning_rate')
tf.summary.scalar('lr', truncated_learning_rate)
optimizer = tf.train.MomentumOptimizer(learning_rate=truncated_learning_rate,
momentum=params['momentum'])
# Batch norm requires update_ops to be added as a train_op dependency.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
train_op = optimizer.minimize(loss, global_step)
else:
train_op = None
return tf.estimator.EstimatorSpec(
| tensorflow.constant | 4,384 |
import tensorflow as tf
ops.reset_default_graph()
sess = tf.Session()
my_var = tf.Variable(tf.zeros([1,20]))
merged = tf.summary.merge_all()
writer = tf.summary.FileWriter("./logs", graph=sess.graph)
initialize_op = tf.global_variables_initializer()
sess.run(initialize_op)
| tensorflow.global_variables_initializer | 4,385 |
import tensorflow as tf
mask_loss = losses.mask_rcnn_loss(
model_outputs['mask_outputs'], model_outputs['mask_targets'],
model_outputs['select_class_targets'], params)
else:
mask_loss = 0.
if variable_filter_fn:
var_list = variable_filter_fn(tf.trainable_variables(),
params['resnet_depth'])
else:
var_list = None
l2_regularization_loss = _WEIGHT_DECAY * tf.add_n(
[tf.nn.l2_loss(v) for v in var_list
if 'batch_normalization' not in v.name and 'bias' not in v.name])
total_loss = (total_rpn_loss + total_fast_rcnn_loss + mask_loss +
l2_regularization_loss)
host_call = None
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer = create_optimizer(learning_rate, params)
optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer)
if not params['resnet_checkpoint']:
| tensorflow.nn.l2_loss | 4,386 |
import tensorflow as tf
feature_dict = {
k: tf.train.Feature(bytes_list=tf.train.BytesList(
value=[memoryview(v).tobytes()])) for k, v in data.items()}
return tf.train.Example(
features=tf.train.Features(feature=feature_dict)).SerializeToString()
def _deserialize(self, serialized_data, batch_size):
"""Convert serialized TFRecords into tensors.
Args:
| tensorflow.train.Features | 4,387 |
import tensorflow as tf
# make some fake noise
data_size = 100
noise_tensor = tf.random_normal((data_size, INPUT_DIM))
real_data_tensor = tf.random_uniform((data_size, OUTPUT_DIM))
dataset = tf.data.Dataset.from_tensor_slices((noise_tensor, real_data_tensor))
dataset = dataset.repeat().shuffle(10)
# TPUEstimator passes params when calling input_fn
batch_size = params.get('train_batch_size', 16)
| tensorflow.data.Dataset.from_tensor_slices | 4,388 |
import tensorflow as tf
weights_initializer=contrib.layers.variance_scaling_initializer(),
# l2 regularization
weights_regularizer=contrib.layers.l2_regularizer(reg_lambda),
# BN
normalizer_fn=contrib.layers.batch_norm,
normalizer_params={
"is_training": is_training,
"scale": True,
"updates_collections": None
}
):
fc1_1 = contrib.layers.fully_connected(X, 32, scope="fc1_1")
fc1_2 = contrib.layers.fully_connected(X, 32, scope="fc1_2")
if cross_stitch_enabled:
with tf.variable_scope("cross_stitch_1"):
stitch1_1, stitch1_2 = apply_cross_stitch(fc1_1, fc1_2)
else:
stitch1_1, stitch1_2 = fc1_1, fc1_2
fc2_1 = contrib.layers.fully_connected(stitch1_1, 32, scope="fc2_1")
fc2_2 = contrib.layers.fully_connected(stitch1_2, 32, scope="fc2_2")
if cross_stitch_enabled:
with tf.variable_scope("cross_stitch_2"):
stitch2_1, stitch2_2 = apply_cross_stitch(fc2_1, fc2_2)
else:
stitch2_1, stitch2_2 = fc2_1, fc2_2
dropout2_1 = contrib.layers.dropout(stitch2_1, keep_prob=keep_prob, is_training=is_training,
| tensorflow.variable_scope | 4,389 |
import tensorflow as tf
--------
>>> batch_size = 8
>>> x = tf.placeholder(tf.int32, shape=(batch_size, ))
>>> net = tl.layers.EmbeddingInputlayer(inputs=x, vocabulary_size=1000, embedding_size=50, name='embed')
... (8, 50)
"""
def __init__(
self,
inputs=None,
vocabulary_size=80000,
embedding_size=200,
E_init=tf.random_uniform_initializer(-0.1, 0.1),
E_init_args=None,
name='embedding',
):
if E_init_args is None:
E_init_args = {}
super(EmbeddingInputlayer, self).__init__(prev_layer=None, name=name)
logging.info("EmbeddingInputlayer %s: (%d, %d)" % (self.name, vocabulary_size, embedding_size))
self.inputs = inputs
with tf.variable_scope(name):
| tensorflow.random_uniform_initializer | 4,390 |
import tensorflow as tf
x0 = tf.to_int32(tf.floor(x))
x1 = x0 + 1
y0 = tf.to_int32(tf.floor(y))
y1 = y0 + 1
z0 = tf.to_int32(tf.floor(z))
| tensorflow.floor | 4,391 |
import tensorflow as tf
def _split_string(string):
"""Splits a byte string into an array of character bytes."""
text = tf.compat.as_text(string)
ret = np.empty(len(text), dtype=np.object)
for i, char in enumerate(text):
ret[i] = tf.compat.as_bytes(char)
return ret
def vocabulary(filename, max_size=None, num_oov_buckets=1):
"""Builds vocabulary and ID lookup tables from the given file."""
def _unique_chars(filename):
"""Returns the used alphabet as an array of strings."""
counts = collections.Counter()
with tf.gfile.Open(filename) as file_:
for line in file_:
counts.update(_split_string(line))
alphabet = [k for (k, _) in counts.most_common(max_size)]
alphabet.sort()
return np.asarray(alphabet, dtype=np.object)
chars, = tf.py_func(_unique_chars, [filename], [tf.string])
char_to_id = tf.contrib.lookup.index_table_from_tensor(
chars, num_oov_buckets=num_oov_buckets)
id_to_char = tf.contrib.lookup.index_to_string_table_from_tensor(chars, " ")
return char_to_id, id_to_char
def characters(filename, batch_size, sequence_size):
| tensorflow.gfile.Open | 4,392 |
import tensorflow as tf
phrase_starts: [batch_size, phrase_length]
vocab_dist: [batch_size, vsize]
attn_dist: [batch_size, phrase_length]
return: [batch_size, phrase_length]
'''
def singel_instance(x):
cur_passage_words = x[0] # [passage_length]
cur_phrase_starts = x[1] # [phrase_length]
cur_vocab_dist = x[2] # [vsize]
cur_attn_dist = x[3] # [passage_length]
# first: get the first word for each phrase
first_words = tf.gather(cur_passage_words, cur_phrase_starts) # [phrase_length]
# second: get the probs for each word
first_word_probs = tf.gather(cur_vocab_dist, first_words) # [phrase_length]
return cur_attn_dist + first_word_probs
elems = (in_passage_words, phrase_starts, vocab_dist, attn_dist)
return tf.map_fn(singel_instance, elems, dtype=tf.float32) # [batch_size, phrase_length]
class CovCopyAttenGen:
def __init__(self, placeholders, options, vocab):
self.options = options
self.vocab = vocab
self.cell = tf.contrib.rnn.LSTMCell(
options.gen_hidden_size,
initializer=tf.random_uniform_initializer(-0.1, 0.1, seed=113),
| tensorflow.gather | 4,393 |
import tensorflow as tf
mean = tools.StreamingMean((), tf.float32, key)
means.append(mean)
updates.append(mean.submit(value))
with tf.control_dependencies(updates):
# message = 'step/' + '/'.join(metrics.keys()) + ' = '
message = '{}: step/{} ='.format(name, '/'.join(metrics.keys()))
gs = tf.train.get_or_create_global_step()
print_metrics = tf.cond(
tf.equal(step % every, 0),
lambda: tf.print(message, [gs] + [mean.clear() for mean in means]),
tf.no_op)
return print_metrics
def collect_initial_episodes(config):
items = config.random_collects.items()
| tensorflow.equal | 4,394 |
import tensorflow as tf
with tf.variable_scope("evaluation"):
accuracy_1 = tf.reduce_mean(tf.cast(tf.equal(
tf.argmax(output_1, axis=-1),
tf.argmax(y_1, axis=-1)), tf.float32), name="accuracy_1")
accuracy_2 = tf.reduce_mean(tf.cast(tf.equal(
tf.argmax(output_2, axis=-1),
tf.argmax(y_2, axis=-1)), tf.float32), name="accuracy_2")
accuracy = tf.divide(accuracy_1 + accuracy_2, 2.0, name="accuracy")
with tf.variable_scope("train"):
global_step = tf.get_variable("global_step", shape=(), dtype=tf.int32, trainable=False)
train_op = tf.train.AdamOptimizer(learning_rate=lr).minimize(loss_total, global_step=global_step)
with tf.variable_scope("summary"):
summary_loss_total = tf.summary.scalar("loss_total", loss_total)
summary_accuracy_test = tf.summary.scalar("accuracy_test", accuracy)
summary_accuracy_train = tf.summary.scalar("accuracy_train", accuracy)
| tensorflow.variable_scope | 4,395 |
import tensorflow as tf
ratio = action_op.prob(self.tfa) / \
(old_action_op.prob(self.tfa) + 1e-5)
# surrogate loss
surr = ratio * self.tfadv
# minimize the surrogate loss
self.aloss = -tf.reduce_mean(tf.minimum(
surr,
tf.clip_by_value(ratio, 1. - EPSILON, 1. + EPSILON) * self.tfadv))
self.atrain_op = tf.train.AdamOptimizer(A_LR).minimize(self.aloss)
# log
self.train_writer = tf.summary.FileWriter("logs/", self.sess.graph)
self.sess.run(tf.global_variables_initializer())
self.tableAction = self.createActionTable()
| tensorflow.clip_by_value | 4,396 |
import tensorflow as tf
return loss
def crf_decode_layer(self, logits, crf_params, nwords):
with tf.name_scope("CRF_decode"):
pred_ids, _ = tf.contrib.crf.crf_decode(logits, crf_params, nwords)
return pred_ids
| tensorflow.name_scope | 4,397 |
import tensorflow as tf
eval_file = os.path.join(FLAGS.output_dir, "eval.tf_record")
if not tf.gfile.Exists(eval_file) or not FLAGS.data_converted:
file_based_convert_examples_to_features(
| tensorflow.gfile.Exists | 4,398 |
from tensorflow.python.estimator.canned import head as head_lib
regressor.export(self._export_dir_base)
def testRankingDontThrowExceptionForForEstimator(self):
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 2
learner_config.constraints.max_tree_depth = 1
model_dir = tempfile.mkdtemp()
config = run_config.RunConfig()
head_fn = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
loss_reduction=losses.Reduction.SUM_OVER_NONZERO_WEIGHTS)
model = estimator.GradientBoostedDecisionTreeRanker(
head=head_fn,
learner_config=learner_config,
num_trees=1,
examples_per_layer=3,
model_dir=model_dir,
| tensorflow.python.estimator.canned.head._binary_logistic_head_with_sigmoid_cross_entropy_loss | 4,399 |
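Because every record carries its `seed_api` label, a quick tally of those labels shows which TensorFlow namespaces dominate the preview. A small sketch, assuming the rows have already been parsed into `(seed, seed_api, index)` tuples; the three sample labels are taken from the first rows above.

```python
from collections import Counter


def api_histogram(rows, depth=2):
    """Count rows per truncated API namespace, e.g. 'tensorflow.nn'."""
    counts = Counter()
    for _, seed_api, _ in rows:
        counts[".".join(seed_api.split(".")[:depth])] += 1
    return counts


sample = [
    ("...", "tensorflow.nn.dropout", 4300),
    ("...", "tensorflow.nn.conv2d", 4301),
    ("...", "tensorflow.train.CheckpointSaverHook", 4302),
]
print(api_histogram(sample))  # Counter({'tensorflow.nn': 2, 'tensorflow.train': 1})
```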