seed (string, length 25–2.89k) | seed_api (string, length 14–102) | index (int64, 0–14.8k)
---|---|---|
from tensorflow.python.ops.rnn_cell_impl import _Linear
"""Gated recurrent unit (GRU) with nunits cells."""
if self._gate_linear is None:
bias_ones = self._bias_initializer
if self._bias_initializer is None:
bias_ones = init_ops.constant_initializer(1.0, dtype=inputs.dtype)
with vs.variable_scope("gates"): # Reset gate and update gate.
self._gate_linear = _Linear(
[inputs, state],
2 * self._num_units,
True,
bias_initializer=bias_ones,
kernel_initializer=self._kernel_initializer)
| tensorflow.python.ops.rnn_cell_impl._Linear | 6,400 |
import tensorflow as tf
texts_test = [x for ix, x in enumerate(texts) if ix in test_indices]
target_train = [x for ix, x in enumerate(target) if ix in train_indices]
target_test = [x for ix, x in enumerate(target) if ix in test_indices]
# Setup Index Matrix for one-hot-encoding
identity_mat = tf.diag(tf.ones(shape=[embedding_size]))
# Create variables for logistic regression
A = tf.Variable(tf.random_normal(shape=[embedding_size,1]))
b = tf.Variable(tf.random_normal(shape=[1,1]))
# Initialize placeholders
x_data = tf.placeholder(shape=[sentence_size], dtype=tf.int32)
y_target = tf.placeholder(shape=[1, 1], dtype=tf.float32)
# Text-Vocab Embedding
x_embed = tf.nn.embedding_lookup(identity_mat, x_data)
x_col_sums = tf.reduce_sum(x_embed, 0)
# Declare model operations
x_col_sums_2D = tf.expand_dims(x_col_sums, 0)
model_output = tf.add(tf.matmul(x_col_sums_2D, A), b)
# Declare loss function (Cross Entropy loss)
loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=model_output, labels=y_target))
| tensorflow.placeholder | 6,401 |
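A minimal sketch of the identity-matrix one-hot trick the snippet above relies on, using a made-up vocabulary size of 5: looking up rows of an identity matrix turns word ids into one-hot vectors, and summing them over the sentence yields a bag-of-words count vector.
import tensorflow as tf
identity_mat = tf.diag(tf.ones(shape=[5]))                 # 5 x 5 identity, one row per vocab entry
word_ids = tf.constant([0, 2, 2, 4])
one_hot = tf.nn.embedding_lookup(identity_mat, word_ids)   # shape (4, 5), one-hot row per word
bow_counts = tf.reduce_sum(one_hot, 0)                     # expected [1., 0., 2., 0., 1.]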
import tensorflow as tf
# Note this can be implemented in terms of cumprod and cumsum,
# approximately as (ignoring boundary issues and initial_value):
#
# cumsum(decay_prods * sequence) / decay_prods
# where decay_prods = reverse_cumprod(decay)
#
# One reason this hasn't been done is that multiplying then dividing again by
# products of decays isn't ideal numerically, in particular if any of the
# decays are zero it results in NaNs.
with tf.name_scope(name, values=[sequence, decay, initial_value]):
if sequence_lengths is not None:
# Zero out sequence and decay beyond sequence_lengths.
with tf.control_dependencies(
[tf.assert_equal(sequence.shape[0], decay.shape[0])]):
mask = tf.sequence_mask(sequence_lengths, maxlen=sequence.shape[0],
dtype=sequence.dtype)
mask = tf.transpose(mask)
# Adding trailing dimensions to mask to allow for broadcasting.
to_seq = mask.shape.dims + [1] * (sequence.shape.ndims - mask.shape.ndims)
sequence *= tf.reshape(mask, to_seq)
to_decay = mask.shape.dims + [1] * (decay.shape.ndims - mask.shape.ndims)
decay *= tf.reshape(mask, to_decay)
sequences = [sequence, decay]
if reverse:
sequences = [_reverse_seq(s, sequence_lengths) for s in sequences]
| tensorflow.sequence_mask | 6,402 |
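The comment above sketches a cumsum/cumprod formulation of the scan; a minimal transcription of that identity for 1-D tensors with nonzero decays (as the comment notes, this form is numerically unsafe when any decay is zero) might look like:
import tensorflow as tf
sequence = tf.constant([1.0, 2.0, 3.0])
decay = tf.constant([0.5, 0.5, 0.5])
decay_prods = tf.cumprod(decay, reverse=True)              # reverse_cumprod(decay) from the comment
scan_like = tf.cumsum(decay_prods * sequence) / decay_prods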
import tensorflow as tf
b_init_args = {}
if self.inputs.get_shape().ndims != 2:
raise Exception("The input dimension must be rank 2, please reshape or flatten it")
n_in = int(self.inputs.get_shape()[-1])
with tf.variable_scope(name):
W = tf.get_variable(name='W', shape=(n_in, n_units), initializer=W_init, dtype=LayersConfig.tf_dtype, **W_init_args)
if b_init is not None:
try:
b = tf.get_variable(name='b', shape=(n_units), initializer=b_init, dtype=LayersConfig.tf_dtype, **b_init_args)
except Exception: # If initializer is a constant, do not specify shape.
b = tf.get_variable(name='b', initializer=b_init, dtype=LayersConfig.tf_dtype, **b_init_args)
self.outputs = act(tf.matmul(self.inputs, W) + b)
else:
self.outputs = act(tf.matmul(self.inputs, W))
self.all_layers.append(self.outputs)
if b_init is not None:
self.all_params.extend([W, b])
else:
self.all_params.append(W)
class ReconLayer(DenseLayer):
| tensorflow.get_variable | 6,403 |
import tensorflow as tf
print(imgshape)
self.output_height, self.output_width = imgshape[-3:-1]
self.batch_size = imgshape[1]
featsize = 1024
srcimg = image[0]
tgtimg = image[2]
tgtctx = image[1]
with tf.variable_scope("conv_context") as scope:
tgtctx_h0 = lrelu(conv2d(tgtctx, self.df_dim, name='h0_conv'))
tgtctx_h1 = lrelu(conv2d(tgtctx_h0, self.df_dim*2, name='h1_conv'))
tgtctx_h2 = lrelu(conv2d(tgtctx_h1, self.df_dim*4, name='h2_conv'))
tgtctx_h3 = lrelu(conv2d(tgtctx_h2, self.df_dim*8, name='h3_conv'))
tgtctx_h4 = lrelu(linear(tf.reshape(tgtctx_h3, [self.batch_size, -1]), featsize, 'h4_lin'))
tgtctx_z = linear(tgtctx_h4, featsize, 'hz_lin')
with tf.variable_scope("conv") as scope:
srcimg_h0 = lrelu(conv2d(srcimg, self.df_dim, name='h0_conv'))
srcimg_h1 = lrelu(conv2d(srcimg_h0, self.df_dim*2, name='h1_conv'))
srcimg_h2 = lrelu(conv2d(srcimg_h1, self.df_dim*4, name='h2_conv'))
srcimg_h3 = lrelu(conv2d(srcimg_h2, self.df_dim*8, name='h3_conv'))
print(srcimg_h3.get_shape())
srcimg_h4 = lrelu(linear(tf.reshape(srcimg_h3, [self.batch_size, -1]), featsize, 'h4_lin'))
srcimg_z = lrelu(linear(srcimg_h4, featsize, 'hz_lin'))
scope.reuse_variables()
| tensorflow.reshape | 6,404 |
import tensorflow as tf
save._add_collection_def(meta_graph_def, save)
self.assertEqual(len(meta_graph_def.collection_def), 0)
# Verifies that collection where item type does not match expected
# type will not be added.
tf.add_to_collection("int_collection", 3)
tf.add_to_collection("int_collection", 3.5)
save._add_collection_def(meta_graph_def, "int_collection")
self.assertEqual(len(meta_graph_def.collection_def), 0)
def _testMultiSaverCollectionSave(self):
test_dir = self._TestDir("saver_collection")
filename = os.path.join(test_dir, "metafile")
saver0_ckpt = os.path.join(test_dir, "saver0.ckpt")
saver1_ckpt = os.path.join(test_dir, "saver1.ckpt")
with self.test_session(graph=tf.Graph()) as sess:
# Creates a graph.
v0 = tf.Variable(10.0, name="v0")
v1 = tf.Variable(11.0, name="v1")
# Creates 2 savers.
saver0 = tf.train.Saver({"v0": v0}, name="saver0")
saver1 = tf.train.Saver({"v1": v1}, name="saver1")
tf.add_to_collection("savers", saver0)
tf.add_to_collection("savers", saver1)
tf.initialize_all_variables().run()
# Saves to different checkpoints.
saver0.save(sess, saver0_ckpt)
saver1.save(sess, saver1_ckpt)
# Generates MetaGraphDef.
meta_graph_def = tf.train.export_meta_graph(filename)
| tensorflow.Graph | 6,405 |
import tensorflow as tf
import tensorflow as tf
def _smooth_l1_loss(y_true, y_pred):
t = tf.abs(y_pred - y_true)
return tf.where(t < 1, 0.5 * t ** 2, t - 0.5)
def MultiBoxLoss(num_class=2, neg_pos_ratio=3):
"""multi-box loss"""
def multi_box_loss(y_true, y_pred):
num_batch = tf.shape(y_true)[0]
num_prior = tf.shape(y_true)[1]
loc_pred = tf.reshape(y_pred[0], [num_batch * num_prior, 4])
landm_pred = tf.reshape(y_pred[1], [num_batch * num_prior, 10])
class_pred = tf.reshape(y_pred[2], [num_batch * num_prior, num_class])
loc_true = tf.reshape(y_true[..., :4], [num_batch * num_prior, 4])
landm_true = tf.reshape(y_true[..., 4:14], [num_batch * num_prior, 10])
landm_valid = tf.reshape(y_true[..., 14], [num_batch * num_prior, 1])
class_true = tf.reshape(y_true[..., 15], [num_batch * num_prior, 1])
# define filter mask: class_true = 1 (pos), 0 (neg), -1 (ignore)
# landm_valid = 1 (w landm), 0 (w/o landm)
| tensorflow.shape | 6,406 |
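A minimal sketch exercising both branches of the `_smooth_l1_loss` defined above, inlined so it stands alone: absolute errors below 1 take the quadratic branch, larger errors the linear one.
import tensorflow as tf
y_true = tf.constant([0.0, 0.0])
y_pred = tf.constant([0.5, 2.0])
t = tf.abs(y_pred - y_true)
loss = tf.where(t < 1, 0.5 * t ** 2, t - 0.5)              # expected [0.125, 1.5]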
import tensorflow as tf
end_logits = tf.contrib.layers.layer_norm(end_logits,
begin_norm_axis=-1)
end_logits = tf.layers.dense(
end_logits,
| tensorflow.layers.dense | 6,407 |
import tensorflow as tf
self.db_loss_fake_sum = tf.summary.scalar("db_loss_fake", self.D_B_loss_fake)
self.da_loss_real_sum = tf.summary.scalar("da_loss_real", self.D_A_loss_real)
self.da_loss_fake_sum = tf.summary.scalar("da_loss_fake", self.D_A_loss_fake)
self.d_sum = tf.summary.merge(
[self.loss_da_sum, self.da_loss_real_sum, self.da_loss_fake_sum,
self.loss_db_sum, self.db_loss_real_sum, self.db_loss_fake_sum,
self.loss_d_sum]
)
trainable_variables = tf.trainable_variables()
self.d_variables = [var for var in trainable_variables if 'discriminator' in var.name]
self.g_variables = [var for var in trainable_variables if 'generator' in var.name]
print ('Variable printing start :' )
for var in self.d_variables:
print(var.name)
self.test_image_A = tf.placeholder(tf.float32,[None, self.image_size,self.image_size,self.input_dim], name='test_A')
| tensorflow.trainable_variables | 6,408 |
import tensorflow as tf
tf.app.flags.DEFINE_string('comment', '', 'Comment to leave by the model')
tf.app.flags.DEFINE_float('test_max', 10000, 'max number of examples in the test set')
tf.app.flags.DEFINE_integer('max_epochs', 0, 'Train for at most this number of epochs')
tf.app.flags.DEFINE_integer('save_every', 250, 'Save model state every INT epochs')
tf.app.flags.DEFINE_integer('eval_every', 25, 'Save encoding and visualizations every')
tf.app.flags.DEFINE_integer('visualiza_max', 10, 'Max pairs to show on visualization')
tf.app.flags.DEFINE_boolean('load_state', True, 'Load state if possible ')
tf.app.flags.DEFINE_boolean('kill_depth', False, 'Ignore depth information')
tf.app.flags.DEFINE_boolean('dev', False, 'Indicate development mode')
tf.app.flags.DEFINE_integer('batch_size', 128, 'Batch size')
tf.app.flags.DEFINE_float('learning_rate', 0.0001, 'Create visualization of ')
tf.app.flags.DEFINE_float('blur', 5.0, 'Max sigma value for Gaussian blur applied to training set')
tf.app.flags.DEFINE_boolean('new_blur', False, 'Use data augmentation as blur info')
tf.app.flags.DEFINE_integer('blur_decrease', 10000, 'Decrease image blur every X steps')
FLAGS = tf.app.flags.FLAGS
slim = tf.contrib.slim
AUTOENCODER = 'ae'
PREDICTIVE = 'pred'
DENOISING = 'noise'
| tensorflow.app.flags.DEFINE_float | 6,409 |
import tensorflow as tf
num_enc_timesteps = 2
num_dec_timesteps = 3
def TestModel(seq2seq):
with self.test_session(graph=tf.Graph()) as sess:
tf.set_random_seed(111)
random.seed(111)
np.random.seed(111)
enc_inp = [tf.constant(i + 1, tf.int32, shape=[batch_size])
| tensorflow.set_random_seed | 6,410 |
import tensorflow as tf
saved_shapes = reader.get_variable_to_shape_map()
model_names = tf.model_variables() # Used by tf.slim layers
if not len(tf.model_variables()):
model_names = tf.global_variables() # Fallback when slim is not used
model_names = set([v.name.split(':')[0] for v in model_names])
checkpoint_names = set(saved_shapes.keys())
| tensorflow.global_variables | 6,411 |
import tensorflow as tf
with tf.name_scope('image_preprocess'):
if image.dtype.base_dtype != tf.float32:
image = tf.cast(image, tf.float32)
mean = [0.485, 0.456, 0.406] # rgb
std = [0.229, 0.224, 0.225]
if self.image_bgr:
mean = mean[::-1]
std = std[::-1]
image_mean = tf.constant(mean, dtype=tf.float32) * 255.
image_std = tf.constant(std, dtype=tf.float32) * 255.
image = (image - image_mean) / image_std
return image
@staticmethod
def compute_loss_and_error(logits, label, label_smoothing=0.):
if label_smoothing != 0.:
nclass = logits.shape[-1]
label = tf.one_hot(label, nclass) if label.shape.ndims == 1 else label
| tensorflow.constant | 6,412 |
import tensorflow as tf
y_pairs = tf.reshape(output, [-1,2]) # fold: 1 x n -> [n/2 x 2]
pos_scores, neg_scores = tf.split(1, 2, y_pairs) # separate pairs
hinge_losses = tf.nn.relu(margin - pos_scores + neg_scores)
total_hinge_loss = tf.reduce_sum(hinge_losses)
return output, total_hinge_loss
| tensorflow.reduce_sum | 6,413 |
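A minimal sketch of the fold-and-hinge pattern above with a margin of 1.0, using column slicing in place of the older `tf.split(1, 2, ...)` call: a pair whose positive score beats its negative by more than the margin contributes zero loss.
import tensorflow as tf
output = tf.constant([[2.0, 0.5, 0.3, 0.9]])               # interleaved [pos, neg, pos, neg] scores
y_pairs = tf.reshape(output, [-1, 2])                      # [[2.0, 0.5], [0.3, 0.9]]
pos_scores, neg_scores = y_pairs[:, 0], y_pairs[:, 1]
hinge_losses = tf.nn.relu(1.0 - pos_scores + neg_scores)   # expected [0.0, 1.6]
total_hinge_loss = tf.reduce_sum(hinge_losses)             # expected 1.6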
from tensorflow.python.ops import math_ops
appropriately and whose value matches `accuracy`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
predictions, labels = tensor_util.remove_squeezable_dimensions(
predictions, labels)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
if labels.dtype != predictions.dtype:
predictions = math_ops.cast(predictions, labels.dtype)
is_correct = math_ops.to_float(math_ops.equal(predictions, labels))
return streaming_mean(is_correct, weights, metrics_collections,
updates_collections, name or 'accuracy')
@deprecated_args(IGNORE_MASK_DATE, IGNORE_MASK_INSTRUCTIONS, 'ignore_mask')
def streaming_precision(predictions, labels, ignore_mask=None, weights=None,
metrics_collections=None, updates_collections=None,
name=None):
"""Computes the precision of the predictions with respect to the labels.
| tensorflow.python.ops.math_ops.cast | 6,414 |
import tensorflow as tf
res = render(Monitor(make_env(FLAGS.env.id, task_config=task), f"./{setting}/{taskname}-testtask{TEST_TASK_NUM}-slbo{T}/", force=True, video_callable=lambda episode_id: True), policy)
print ('test_summary_slbo:', test_summary['slbo'][TEST_TASK_NUM])
if not test:
np.save(f'{setting}/{taskname}.task{TASK_NUM}.saver', saver.state_dict())
np.save(f'{setting}/{taskname}.final.saver', saver.state_dict())
if init_generator and TASK_NUM==0:
print ('finished init generator!')
exit(0)
pol_params, warm_params = tf.get_default_session().run([nn.utils.parameters_to_vector(policy.parameters()), nn.utils.parameters_to_vector(warmup_policy.parameters())])
print ("After SLBO, pol_params_norm:", np.linalg.norm(pol_params), "warm_params_norm:", np.linalg.norm(warm_params))
eval_rollout(runners['train'], policy, 'Use optimal policy to collect data from real env')
optimal_collect_real = []
t3 = time.time()
slbo_time = t3 - t2
evaluate(settings, 'post-slbo')
logger.info(f'Warmup time = {warmup_time}, SLBO time = {slbo_time}')
| tensorflow.get_default_session | 6,415 |
import tensorflow as tf
print_obj(
"grouped_minibatch_stddev",
"grouped_image",
grouped_image
)
# Find the mean of each group.
# shape = (
# 1,
# cur_batch_size / group_size,
# image_size,
# image_size,
# num_channels
# )
grouped_mean = tf.reduce_mean(
input_tensor=grouped_image,
axis=0,
keepdims=True,
name="grouped_mean"
)
print_obj(
"grouped_minibatch_stddev", "grouped_mean", grouped_mean
)
# Center each group using the mean.
# shape = (
# group_size,
# cur_batch_size / group_size,
| tensorflow.reduce_mean | 6,416 |
import tensorflow as tf
self._initial_state_name = util.with_prefix(self._name, "initial")
self._final_state_name = util.with_prefix(self._name, "final")
util.export_state_tuples(self._initial_state, self._initial_state_name)
util.export_state_tuples(self._final_state, self._final_state_name)
def import_ops(self):
"""Imports ops from collections."""
if self._is_training:
self._train_op = tf.get_collection_ref("train_op")[0]
self._lr = tf.get_collection_ref("lr")[0]
self._new_lr = tf.get_collection_ref("new_lr")[0]
self._lr_update = tf.get_collection_ref("lr_update")[0]
rnn_params = tf.get_collection_ref("rnn_params")
if self._cell and rnn_params:
params_saveable = tf.contrib.cudnn_rnn.RNNParamsSaveable(
self._cell,
self._cell.params_to_canonical,
self._cell.canonical_to_params,
rnn_params,
base_variable_scope="Model/RNN")
tf.add_to_collection(tf.GraphKeys.SAVEABLE_OBJECTS, params_saveable)
self._cost = tf.get_collection_ref(util.with_prefix(self._name, "cost"))[0]
| tensorflow.get_collection_ref | 6,417 |
import tensorflow as tf
mask = tf.ones([cutout_size, cutout_size], dtype=tf.int32)
start_x = tf.random.uniform(shape=(1,), minval=0, maxval=im_width, dtype=tf.int32)
start_y = tf.random.uniform(shape=(1,), minval=0, maxval=im_height, dtype=tf.int32)
mask = tf.pad(mask, [[cutout_size + start_y[0], im_height - start_y[0]],
[cutout_size + start_x[0], im_width - start_x[0]]])
mask = mask[cutout_size: cutout_size + im_height,
cutout_size: cutout_size + im_width]
mask = tf.tile(tf.reshape(mask, (im_height, im_width, 1)), (1, 1, 3))
image = tf.where(tf.equal(mask, 0), x=image, y=tf.zeros_like(image))
return image
def _add_drop_path(self, X, keep_prob):
with tf.variable_scope('drop_path'):
batch_size = tf.shape(X)[0]
noise_shape = (batch_size, 1, 1, 1)
random_tensor = keep_prob + tf.random_uniform(noise_shape, dtype=tf.float32)
| tensorflow.zeros_like | 6,418 |
from tensorflow.contrib import cluster_resolver as contrib_cluster_resolver
if FLAGS.use_tpu and FLAGS.tpu_name:
tpu_cluster_resolver = contrib_cluster_resolver.TPUClusterResolver(
| tensorflow.contrib.cluster_resolver.TPUClusterResolver | 6,419 |
import tensorflow as tf
Args:
x: Tensor, 4D BHWD input maps
n_out: integer, depth of input maps
phase_train: boolean tf.Variable, true indicates training phase
scope: string, variable scope
affn: whether to affn-transform outputs
Return:
normed: batch-normalized maps
Ref: http://stackoverflow.com/questions/33949786/how-could-i-use-batch-normalization-in-tensorflow/33950177
"""
name = 'batch_norm'
with tf.variable_scope(name):
phase_train = tf.convert_to_tensor(phase_train, dtype=tf.bool)
n_out = int(x.get_shape()[3])
beta = tf.Variable(tf.constant(0.0, shape=[n_out], dtype=x.dtype),
name=name+'/beta', trainable=True, dtype=x.dtype)
gamma = tf.Variable(tf.constant(1.0, shape=[n_out], dtype=x.dtype),
name=name+'/gamma', trainable=True, dtype=x.dtype)
batch_mean, batch_var = tf.nn.moments(x, [0,1,2], name='moments')
ema = tf.train.ExponentialMovingAverage(decay=0.9)
def mean_var_with_update():
ema_apply_op = ema.apply([batch_mean, batch_var])
with tf.control_dependencies([ema_apply_op]):
| tensorflow.convert_to_tensor | 6,420 |
import tensorflow as tf
with self.assertRaises(tf.errors.CancelledError):
session.run(output)
with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
'Output shape must have a batch dimension'):
coord.join()
def test_output_must_have_same_batch_dimension_size_as_input(self):
with self.test_session() as session:
@dynamic_batching.batch_fn
def f(_):
return tf.constant([1, 2, 3, 4])
output = f(tf.constant([1]))
coord = tf.train.Coordinator()
tf.train.start_queue_runners(coord=coord)
with self.assertRaises(tf.errors.CancelledError):
session.run(output)
with self.assertRaisesRegexp(
tf.errors.InvalidArgumentError,
'Output shape must have the same batch dimension as the input batch '
'size. Expected: 1 Observed: 4'):
coord.join()
| tensorflow.constant | 6,421 |
import tensorflow as tf
if is_training:
return_dict["start_log_probs"] = start_log_probs
return_dict["end_log_probs"] = end_log_probs
else:
return_dict["start_top_log_probs"] = start_top_log_probs
return_dict["start_top_index"] = start_top_index
return_dict["end_top_log_probs"] = end_top_log_probs
return_dict["end_top_index"] = end_top_index
# an additional layer to predict answerability
with tf.variable_scope("answer_class"):
# get the representation of CLS
cls_index = tf.one_hot(cls_index, seq_len, axis=-1, dtype=tf.float32)
cls_feature = tf.einsum("lbh,bl->bh", output, cls_index)
# get the representation of START
start_p = tf.nn.softmax(start_logits_masked, axis=-1,
name="softmax_start")
start_feature = tf.einsum("lbh,bl->bh", output, start_p)
# note(zhiliny): no dependency on end_feature so that we can obtain
# one single `cls_logits` for each sample
ans_feature = tf.concat([start_feature, cls_feature], -1)
ans_feature = tf.layers.dense(
ans_feature,
xlnet_config.d_model,
| tensorflow.einsum | 6,422 |
import tensorflow as tf
b = None
if use_bias:
b = tf.get_variable('b', [out_channel], initializer=b_init)
if split == 1:
conv = tf.nn.conv2d(inputdata, w, strides, padding, data_format=data_format)
else:
inputs = tf.split(inputdata, split, channel_axis)
kernels = tf.split(w, split, 3)
outputs = [tf.nn.conv2d(i, k, strides, padding, data_format=data_format)
for i, k in zip(inputs, kernels)]
conv = tf.concat(outputs, channel_axis)
ret = tf.identity(tf.nn.bias_add(conv, b, data_format=data_format)
if use_bias else conv, name=name)
return ret
| tensorflow.split | 6,423 |
import tensorflow as tf
sigma = tf.layers.dense(input_tf, num_action, tf.nn.softplus, kernel_initializer=w_init, name='sigma') # estimated variance
return mu,sigma;
class PolicyEstimator_MountainCarContinuous():
def __init__(self, entropy_beta=0.1, learning_rate=0.001, par_idx=0,scope="policy_estimator"):
w_init = tf.random_normal_initializer(0.,.1);
with tf.variable_scope(scope+"_"+str(par_idx)):
# state, target and action
self.state = tf.placeholder(tf.float32, [None,400], name="state")
self.target = tf.placeholder(tf.float32,[None,1], name="target")
self.a_his = tf.placeholder(tf.float32, [None, num_action], name="action_hist")
# layers
# wrap output
self.mu = self.mu * action_bound[1];
self.sigma = self.sigma + 1e-5
self.normal_dist = tf.contrib.distributions.Normal(self.mu, self.sigma)
self.action = tf.squeeze(self.normal_dist.sample(1),axis=0);
| tensorflow.placeholder | 6,424 |
import tensorflow as tf
op = tf.assign(perturbed_var, var)
perturb_ops.append(op)
assert len(perturb_ops) == len(all_vars)
return tf.group(*perturb_ops)
# Set up functionality to re-compute `param_noise_scale`. This perturbs yet another copy
# of the network and measures the effect of that perturbation in action space. If the perturbation
# is too big, reduce scale of perturbation, otherwise increase.
q_values_adaptive = q_func(observations_ph.get(), num_actions, scope="adaptive_q_func")
perturb_for_adaption = perturb_vars(original_scope="q_func", perturbed_scope="adaptive_q_func")
kl = tf.reduce_sum(tf.nn.softmax(q_values) * (tf.log(tf.nn.softmax(q_values)) - tf.log(tf.nn.softmax(q_values_adaptive))), axis=-1)
mean_kl = tf.reduce_mean(kl)
def update_scale():
with tf.control_dependencies([perturb_for_adaption]):
update_scale_expr = tf.cond(mean_kl < param_noise_threshold,
lambda: param_noise_scale.assign(param_noise_scale * 1.01),
lambda: param_noise_scale.assign(param_noise_scale / 1.01),
)
return update_scale_expr
| tensorflow.nn.softmax | 6,425 |
from tensorflow.contrib.eager.python import tfe
def main(_):
data_dir = os.path.join(FLAGS.dir, "data")
train_data = load_dataset(
data_dir=data_dir, url=SOURCE_TRAIN_URL, batch_size=FLAGS.batch_size)
eval_data = load_dataset(
data_dir=data_dir, url=SOURCE_TEST_URL, batch_size=FLAGS.batch_size)
model = RNNColorbot(
rnn_cell_sizes=FLAGS.rnn_cell_sizes,
label_dimension=3,
keep_prob=FLAGS.keep_probability)
optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate)
if FLAGS.no_gpu or tfe.num_gpus() <= 0:
print(tfe.num_gpus())
device = "/cpu:0"
else:
device = "/gpu:0"
print("Using device %s." % device)
log_dir = os.path.join(FLAGS.dir, "summaries")
tf.gfile.MakeDirs(log_dir)
train_summary_writer = tf.contrib.summary.create_file_writer(
os.path.join(log_dir, "train"), flush_millis=10000)
test_summary_writer = tf.contrib.summary.create_file_writer(
os.path.join(log_dir, "eval"), flush_millis=10000, name="eval")
with tf.device(device):
for epoch in range(FLAGS.num_epochs):
| tensorflow.contrib.eager.python.tfe.num_gpus | 6,426 |
from tensorflow.python.ops import init_ops
@property
def output_size(self):
return self._num_units
def __call__(self, inputs, state, att_score):
return self.call(inputs, state, att_score)
def call(self, inputs, state, att_score=None):
"""Gated recurrent unit (GRU) with nunits cells."""
if self._gate_linear is None:
bias_ones = self._bias_initializer
if self._bias_initializer is None:
bias_ones = init_ops.constant_initializer(1.0, dtype=inputs.dtype)
with vs.variable_scope("gates"): # Reset gate and update gate.
self._gate_linear = _Linear(
[inputs, state],
2 * self._num_units,
True,
bias_initializer=bias_ones,
kernel_initializer=self._kernel_initializer)
value = math_ops.sigmoid(self._gate_linear([inputs, state]))
r, u = array_ops.split(value=value, num_or_size_splits=2, axis=1)
| tensorflow.python.ops.init_ops.constant_initializer | 6,427 |
import tensorflow as tf
Returns:
List of delta tensors corresponding to the updates for each optimized variable.
"""
learning_rate = self.learning_rate.value()
unperturbed_loss = fn_loss(**arguments)
deltas = [tf.zeros_like(tensor=variable) for variable in variables]
previous_perturbations = [tf.zeros_like(tensor=variable) for variable in variables]
if self.unroll_loop:
# Unrolled for loop
for sample in range(self.num_samples):
with tf.control_dependencies(control_inputs=deltas):
perturbations = [
tf.random_normal(shape=util.shape(variable)) * learning_rate
for variable in variables
]
perturbation_deltas = [
pert - prev_pert
for pert, prev_pert in zip(perturbations, previous_perturbations)
]
applied = self.apply_step(variables=variables, deltas=perturbation_deltas)
previous_perturbations = perturbations
| tensorflow.control_dependencies | 6,428 |
import tensorflow as tf
per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
| tensorflow.reduce_sum | 6,429 |
import tensorflow as tf
# row contains the length repeated max_sequence_len times.
lengths_transposed = tf.expand_dims(tf.to_int32(self.seq_lens), 1)
| tensorflow.to_int32 | 6,430 |
import tensorflow as tf
states = tf.TensorArray(dtype=tf.float32, size=time_steps)
weights = tf.TensorArray(dtype=tf.float32, size=time_steps)
attns = tf.TensorArray(dtype=tf.float32, size=time_steps)
initial_symbol = inputs.read(0) # first symbol is BOS
initial_input = embed(initial_symbol)
initial_pos = tf.zeros([batch_size], tf.float32)
initial_weights = tf.zeros(tf.shape(attention_states[align_encoder_id])[:2])
zero_context = tf.zeros(shape=tf.shape(attention_states[align_encoder_id][:,0])) # FIXME
with tf.variable_scope('decoder_{}'.format(decoder.name)):
initial_context, _ = look(0, initial_output, initial_input, pos=initial_pos, prev_weights=initial_weights,
| tensorflow.zeros | 6,431 |
import tensorflow as tf
self.epsilon = epsilon
def __call__(self,input_var,name=None,**kwargs) :
if( input_var.shape.ndims > 2 ) :
dims = tf.reduce_prod(tf.shape(input_var)[1:])
input_var = tf.reshape(input_var,[-1,dims])
def _init():
v_norm = tf.nn.l2_normalize(self.v,axis=0)
t = tf.matmul(input_var,v_norm)
mu,var = tf.nn.moments(t,axes=[0])
std = tf.sqrt(var+self.epsilon)
return [tf.assign(self.g,1/std),tf.assign(self.b,-1.*mu/std)]
require_init = tf.reduce_any(tf.is_nan(self.g))
init_ops = tf.cond(require_init,_init,lambda : [self.g,self.b])
with tf.control_dependencies(init_ops):
w = tf.expand_dims(self.g,axis=0) * tf.nn.l2_normalize(self.v,axis=0)
return tf.matmul(input_var,w)+self.b
def get_variables(self):
#TODO: self.v should be l2-normalized or not? / currently not.
return {'v':self.v,'b':self.b,'g':self.g}
class SymPadConv2d(object): #Resize and Convolution(upsacle by 2)
def __init__(self,name,input_dim,output_dim,
k_h=3,k_w=3,stddev=0.02) :
assert k_h%2==1 and k_w%2==1, 'kernel size should be odd numbers to ensure exact size'
with tf.variable_scope(name) :
| tensorflow.cond | 6,432 |
import tensorflow as tf
lm_losses = tf.reduce_sum(lm_losses*M[:, 1:], 1)/tf.reduce_sum(M[:, 1:], 1)
clf_h = tf.reshape(h, [-1, n_embd])
pool_idx = tf.cast(tf.argmax(tf.cast(tf.equal(X[:, :, 0], clf_token), tf.float32), 1), tf.int32)
clf_h = tf.gather(clf_h, tf.range(shape_list(X)[0], dtype=tf.int32)*n_ctx+pool_idx)
clf_h = tf.reshape(clf_h, [-1, 2, n_embd])
if train and clf_pdrop > 0:
shape = shape_list(clf_h)
shape[1] = 1
clf_h = tf.nn.dropout(clf_h, 1-clf_pdrop, shape)
clf_h = tf.reshape(clf_h, [-1, n_embd])
clf_logits = clf(clf_h, 1, train=train)
clf_logits = tf.reshape(clf_logits, [-1, 2])
clf_losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=clf_logits, labels=Y)
return clf_logits, clf_losses, lm_losses
def mgpu_train(*xs):
gpu_ops = []
gpu_grads = []
xs = (tf.split(x, n_gpu, 0) for x in xs)
for i, xs in enumerate(zip(*xs)):
do_reuse = True if i > 0 else None
with tf.device(assign_to_gpu(i, "/gpu:0")), tf.variable_scope(tf.get_variable_scope(), reuse=do_reuse):
clf_logits, clf_losses, lm_losses = model(*xs, train=True, reuse=do_reuse)
if lm_coef > 0:
train_loss = tf.reduce_mean(clf_losses) + lm_coef*tf.reduce_mean(lm_losses)
else:
train_loss = tf.reduce_mean(clf_losses)
| tensorflow.nn.sparse_softmax_cross_entropy_with_logits | 6,433 |
import tensorflow as tf
num_error_rate = tf.get_variable(
'num_error_rate', [], initializer=tf.constant_initializer(0.), trainable=False)
batch_size_train = self.cnf['batch_size_train']
batch_size_val = self.cnf['batch_size_test']
self.end_points_G = self.model.generator([batch_size_train, 100], True, None, batch_size_val)
if gpu_idx == 0:
G_means = tf.reduce_mean(self.end_points_G['softmax'], 0, keep_dims=True)
G_vars = tf.reduce_mean(tf.square(self.end_points_G['softmax'] - G_means), 0, keep_dims=True)
G = tf.Print(
self.end_points_G['softmax'],
[tf.reduce_mean(G_means), tf.reduce_mean(G_vars)],
"generator mean and average var",
first_n=1)
inputs_means = tf.reduce_mean(inputs, 0, keep_dims=True)
inputs_vars = tf.reduce_mean(tf.square(inputs - inputs_means), 0, keep_dims=True)
inputs = tf.Print(
inputs,
[tf.reduce_mean(inputs_means), tf.reduce_mean(inputs_vars)],
"image mean and average var",
first_n=1)
joint = tf.concat([inputs, G], 0)
| tensorflow.reduce_mean | 6,434 |
import tensorflow as tf
img_shape = inputs_list[i][-2:]
h_crop = tf.reduce_max(img_shape[0])
w_crop = tf.reduce_max(img_shape[1])
img = tf.image.crop_to_bounding_box(image=img,
offset_height=0,
offset_width=0,
target_height=tf.cast(h_crop, tf.int32),
target_width=tf.cast(w_crop, tf.int32))
outputs = fcos.build_whole_detection_network(input_img_batch=img,
gtboxes_batch_h=gtboxes_and_label_h,
gtboxes_batch_r=gtboxes_and_label_q,
gpu_id=i)
gtboxes_in_img_q = self.drawer.draw_boxes_with_categories(
| tensorflow.cast | 6,435 |
import tensorflow as tf
bounds=(0,1)):
(clip_min, clip_max)=bounds
grad, = tf.gradients(loss, x)
if ord == 1:
red_ind = list(range(1, len(x.get_shape())))
avoid_zero_div = 1e-8
avoid_nan_norm = tf.maximum(avoid_zero_div,
reduce_sum(tf.abs(grad),
reduction_indices=red_ind,
keepdims=True))
normalized_grad = old_div(grad, avoid_nan_norm)
elif ord == 2:
red_ind = list(range(1, len(x.get_shape())))
avoid_zero_div = 1e-8
square = tf.maximum(avoid_zero_div,
reduce_sum(tf.square(grad),
reduction_indices=red_ind,
| tensorflow.abs | 6,436 |
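The `ord == 2` branch above is cut off before the division; a minimal sketch of a standard completion (an assumption, not shown in the snippet) divides the gradient by its floored per-example L2 norm to obtain a unit direction.
import tensorflow as tf
grad = tf.constant([[3.0, 4.0]])
square = tf.maximum(1e-8, tf.reduce_sum(tf.square(grad), axis=1, keepdims=True))
normalized_grad = grad / tf.sqrt(square)                   # expected [[0.6, 0.8]]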
from tensorflow.python.ops import array_ops
# Expand dims to produce [D1, ... DN, k, 1] tensor. This gives us a separate
# prediction for each k, so we can calculate separate true positive values
# for each k.
predictions_idx_per_k = array_ops.expand_dims(
predictions_idx, -1, name='predictions_idx_per_k')
# Replicate labels k times to produce [D1, ... DN, k, num_labels] tensor.
| tensorflow.python.ops.array_ops.expand_dims | 6,437 |
import tensorflow as tf
update_eps_ph = tf.placeholder(tf.float32, (), name="update_eps")
eps = tf.get_variable("eps", (), initializer=tf.constant_initializer(0.0))
q_func_results = q_func(observations_ph.get(), num_actions, scope="q_func")
q_values = q_func_results['q']
s_value = q_func_results['s']
a_values = q_func_results['a']
deterministic_actions = tf.argmax(q_values, axis=1)
batch_size = tf.shape(observations_ph.get())[0]
random_actions = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=num_actions, dtype=tf.int64)
chose_random = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=1, dtype=tf.float32) < eps
stochastic_actions = tf.where(chose_random, random_actions, deterministic_actions)
output_actions = tf.cond(stochastic_ph, lambda: stochastic_actions, lambda: deterministic_actions)
update_eps_expr = eps.assign(tf.cond(update_eps_ph >= 0, lambda: update_eps_ph, lambda: eps))
act = U.function(inputs=[observations_ph, stochastic_ph, update_eps_ph],
outputs=[output_actions, q_values, s_value, a_values, update_eps_expr],
givens={update_eps_ph: test_epsilon, stochastic_ph: False},
updates=[update_eps_expr])
return act
| tensorflow.stack | 6,438 |
import tensorflow as tf
res = sess.run([mem])
self.assertEqual((2, 2), res[0].shape)
def testAttentionDecoderStateIsTuple(self):
with self.test_session() as sess:
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=True)
cell = tf.nn.rnn_cell.MultiRNNCell(cells=[cell] * 2,
state_is_tuple=True)
inp = [tf.constant(0.5, shape=[2, 2])] * 2
enc_outputs, enc_state = tf.nn.rnn(cell, inp, dtype=tf.float32)
attn_states = tf.concat(1, [tf.reshape(e, [-1, 1, cell.output_size])
for e in enc_outputs])
dec_inp = [tf.constant(0.4, shape=[2, 2])] * 3
dec, mem = tf.nn.seq2seq.attention_decoder(
dec_inp, enc_state,
attn_states, cell, output_size=4)
sess.run([tf.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 4), res[0].shape)
| tensorflow.reshape | 6,439 |
import tensorflow as tf
Returns
-------
A tuple length of 3, (normalized_tensor, mean, variance).
"""
mean, var = tf.nn.moments(
x, reduction_axes, shift=None, name=None, keep_dims=False)
if sorted(reduction_axes) == range(ndim(x))[:-1]:
normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, epsilon)
else:
# need broadcasting
target_shape = []
for axis in range(get_ndim(x)):
if axis in reduction_axes:
target_shape.append(1)
else:
target_shape.append(tf.shape(x)[axis])
target_shape = stack(target_shape)
broadcast_mean = tf.reshape(mean, target_shape)
broadcast_var = tf.reshape(var, target_shape)
broadcast_gamma = tf.reshape(gamma, target_shape)
broadcast_beta = tf.reshape(beta, target_shape)
normed = tf.nn.batch_normalization(x, broadcast_mean, broadcast_var,
broadcast_beta, broadcast_gamma, epsilon)
return normed, mean, var
def ones(shape, dtype=None, name=None):
"""Instantiates an all-ones tensor variable and returns it.
| tensorflow.shape | 6,440 |
import tensorflow as tf
embedding = config.embeddings.add()
# Specifiy the embedding variable and the metadata
embedding.tensor_name = embedding_name
embedding.metadata_path = path_to_meta
# Project the embeddings to space dimensions for visualization
tf.contrib.tensorboard.plugins.projector.visualize_embeddings(summary_writer, config)
def add_train_stats(model, hparams):
with tf.variable_scope("stats") as scope:
for i in range(hparams.tacotron_num_gpus):
tf.summary.histogram("mel_outputs %d" % i, model.tower_mel_outputs[i])
tf.summary.histogram("mel_targets %d" % i, model.tower_mel_targets[i])
tf.summary.scalar("before_loss", model.before_loss)
tf.summary.scalar("after_loss", model.after_loss)
if hparams.predict_linear:
tf.summary.scalar("linear_loss", model.linear_loss)
for i in range(hparams.tacotron_num_gpus):
tf.summary.histogram("mel_outputs %d" % i, model.tower_linear_outputs[i])
tf.summary.histogram("mel_targets %d" % i, model.tower_linear_targets[i])
tf.summary.scalar("regularization_loss", model.regularization_loss)
| tensorflow.summary.histogram | 6,441 |
import tensorflow as tf
# Runs train_op.
train_op = optimizer.minimize(loss)
sess.run(train_op)
def testGraphExtension(self):
self._testGraphExtensionSave()
self._testGraphExtensionRestore()
def testStrippedOpListDef(self):
with self.test_session():
# Creates a graph.
v0 = tf.Variable(0.0)
var = tf.Variable(10.0)
tf.add(v0, var)
@function.Defun(x=tf.float32)
def minus_one(x):
return x - 1
minus_one(tf.identity(v0))
save = tf.train.Saver({"v0": v0})
tf.initialize_all_variables()
# Generates MetaGraphDef.
meta_graph_def = save.export_meta_graph()
ops = [o.name for o in meta_graph_def.meta_info_def.stripped_op_list.op]
self.assertEqual(ops, ["Add", "Assign", "Const", "Identity", "NoOp",
"RestoreSlice", "SaveSlices", "Sub", "Variable"])
| tensorflow.add | 6,442 |
from tensorflow.compat.v1 import ConfigProto, InteractiveSession
import pickle
from tensorflow.compat.v1 import ConfigProto, InteractiveSession
import tensorflow as tf
from speech_utils.ACRNN.tf.model_utils import train
config = ConfigProto(log_device_placement=True)
config.gpu_options.allow_growth = True
session = tf.Session(config=config).as_default()
def main(args):
# Verify
| tensorflow.compat.v1.ConfigProto | 6,443 |
from tensorflow.python.framework import ops
recall = compute_recall(true_positives, false_negatives, 'value')
with ops.control_dependencies([true_positives_update_op,
false_negatives_update_op]):
update_op = compute_recall(true_positives, false_negatives, 'update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, recall)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return recall, update_op
| tensorflow.python.framework.ops.add_to_collections | 6,444 |
import tensorflow as tf
# dtype=tf.float32,
# initializer=tf.contrib.layers.xavier_initializer(),
# regularizer=tf.contrib.layers.l2_regularizer(self.params["regularizer_rate"]),
# trainable=True
# )
embeddings = tf.nn.embedding_lookup(embedding_variable, word_ids)
return embeddings
def dropout_layer(self, data):
training = self.mode == tf.estimator.ModeKeys.TRAIN
output = tf.layers.dropout(data, rate=self.params["dropout"], training=training)
return output
def layer_normalization_layer(self, data):
output = tf.contrib.layers.layer_norm(data)
return output
def dense_layer(self, data, num_tags):
logits = tf.layers.dense(data, num_tags)
| tensorflow.layers.dropout | 6,445 |
import tensorflow as tf
sigma = tf.get_variable(name='pi_sigma', shape=self.a_dim, initializer=tf.constant_initializer(0.5))
sigma = tf.clip_by_value(sigma, 0.0, 1.0)
norm_dist = tf.distributions.Normal(loc=mu * self.a_bound, scale=sigma)
params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=name)
return norm_dist, params
| tensorflow.get_collection | 6,446 |
import tensorflow as tf
batch_size = tf.cast(tf.shape(pred)[0], tf.float32)
loss_val = tf.sqrt(2 * tf.nn.l2_loss(pred - labels)) / batch_size
return loss_val
def pullaway_loss(embeddings, name='pullaway_loss'):
"""Pull Away loss calculation.
Args:
embeddings: The embeddings to be orthogonalized for varied faces.
Shape [batch_size, embeddings_dim]
Return: pull away term loss
"""
with tf.name_scope(name):
norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
normalized_embeddings = embeddings / norm
similarity = tf.matmul(normalized_embeddings, normalized_embeddings, transpose_b=True)
batch_size = tf.cast(tf.shape(embeddings)[0], tf.float32)
pt_loss = (tf.reduce_sum(similarity) - batch_size) / \
(batch_size * (batch_size - 1))
return pt_loss
def log_sum_exp(x):
"""numerically stable log_sum_exp implementation that prevents overflow."""
axis = len(x.get_shape()) - 1
m = tf.reduce_max(x, axis)
| tensorflow.name_scope | 6,447 |
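A minimal sanity check of the `pullaway_loss` defined above (assuming that definition is in scope): perfectly orthogonal unit embeddings have zero off-diagonal similarity, so the pull-away term evaluates to zero.
import tensorflow as tf
embeddings = tf.constant([[1.0, 0.0], [0.0, 1.0]])
pt_loss = pullaway_loss(embeddings)                        # expected 0.0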
import tensorflow as tf
shift=shift,
name="batch_norm_ss")
mean, variance = tf.nn.normalize_moments(counts,
shifted_sum_x,
shifted_sum_x2,
shift,
name="normalize_moments")
return mean, variance
def build_moving_stats():
return (
tf.identity(self._moving_mean),
tf.identity(self._moving_variance),
)
mean, variance = utils.smart_cond(
use_batch_stats,
build_batch_stats,
build_moving_stats,
)
return mean, variance
def _build_statistics_second_moment(self, input_batch,
| tensorflow.identity | 6,448 |
import tensorflow as tf
:param decoder_states: internal states of the decoder, tensor of shape (batch_size, time_steps, state_size)
:param reward: reward for each time step, tensor of shape (batch_size, time_steps)
:return: reward - computed baseline, tensor of shape (batch_size, time_steps)
"""
# batch_size = tf.shape(decoder_states)[0]
# time_steps = tf.shape(decoder_states)[1]
# state_size = decoder_states.get_shape()[2]
# states = tf.reshape(decoder_states, shape=tf.stack([batch_size * time_steps, state_size]))
baseline = dense(tf.stop_gradient(decoder_states), units=1, activation=None, name='reward_baseline',
kernel_initializer=tf.constant_initializer(0.01))
baseline = tf.squeeze(baseline, axis=2)
# baseline = tf.reshape(baseline, shape=tf.stack([batch_size, time_steps]))
return reward - baseline
def baseline_loss(rewards, weights, average_across_timesteps=False, average_across_batch=True):
"""
| tensorflow.stop_gradient | 6,449 |
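A minimal sketch of why the baseline above reads `tf.stop_gradient(decoder_states)`: gradients from the baseline regressor should not flow back into the decoder, and `stop_gradient` severs that path.
import tensorflow as tf
x = tf.constant(3.0)
y = tf.stop_gradient(x) * 2.0
grads = tf.gradients(y, x)                                 # [None]: no gradient reaches x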
import tensorflow as tf
return weighted_average, weights
def no_attention(state, hidden_states, *args, **kwargs):
batch_size = tf.shape(state)[0]
weighted_average = tf.zeros(shape=tf.stack([batch_size, 0]))
weights = tf.zeros(shape=[batch_size, tf.shape(hidden_states)[1]])
return weighted_average, weights
def average_attention(hidden_states, encoder_input_length, *args, **kwargs):
# attention with fixed weights (average of all hidden states)
lengths = tf.to_float(tf.expand_dims(encoder_input_length, axis=1))
mask = tf.sequence_mask(encoder_input_length, maxlen=tf.shape(hidden_states)[1])
weights = tf.to_float(mask) / lengths
weighted_average = tf.reduce_sum(hidden_states * tf.expand_dims(weights, axis=2), axis=1)
return weighted_average, weights
def last_state_attention(hidden_states, encoder_input_length, *args, **kwargs):
weights = tf.one_hot(encoder_input_length - 1, tf.shape(hidden_states)[1])
weights = tf.to_float(weights)
weighted_average = tf.reduce_sum(hidden_states * tf.expand_dims(weights, axis=2), axis=1)
return weighted_average, weights
| tensorflow.expand_dims | 6,450 |
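A minimal sketch of the fixed weights `average_attention` above produces: a sequence of length 2 padded to 4 positions gets weight 0.5 on its real positions and 0 on the padding.
import tensorflow as tf
encoder_input_length = tf.constant([2])
mask = tf.sequence_mask(encoder_input_length, maxlen=4)    # [[True, True, False, False]]
lengths = tf.to_float(tf.expand_dims(encoder_input_length, axis=1))
weights = tf.to_float(mask) / lengths                      # expected [[0.5, 0.5, 0.0, 0.0]]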
import tensorflow as tf
'Optimizer to use: momentum or sgd or rmsprop')
tf.flags.DEFINE_float('learning_rate', None,
| tensorflow.flags.DEFINE_float | 6,451 |
import tensorflow as tf
if not phase_train:
if FLAGS.forward_only:
all_logits = tf.concat(all_logits, 0)
fetches = [all_logits] + enqueue_ops
else:
all_top_1_ops = tf.reduce_sum(all_top_1_ops)
all_top_5_ops = tf.reduce_sum(all_top_5_ops)
fetches = [all_top_1_ops, all_top_5_ops] + enqueue_ops
return (enqueue_ops, fetches)
extra_nccl_ops = []
apply_gradient_devices, gradient_state = (
self.variable_mgr.preprocess_device_grads(device_grads))
| tensorflow.reduce_sum | 6,452 |
import tensorflow as tf
if self.config.l2_norm is not None:
self.logger.info("applying l2 loss")
variables = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
l2_loss = tf.contrib.layers.apply_regularization(regularizer, variables)
self.loss += l2_loss
| tensorflow.contrib.layers.apply_regularization | 6,453 |
import tensorflow as tf
else:
q = tf.nn.softmax(q_logits)
p = tf.nn.softmax(p_logits)
kl = tf.reduce_sum(q * (tf.log(q) - tf.log(p)), 1)
num_labels = tf.reduce_sum(weights)
num_labels = tf.where(tf.equal(num_labels, 0.), 1., num_labels)
kl.get_shape().assert_has_rank(2)
weights.get_shape().assert_has_rank(1)
loss = tf.identity(tf.reduce_sum(tf.expand_dims(weights, -1) * kl) / num_labels, name='kl')
return loss
def cross_entropy_sequence_loss(logits, targets, sequence_length):
"""Calculates the per-example cross-entropy loss for a sequence of logits and
masks out all losses passed the sequence length.
Args:
logits: Logits of shape `[T, B, vocab_size]`
| tensorflow.expand_dims | 6,454 |
import tensorflow as tf
def conv3d(layer_name, x, out_channels, kernel_size=[1,3,3], strides=[1,1,1,1,1], data_format='NDHWC', is_pretrain=True):
'''
Convolution 3D op wrapper, use RELU activation after convolution
'''
in_channels = x.get_shape()[-1].value
with tf.variable_scope(layer_name):
w = tf.get_variable(name='weight',
trainable=is_pretrain,
shape=[kernel_size[0],kernel_size[1],kernel_size[2],in_channels,out_channels],
initializer=tf.contrib.layers.xavier_initializer())
b = tf.get_variable(name='bias',
trainable=is_pretrain,
shape=[out_channels],
initializer=tf.contrib.layers.xavier_initializer())
x = tf.nn.conv3d(x, w, strides=strides, padding='SAME', data_format=data_format, name='conv3d')
x = tf.nn.bias_add(x, b, name='bias_add')
x = tf.nn.relu(x, name='relu')
return x
| tensorflow.contrib.layers.xavier_initializer | 6,455 |
import tensorflow as tf
elif norm == 'G':
X = tf.contrib.layers.group_norm(X, groups=16, scope=scope, reuse=reuse)
if dropout > 0.0:
X = tf.layers.dropout(X, dropout, training=is_train)
if slope < 1.0:
X = tf.nn.leaky_relu(X, slope) if slope > 0.0 else tf.nn.relu(X)
return X
def decoder_conf(name, X, filter, f_size, scale, norm, reuse, is_train, dropout=0.0, stddev=-1.0, slope=0.00,
use_bias=True):
with tf.variable_scope(name) as scope:
if scale > 1:
X = self.t_conv(name + '_upsample', X, filter, scale, scale, (not norm) and use_bias, "VALID", stddev)
else:
X = self.t_conv(name + '_deconf', X, filter, f_size, 1, (not norm) and use_bias, "VALID", stddev)
if norm == 'I':
X = tf.contrib.layers.instance_norm(X, scope=scope, reuse=reuse)
elif norm == 'B':
X = tf.layers.batch_normalization(X, reuse=reuse, training=is_train, name=name)
elif norm == 'G':
X = tf.contrib.layers.group_norm(X, groups=16, scope=scope, reuse=reuse)
| tensorflow.variable_scope | 6,456 |
import tensorflow as tf
weights=weights)
loss_dict = {
'localization_loss': tf.reduce_sum(location_losses),
'classification_loss': tf.reduce_sum(cls_losses),
}
| tensorflow.reduce_sum | 6,457 |
import tensorflow as tf
keys_to_features = {
'image/encoded': tf.FixedLenFeature((), tf.string, default_value=''),
'image/format': tf.FixedLenFeature((), tf.string, default_value='jpeg'),
'image/height': tf.FixedLenFeature([1], tf.int64),
'image/width': tf.FixedLenFeature([1], tf.int64),
'image/channels': tf.FixedLenFeature([1], tf.int64),
'image/shape': tf.FixedLenFeature([3], tf.int64),
'image/object/bbox/xmin': tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/ymin': tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/xmax': tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/ymax': tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/label': tf.VarLenFeature(dtype=tf.int64),
'image/object/bbox/difficult': tf.VarLenFeature(dtype=tf.int64),
'image/object/bbox/truncated': tf.VarLenFeature(dtype=tf.int64),
}
items_to_handlers = {
'image': slim.tfexample_decoder.Image('image/encoded', 'image/format'),
'shape': slim.tfexample_decoder.Tensor('image/shape'),
| tensorflow.VarLenFeature | 6,458 |
import tensorflow as tf
m = mlow + mhigh
m += tf.to_float(idx >= encoder_input_length)
mask = tf.to_float(tf.equal(m, 0.0))
e = compute_energy(hidden_states, state, encoder, input_length=encoder_input_length, **kwargs)
weights = softmax(e, mask=mask)
if encoder.attn_window_size > 0:
sigma = encoder.attn_window_size / 2
numerator = -tf.pow((idx - pos), tf.convert_to_tensor(2, dtype=tf.float32))
div = tf.truediv(numerator, 2 * sigma ** 2)
weights *= tf.exp(div) # result of the truncated normal distribution
# normalize to keep a probability distribution
# weights /= (tf.reduce_sum(weights, axis=1, keep_dims=True) + 10e-12)
weighted_average = tf.reduce_sum(tf.expand_dims(weights, axis=2) * hidden_states, axis=1)
return weighted_average, weights
def attention(encoder, scope=None, **kwargs):
attention_functions = {
'global': global_attention,
'local': local_attention,
| tensorflow.exp | 6,459 |
import tensorflow as tf
image_height = image_shape[0]
image_width = image_shape[1]
crop_size_assert = tf.Assert(
tf.logical_and(
tf.greater_equal(image_height, crop_height),
tf.greater_equal(image_width, crop_width)),
['Crop size greater than the image size.'])
| tensorflow.greater_equal | 6,460 |
import tensorflow as tf
# AC net
def build_anet(self, state_in, name, reuse=False):
reg = tf.contrib.layers.l2_regularizer(1e-3)
with tf.variable_scope(name, reuse=reuse):
| tensorflow.contrib.layers.l2_regularizer | 6,461 |
import tensorflow as tf
enable_pretrain = tf.cast(
tf.greater_equal(global_step, FLAGS.first_pretrain_steps), tf.float32)
loss = src_loss * tf.stop_gradient(loss_weights) * enable_pretrain
loss += dst_loss + l2_loss
| tensorflow.stop_gradient | 6,462 |
import tensorflow as tf
f1 = tf.reduce_sum(half(masked, 0), 2) / tf.reduce_sum(half(mask, 0))
f2 = tf.reduce_sum(half(masked, 1), 2) / tf.reduce_sum(half(mask, 1))
return tf.concat([x, f1, f2], 1)
def batch_norm(x, train, name, decay=0.99, epsilon=1e-5):
shape = x.get_shape().as_list()
with tf.variable_scope(name):
beta = tf.get_variable('beta', [shape[-1]], initializer=tf.constant_initializer(0.))
gamma = tf.get_variable('gamma', [shape[-1]], initializer=tf.random_normal_initializer(1., 0.02))
pop_mean = tf.get_variable('pop_mean', [shape[-1]], initializer=tf.constant_initializer(0.), trainable=False)
pop_var = tf.get_variable('pop_var', [shape[-1]], initializer=tf.constant_initializer(1.), trainable=False)
if pop_mean not in tf.moving_average_variables():
tf.add_to_collection(tf.GraphKeys.MOVING_AVERAGE_VARIABLES, pop_mean)
tf.add_to_collection(tf.GraphKeys.MOVING_AVERAGE_VARIABLES, pop_var)
def func1():
# execute at training time
batch_mean, batch_var = tf.nn.moments(x, range(len(shape) - 1))
| tensorflow.constant_initializer | 6,463 |
import tensorflow as tf
zs = entropy_bottleneck.decompress(z_strings, z_min_v, z_max_v, z_shape, z_shape[-1])
print("Entropy Decoder (Hyper)")
def loop_hyper_deocder(z):
z = tf.expand_dims(z, 0)
loc, scale = hyper_decoder(z)
return tf.squeeze(loc, [0]), tf.squeeze(scale, [0])
locs, scales = tf.map_fn(loop_hyper_deocder, zs, dtype=(tf.float32, tf.float32),
parallel_iterations=1, back_prop=False)
lower_bound = 1e-9# TODO
scales = tf.maximum(scales, lower_bound)
print("Hyper Decoder")
| tensorflow.squeeze | 6,464 |
from tensorflow.python.ops import init_ops
return self._num_units
def __call__(self, inputs, state, att_score):
return self.call(inputs, state, att_score)
def call(self, inputs, state, att_score=None):
"""Gated recurrent unit (GRU) with nunits cells."""
if self._gate_linear is None:
bias_ones = self._bias_initializer
if self._bias_initializer is None:
bias_ones = init_ops.constant_initializer(1.0, dtype=inputs.dtype)
with vs.variable_scope("gates"): # Reset gate and update gate.
self._gate_linear = _Linear(
[inputs, state],
2 * self._num_units,
True,
bias_initializer=bias_ones,
kernel_initializer=self._kernel_initializer)
| tensorflow.python.ops.init_ops.constant_initializer | 6,465 |
import tensorflow as tf
# move to numpy
cand_feats = cand_feats.numpy()
n_cands = n_cands.numpy()
# feature normalization
cand_feats = (cand_feats - policy['feat_shift']) / policy['feat_scale']
pred_scores = policy['model'].predict(cand_feats)
# move back to TF
pred_scores = tf.convert_to_tensor(pred_scores.reshape((1, -1)), dtype=tf.float32)
# padding
pred_scores = padding(pred_scores, n_cands)
true_scores = padding(tf.reshape(cand_scores, (1, -1)), n_cands)
true_bestscore = tf.reduce_max(true_scores, axis=-1, keepdims=True)
assert all(true_bestscore.numpy() == np.take_along_axis(true_scores.numpy(), best_cands.numpy().reshape((-1, 1)), axis=1))
kacc = []
for k in top_k:
pred_top_k = tf.nn.top_k(pred_scores, k=k)[1].numpy()
pred_top_k_true_scores = np.take_along_axis(true_scores.numpy(), pred_top_k, axis=1)
kacc.append(np.mean(np.any(pred_top_k_true_scores == true_bestscore.numpy(), axis=1)))
kacc = np.asarray(kacc)
batch_size = int(n_cands.shape[0])
mean_kacc += kacc * batch_size
n_samples_processed += batch_size
| tensorflow.reduce_max | 6,466 |
from tensorflow.python.ops import array_ops
0, (ones[:dim], (multiple,), ones[dim:]), name='multiples')
return array_ops.tile(expanded, tile_multiples, name=scope)
| tensorflow.python.ops.array_ops.tile | 6,467 |
import tensorflow as tf
'Output shape must have a batch dimension'):
coord.join()
def test_output_must_have_same_batch_dimension_size_as_input(self):
with self.test_session() as session:
@dynamic_batching.batch_fn
def f(_):
return tf.constant([1, 2, 3, 4])
output = f(tf.constant([1]))
coord = tf.train.Coordinator()
tf.train.start_queue_runners(coord=coord)
with self.assertRaises(tf.errors.CancelledError):
session.run(output)
with self.assertRaisesRegexp(
tf.errors.InvalidArgumentError,
'Output shape must have the same batch dimension as the input batch '
'size. Expected: 1 Observed: 4'):
coord.join()
| tensorflow.train.Coordinator | 6,468 |
import tensorflow as tf
#theta_tensor = tf.get_default_graph().get_tensor_by_name('spt_trans_theta')
cls_score, cls_prob, bbox_pred, rois = sess.run([net.get_output('cls_score'),
net.get_output('cls_prob'), net.get_output('bbox_pred'), net.get_output('rois')],
feed_dict=feed_dict,
options=run_options,
run_metadata=run_metadata)
if (save_vis_dir is not None and os.path.exists(save_vis_dir)):
# first get the weights out
with tf.variable_scope('conv5_3', reuse=True) as scope:
conv5_3_weights = tf.get_variable("weights")
conv5_3_weights_np, conv5_3_features, st_pool_features =\
sess.run([conv5_3_weights, net.get_output('conv5_3'), net.get_output('pool_5')],
feed_dict=feed_dict,
options=run_options,
run_metadata=run_metadata)
np.save(os.path.join(save_vis_dir, '%s_conv5_3_w.npy' % img_name), conv5_3_weights_np)
np.save(os.path.join(save_vis_dir, '%s_conv5_3_f.npy' % img_name), conv5_3_features)
np.save(os.path.join(save_vis_dir, '%s_st_pool_f.npy' % img_name), st_pool_features)
| tensorflow.get_variable | 6,469 |
import tensorflow as tf
if monitorSession:
# MonitoredSession
# this will restore all the variables from the latest checkpoint if it exists
self._fix_checkpoint_abs_to_rel(self._checkpoint_dir) # need to ensure checkpoint has relative path saved
chiefsess_creator = tf.train.ChiefSessionCreator(config=sess_config, checkpoint_dir=self._checkpoint_dir)
if self._restore_chkptfile is not None:
self._network.init_saver()
# this is restoring variables
self.sess = tf.train.MonitoredSession(session_creator=chiefsess_creator, hooks=self.hooks)
# Restore from some checkpoint
if self._restore_chkptfile is not None:
raw_sess = self.get_raw_session()
if raw_sess.run(self.global_step) == 0:
self._network.restore(raw_sess, self._restore_chkptfile)
else:
self.sess = tf.Session(config=sess_config)
#all_variables = tf.get_collection_ref(tf.GraphKeys.GLOBAL_VARIABLES)
#self.sess.run(tf.variables_initializer(all_variables))
| tensorflow.train.MonitoredSession | 6,470 |
import tensorflow as tf
def squeeze_nxn(input_, n_factor=2):
"""Squeezing operation: reshape to convert space to channels."""
if isinstance(input_, (float, int)):
return input_
shape = input_.get_shape().as_list()
batch_size = shape[0]
height = shape[1]
width = shape[2]
channels = shape[3]
if height % n_factor != 0:
raise ValueError("Height not divisible by %d." % n_factor)
if width % n_factor != 0:
raise ValueError("Width not divisible by %d." % n_factor)
res = tf.reshape(
input_,
[batch_size,
height // n_factor,
n_factor, width // n_factor,
n_factor, channels])
res = tf.transpose(res, [0, 1, 3, 5, 2, 4])
res = tf.reshape(
res,
[batch_size,
height // n_factor,
width // n_factor,
channels * n_factor * n_factor])
return res
| tensorflow.reshape | 6,471 |
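A minimal usage sketch of the `squeeze_nxn` defined above (assuming that definition is in scope): with `n_factor=2`, spatial resolution halves and the channel count grows fourfold.
import tensorflow as tf
x = tf.zeros([8, 32, 32, 3])
y = squeeze_nxn(x, n_factor=2)                             # expected static shape (8, 16, 16, 12)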
import tensorflow as tf
self.outputs = prev_layer.outputs
# self.all_layers = list(layer.all_layers)
# self.all_params = list(layer.all_params)
# self.all_drop = dict(layer.all_drop)
else:
self.inputs = prev_layer.outputs
logging.info("GaussianNoiseLayer %s: mean:%f stddev:%f" % (self.name, mean, stddev))
with tf.variable_scope(name):
# noise = np.random.normal(0.0 , sigma , tf.to_int64(self.inputs).get_shape())
noise = tf.random_normal(shape=self.inputs.get_shape(), mean=mean, stddev=stddev, seed=seed)
self.outputs = self.inputs + noise
# self.all_layers = list(layer.all_layers)
# self.all_params = list(layer.all_params)
# self.all_drop = dict(layer.all_drop)
| tensorflow.variable_scope | 6,472 |
import tensorflow as tf
return pt_loss
def log_sum_exp(x):
"""numerically stable log_sum_exp implementation that prevents overflow."""
axis = len(x.get_shape()) - 1
m = tf.reduce_max(x, axis)
m2 = tf.reduce_max(x, axis, keep_dims=True)
return m + tf.log(tf.reduce_sum(tf.exp(x - m2), axis))
def log_prob_from_logits(x):
| tensorflow.reduce_max | 6,473 |
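A minimal sketch of the max-shift trick in the `log_sum_exp` above (assuming that definition is in scope): subtracting the row maximum keeps `exp` from overflowing without changing the result.
import tensorflow as tf
x = tf.constant([[1000.0, 1000.0]])
naive = tf.log(tf.reduce_sum(tf.exp(x), axis=-1))          # overflows to inf
stable = log_sum_exp(x)                                    # expected ~[1000.693]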
import tensorflow as tf
print(f"Got {len(sync_lp_observations)} observations in {sync_lp_time:.2f}s")
# %% [markdown]
# ## Comparison
# To compare outcomes of sync and async runs, let's plot their respective regrets side by side, and print out the running time. For this toy problem we expect async scenario to run a little bit faster on machines with multiple CPU.
# %%
from util.plotting import plot_regret
import matplotlib.pyplot as plt
fig, ax = plt.subplots(1, 2)
sync_lp_min_idx = tf.squeeze(tf.argmin(sync_lp_observations, axis=0))
async_lp_min_idx = tf.squeeze(tf.argmin(async_lp_observations, axis=0))
plot_regret(
sync_lp_observations.numpy(), ax[0], num_init=len(initial_data), idx_best=sync_lp_min_idx
)
ax[0].set_yscale("log")
ax[0].set_ylabel("Regret")
ax[0].set_ylim(0.0000001, 100)
ax[0].set_xlabel("# evaluations")
ax[0].set_title(f"Sync LP, {len(sync_lp_observations)} points, time {sync_lp_time:.2f}")
plot_regret(
| tensorflow.argmin | 6,474 |
import tensorflow as tf
name="pool")
pooled_outputs.append(pooled)
with tf.name_scope("preFc"):
# combine all pooled outputs
total_filters = num_filter * len(filter_list)
# concat all the pooled weights
H_pool = tf.concat(pooled_outputs, 3)
#flatten it for fully connected layer
H_pool_flat = tf.reshape(H_pool, [-1, total_filters])
with tf.name_scope("dropout"):
H_drop = tf.nn.dropout(H_pool_flat, keep_prob = keep_prob)
# Final (unnormalized) layer
with tf.name_scope("output"):
W = tf.get_variable("W",
shape=[total_filters, nb_classes],
initializer=tf.contrib.layers.xavier_initializer())
# add final layer bias
b = tf.Variable(tf.constant(0.1, shape=[nb_classes]), name="b")
# calc l2 losses
l2_loss += tf.nn.l2_loss(W)
l2_loss += tf.nn.l2_loss(b)
| tensorflow.nn.dropout | 6,475 |
import tensorflow as tf
channels = inp.get_shape().as_list()[3]
with tf.variable_scope(name):
moving_mean = get_variable("mean", shape=[channels], dtype=tf.float32, initializer=tf.constant_initializer(0.0), trainable=False)
moving_variance = get_variable("var", shape=[channels], dtype=tf.float32, initializer=tf.constant_initializer(1.0), trainable=False)
offset = get_variable("offset", shape=[channels], dtype=tf.float32, initializer=tf.constant_initializer(0.0))
scale = get_variable("scale", shape=[channels], dtype=tf.float32, initializer=tf.constant_initializer(1.0), regularizer=tf.nn.l2_loss)
| tensorflow.constant_initializer | 6,476 |
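The row above only creates the batch-norm statistics and affine parameters; a hedged sketch of how such variables are commonly consumed at inference time (an assumption, not necessarily this repository's code) is:
import tensorflow as tf
# Dummy tensors standing in for the snippet's inp / moving_mean / moving_variance / offset / scale.
inp = tf.zeros([1, 8, 8, 16])
moving_mean, moving_variance = tf.zeros([16]), tf.ones([16])
offset, scale = tf.zeros([16]), tf.ones([16])
out = tf.nn.batch_normalization(inp, moving_mean, moving_variance,
                                offset, scale, variance_epsilon=1e-5)  # epsilon value is assumed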
import tensorflow as tf
def get_span_emb(self, head_emb, context_outputs, span_starts, span_ends):
span_emb_list = []
span_start_emb = tf.gather(context_outputs, span_starts) # [k, emb]
span_emb_list.append(span_start_emb)
span_end_emb = tf.gather(context_outputs, span_ends) # [k, emb]
span_emb_list.append(span_end_emb)
span_width = 1 + span_ends - span_starts # [k]
if self.config["use_features"]:
span_width_index = span_width - 1 # [k]
span_width_emb = tf.gather(tf.get_variable("span_width_embeddings", [self.config["max_span_width"], self.config["feature_size"]]), span_width_index) # [k, emb]
span_width_emb = tf.nn.dropout(span_width_emb, self.dropout)
span_emb_list.append(span_width_emb)
if self.config["model_heads"]:
span_indices = tf.expand_dims(tf.range(self.config["max_span_width"]), 0) + tf.expand_dims(span_starts, 1) # [k, max_span_width]
span_indices = tf.minimum(util.shape(context_outputs, 0) - 1, span_indices) # [k, max_span_width]
span_text_emb = tf.gather(head_emb, span_indices) # [k, max_span_width, emb]
with tf.variable_scope("head_scores"):
self.head_scores = util.projection(context_outputs, 1) # [num_words, 1]
span_head_scores = tf.gather(self.head_scores, span_indices) # [k, max_span_width, 1]
span_mask = tf.expand_dims(tf.sequence_mask(span_width, self.config["max_span_width"], dtype=tf.float32), 2) # [k, max_span_width, 1]
span_head_scores += tf.log(span_mask) # [k, max_span_width, 1]
span_attention = tf.nn.softmax(span_head_scores, 1) # [k, max_span_width, 1]
span_head_emb = tf.reduce_sum(span_attention * span_text_emb, 1) # [k, emb]
| tensorflow.nn.dropout | 6,477 |
from tensorflow.python.framework import tensor_shape
@ops.RegisterShape("ArgMax")
@ops.RegisterShape("ArgMin")
def _ArgOpShape(op):
"""Common shape function for arg-reduction ops."""
dimension_shape = op.inputs[1].get_shape()
dimension_shape.assert_is_compatible_with(tensor_shape.scalar())
input_shape = op.inputs[0].get_shape()
if input_shape.ndims is None:
return [tensor_shape.unknown_shape()]
elif input_shape.ndims <= 1:
return [tensor_shape.scalar()]
dimension = tensor_util.ConstantValue(op.inputs[1])
if dimension is None:
return [tensor_shape.unknown_shape(ndims=input_shape.ndims - 1)]
elif 0 <= dimension and dimension < input_shape.ndims:
returned_shape = []
for i, dim in enumerate(input_shape.dims):
if i != dimension:
returned_shape.append(dim)
return [tensor_shape.TensorShape(returned_shape)]
else:
| tensorflow.python.framework.tensor_shape.scalar | 6,478 |
import tensorflow as tf
"""Wrap the cell in multilayer"""
cell=tf.nn.rnn_cell.MultiRNNCell([get_a_cell(state_size,input_prob,state_prob,input_size_x if layer==0 else state_size) for layer in range(num_layers)],state_is_tuple=True)
cell=tf.nn.rnn_cell.DropoutWrapper(cell,variational_recurrent=True,dtype=tf.float32,input_size=input_size_x,output_keep_prob=output_prob)
init_state = cell.zero_state(batch_size, dtype=tf.float32)
"""Build dynamic graph"""
rnn_outputs, final_state = tf.nn.dynamic_rnn(cell=cell, inputs=rnn_inputs,initial_state=init_state)
"""Add prediction layer"""
with tf.variable_scope('softmax'):
W = tf.get_variable('W', [state_size, input_size_y])
b = tf.get_variable('b', [input_size_y], initializer=tf.constant_initializer(0.0))
rnn_outputs = tf.reshape(rnn_outputs, [-1, state_size])
predictions = tf.matmul(rnn_outputs, W) + b
yy = tf.reshape(y, [-1, input_size_y]) # batch_size*num_steps; when you define a placeholder in TensorFlow, the shape of the input fed during the session should match the shape of the placeholder
"Mean squared error loss"
loss=tf.reduce_mean(tf.square(tf.reshape(predictions,[-1])-tf.reshape(yy,[-1])))
"Adding regularization"
if lambda_l2_reg > 0 :
| tensorflow.get_variable | 6,479 |
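The row above is cut off right where the L2 term would be added to the MSE loss; a common TF 1.x pattern for that step (a hypothetical continuation, not this model's verified code) is:
import tensorflow as tf
with tf.variable_scope('softmax_demo'):
    W = tf.get_variable('W', [4, 2])
    b = tf.get_variable('b', [2], initializer=tf.constant_initializer(0.0))
lambda_l2_reg = 1e-4  # assumed value
l2_penalty = lambda_l2_reg * tf.add_n(
    [tf.nn.l2_loss(v) for v in tf.trainable_variables()
     if not v.name.split('/')[-1].startswith('b')])  # crude filter that skips bias variables
# total_loss = mse_loss + l2_penalty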
import tensorflow as tf
summary_writer = tf.summary.FileWriter(FLAGS.checkpoint_path, tf.get_default_graph())
init = tf.global_variables_initializer()
| tensorflow.global_variables_initializer | 6,480 |
import tensorflow as tf
'b_carry', [highway_dim],
initializer=tf.constant_initializer(-2.0),
| tensorflow.constant_initializer | 6,481 |
import tensorflow as tf
tf.app.flags.DEFINE_string('comment', '', 'Comment to leave by the model')
tf.app.flags.DEFINE_float('test_max', 10000, 'max number of examples in the test set')
tf.app.flags.DEFINE_integer('max_epochs', 0, 'Train for at most this number of epochs')
tf.app.flags.DEFINE_integer('save_every', 250, 'Save model state every INT epochs')
tf.app.flags.DEFINE_integer('eval_every', 25, 'Save encoding and visualizations every')
tf.app.flags.DEFINE_integer('visualiza_max', 10, 'Max pairs to show on visualization')
tf.app.flags.DEFINE_boolean('load_state', True, 'Load state if possible ')
tf.app.flags.DEFINE_boolean('kill_depth', False, 'Ignore depth information')
tf.app.flags.DEFINE_boolean('dev', False, 'Indicate development mode')
tf.app.flags.DEFINE_integer('batch_size', 128, 'Batch size')
tf.app.flags.DEFINE_float('learning_rate', 0.0001, 'Create visualization of ')
tf.app.flags.DEFINE_float('blur', 5.0, 'Max sigma value for Gaussian blur applied to training set')
tf.app.flags.DEFINE_boolean('new_blur', False, 'Use data augmentation as blur info')
tf.app.flags.DEFINE_integer('blur_decrease', 10000, 'Decrease image blur every X steps')
FLAGS = tf.app.flags.FLAGS
slim = tf.contrib.slim
AUTOENCODER = 'ae'
PREDICTIVE = 'pred'
DENOISING = 'noise'
CHECKPOINT_NAME = '-9999.chpt'
EMB_SUFFIX = '_embedding'
| tensorflow.app.flags.DEFINE_float | 6,482 |
import tensorflow as tf
s_w2, s_w4, s_w8, s_w16 = \
int(s_w/2), int(s_w/4), int(s_w/8), int(s_w/16)
output_z_ = lrelu(linear(trans_z, self.gf_dim*8*s_h16*s_w16, 'd_h0_lin'))
output_h0 = tf.reshape(output_z_, [-1, s_h16, s_w16, self.gf_dim * 8])
output_h1 = lrelu(deconv2d(tf.concat([output_h0, tgtctx_h3], 3),
[self.batch_size, s_h8, s_w8, self.gf_dim*4], name='d_h1'))
output_h2 = lrelu(deconv2d(tf.concat([output_h1, tgtctx_h2], 3),
| tensorflow.reshape | 6,483 |
import tensorflow as tf
metrics_dict[name] = fn(logits, labels)
return metrics_dict
return all_metrics_fn
def _remove_summaries():
g = tf.get_default_graph()
key = tf.GraphKeys.SUMMARIES
del g.get_collection_ref(key)[:]
assert not g.get_collection(key)
def _del_dict_nones(d):
for k in list(d.keys()):
| tensorflow.get_default_graph | 6,484 |
import tensorflow as tf
if step % 30 == 0 :
summary_str = sess.run(summary_op, feed_dict=train_feed_dict)
summary_writer.add_summary(summary=summary_str, global_step=epoch)
summary_writer.flush()
train_loss /= iteration # average loss
train_acc /= iteration # average accuracy
train_summary = tf.Summary(value=[tf.Summary.Value(tag='train_loss', simple_value=train_loss),
tf.Summary.Value(tag='train_accuracy', simple_value=train_acc)])
# test_acc, test_loss, test_summary = Evaluate(sess)
summary_writer.add_summary(summary=train_summary, global_step=epoch)
# summary_writer.add_summary(summary=test_summary, global_step=epoch)
summary_writer.flush()
| tensorflow.Summary.Value | 6,485 |
import tensorflow as tf
pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict)
self.assertListEqual(list(pre_pool_out.shape), [batch_size, 7, 7, 1024])
def testGlobalPoolUnknownImageShape(self):
tf.reset_default_graph()
batch_size = 1
height, width = 250, 300
num_classes = 1000
| tensorflow.reset_default_graph | 6,486 |
import tensorflow as tf
for i in range(0, config.num_clones):
with tf.name_scope(config.clone_scope(i)) as clone_scope:
clone_device = config.clone_device(i)
with tf.device(clone_device):
with tf.variable_scope(tf.get_variable_scope(),
reuse=True if i > 0 else None):
outputs = model_fn(*args, **kwargs)
clones.append(Clone(outputs, clone_scope, clone_device))
| tensorflow.get_variable_scope | 6,487 |
import tensorflow as tf
# @1. split sequence
with tf.variable_scope('split_seq'):
block_num = tf.cast(tf.ceil(tf.divide(tf.cast(sl, tf.float32), tf.cast(block_len, tf.float32))), tf.int32)
comp_len = block_num * block_len - sl
| tensorflow.cast | 6,488 |
import tensorflow as tf
def get_test_examples(self, data_dir):
"""Gets a collection of `InputExample`s for prediction."""
raise NotImplementedError()
def get_labels(self):
"""Gets the list of labels for this data set."""
raise NotImplementedError()
@classmethod
def _read_tsv(cls, input_file, quotechar=None):
"""Reads a tab separated value file."""
with tf.gfile.Open(input_file, "r") as f:
reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
lines = []
for line in reader:
lines.append(line)
return lines
class XnliProcessor(DataProcessor):
"""Processor for the XNLI data set."""
| tensorflow.gfile.Open | 6,489 |
import tensorflow as tf
config.gpu_options.allow_growth=True
sess = tf.Session(config=config)
learning_rate = tf.placeholder(tf.float32, shape=[])
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(test.loss)
sess.run(tf.global_variables_initializer())
allloss = []
validloss = []
itr = 0
| tensorflow.global_variables_initializer | 6,490 |
import tensorflow as tf
]
for i in range(len(output_sizes)):
expand_W = tf.get_variable("W_%d" % i, [current_size, output_sizes[i]])
expand_b = tf.get_variable("b_%d" % i, [output_sizes[i]])
output_data = tf.nn.bias_add(tf.matmul(output_data, expand_W), expand_b)
output_data = tf.nn.elu(output_data)
| tensorflow.get_variable | 6,491 |
import tensorflow as tf
else:
if type(padding)==type(''):
conv = tf.nn.conv2d(x, w, stride, padding)
else:
conv = tf.pad(x, padding, "CONSTANT")
conv = tf.nn.conv2d(conv, w, stride, padding='VALID')
if bias != -1:
bias = tf.get_variable('biases', [num_filters], initializer=tf.constant_initializer(bias))
variable_summaries(bias)
conv = tf.nn.bias_add(conv, bias)
tf.add_to_collection('debug_layers', conv)
return conv
@staticmethod
def _relu(name, x):
with tf.variable_scope(name):
return tf.nn.relu(x)
| tensorflow.nn.bias_add | 6,492 |
import tensorflow as tf
with tf.variable_scope(scope):
init_w = tf.random_normal_initializer(0., 0.01)
init_b = tf.constant_initializer(0.01)
with tf.variable_scope('l1'):
n_l1 = 700
# combine the action and states together in this way
w1_s = tf.get_variable('w1_s', [self.s_dim, n_l1], initializer=init_w, trainable=trainable)
w1_a = tf.get_variable('w1_a', [self.a_dim, n_l1], initializer=init_w, trainable=trainable)
b1 = tf.get_variable('b1', [1, n_l1], initializer=init_b, trainable=trainable)
net = tf.nn.relu(tf.matmul(s, w1_s) + tf.matmul(a, w1_a) + b1)
with tf.variable_scope('l2'):
net = tf.layers.dense(net, 20, activation=tf.nn.relu, kernel_initializer=init_w,
bias_initializer=init_b, name='l2', trainable=trainable)
with tf.variable_scope('q'):
q = tf.layers.dense(net, 1, kernel_initializer=init_w, bias_initializer=init_b, trainable=trainable) # Q(s,a)
| tensorflow.get_variable | 6,493 |
import tensorflow as tf
for method in at_methods:
FLAGS.adv_training_method = method
with tf.Graph().as_default():
graphs.VatxtModel().classifier_graph()
# Ensure variables have been reused
# Embedding + LSTM layers + hidden layers + logits layer
expected_num_vars = 1 + 2 * FLAGS.rnn_num_layers + 2 * (
FLAGS.cl_num_layers) + 2
self.assertEqual(len(tf.trainable_variables()), expected_num_vars)
def testSyncReplicas(self):
FLAGS.sync_replicas = True
graphs.VatxtModel().language_model_training()
def testCandidateSampling(self):
FLAGS.num_candidate_samples = 10
graphs.VatxtModel().language_model_training()
| tensorflow.trainable_variables | 6,494 |
import tensorflow as tf
weights=target_weights)
max_src_len = tf.shape(reconstructed_weights)[1]
batch_size = tf.shape(reconstructed_weights)[0]
attn_loss = tf.matmul(reconstructed_weights, attention_weights) - tf.eye(max_src_len)
src_mask = tf.sequence_mask(encoder_input_length[0], maxlen=max_src_len, dtype=tf.float32)
src_mask = tf.einsum('ij,ik->ijk', src_mask, src_mask)
attn_loss *= tf.to_float(src_mask) # don't take padding words into account
| tensorflow.eye | 6,495 |
import tensorflow as tf
mean = 0
std = tf.random.uniform(shape=[],minval=5,maxval=10,dtype=tf.float32) # std [5-10]
size = tf.random.uniform(shape=[],minval=3,maxval=7,dtype=tf.int32) # size [7-15]
self.kernel = self.gaussian_kernel(size,mean,std)
self.kernel = tf.tile(self.kernel[:, :, tf.newaxis, tf.newaxis], [1, 1, 3, 1])
self.paddings = tf.convert_to_tensor([[size,size],[size,size],[0,0]])
x_aug = tf.nn.separable_conv2d(tf.expand_dims(tf.pad(x,self.paddings,'SYMMETRIC'), 0), self.kernel, self.pointwise_filter,strides=[1, 1, 1, 1], padding='VALID')
x_aug = tf.squeeze(x_aug)
return tf.concat([x, x_aug],axis=2)
def high_low_pass(self,x):
x_low = tf.nn.separable_conv2d(tf.expand_dims(tf.pad(x,self.paddings,'SYMMETRIC'), 0), self.kernel, self.pointwise_filter,strides=[1, 1, 1, 1], padding='VALID')
| tensorflow.pad | 6,496 |
import tensorflow as tf
self.d_optimizer = tf.train.AdamOptimizer(self.learning_rate,beta1=self.beta1,beta2=self.beta2).minimize(self.discriminator_loss,var_list=self.d_variables)
self.g_optimizer = tf.train.AdamOptimizer(self.learning_rate,beta1=self.beta1,beta2=self.beta2).minimize(self.generator_loss,var_list=self.g_variables)
| tensorflow.train.AdamOptimizer | 6,497 |
import tensorflow as tf
counts.update(_split_string(line))
alphabet = [k for (k, _) in counts.most_common(max_size)]
alphabet.sort()
return np.asarray(alphabet, dtype=np.object)
chars, = tf.py_func(_unique_chars, [filename], [tf.string])
char_to_id = tf.contrib.lookup.index_table_from_tensor(
chars, num_oov_buckets=num_oov_buckets)
id_to_char = tf.contrib.lookup.index_to_string_table_from_tensor(chars, " ")
return char_to_id, id_to_char
| tensorflow.contrib.lookup.index_table_from_tensor | 6,498 |
import tensorflow.contrib.layers as layers
def atari_model(img_in, num_actions, scope, reuse=False):
# as described in https://storage.googleapis.com/deepmind-data/assets/papers/DeepMindNature14236Paper.pdf
with tf.variable_scope(scope, reuse=reuse):
out = img_in
with tf.variable_scope("convnet"):
# original architecture
out = layers.convolution2d(out, num_outputs=32, kernel_size=8, stride=4, activation_fn=tf.nn.relu)
out = layers.convolution2d(out, num_outputs=64, kernel_size=4, stride=2, activation_fn=tf.nn.relu)
out = layers.convolution2d(out, num_outputs=64, kernel_size=3, stride=1, activation_fn=tf.nn.relu)
out = layers.flatten(out)
with tf.variable_scope("action_value"):
out = layers.fully_connected(out, num_outputs=512, activation_fn=tf.nn.relu)
out = layers.fully_connected(out, num_outputs=num_actions, activation_fn=None)
return out
def atari_learn(env,
session,
num_timesteps):
# This is just a rough estimate
num_iterations = float(num_timesteps) / 4.0
| tensorflow.contrib.layers.fully_connected | 6,499 |