seed (string, length 25 to 2.89k) | seed_api (string, length 14 to 102) | index (int64, 0 to 14.8k)
---|---|---|
import tensorflow as tf
encoder_inputs_ = tf.concat(encoder_inputs_, axis=2)
if encoder.convolutions:
if encoder.binary:
raise NotImplementedError
pad = tf.nn.embedding_lookup(embeddings, utils.BOS_ID)
pad = tf.expand_dims(tf.expand_dims(pad, axis=0), axis=1)
pad = tf.tile(pad, [batch_size, 1, 1])
# Fully Character-Level NMT without Explicit Segmentation, Lee et al. 2016
inputs = []
for w, filter_size in enumerate(encoder.convolutions, 1):
| tensorflow.expand_dims | 3,800 |
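A minimal, self-contained sketch of the expand-and-tile padding pattern in the seed above; the embedding vector and batch size are illustrative stand-ins for the embedding lookup:

```python
import tensorflow as tf

# Stand-in for tf.nn.embedding_lookup(embeddings, utils.BOS_ID): one embedding vector.
pad = tf.constant([0.1, 0.2, 0.3])                         # shape [3]
pad = tf.expand_dims(tf.expand_dims(pad, axis=0), axis=1)  # shape [1, 1, 3]
pad = tf.tile(pad, [4, 1, 1])                              # batch_size = 4 -> shape [4, 1, 3]
```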
from tensorflow.python.framework import constant_op
from tensorflow.python.feature_column import feature_column_lib as core_feature_column
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops.losses import losses
from tensorflow.python.platform import gfile
from tensorflow.python.platform import googletest
def _train_input_fn():
features = {"x": constant_op.constant([[2.], [1.], [1.]])}
label = constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
return features, label
def _ranking_train_input_fn():
features = {
"a.f1": constant_op.constant([[3.], [0.3], [1.]]),
"a.f2": constant_op.constant([[0.1], [3.], [1.]]),
"b.f1": constant_op.constant([[13.], [0.4], [5.]]),
"b.f2": constant_op.constant([[1.], [3.], [0.01]]),
}
| tensorflow.python.framework.constant_op.constant | 3,801 |
import tensorflow as tf
tf.summary.histogram("gradient_norm", gradient_norms)
tf.summary.scalar("max_gradient_norm", tf.reduce_max(gradient_norms)) # visualize
# gradients (in case of explosion)
return tf.summary.merge_all()
def add_eval_stats(summary_writer, step, linear_loss, before_loss, after_loss, stop_token_loss,
loss):
values = [
tf.Summary.Value(tag="Tacotron_eval_model/eval_stats/eval_before_loss",
simple_value=before_loss),
tf.Summary.Value(tag="Tacotron_eval_model/eval_stats/eval_after_loss",
simple_value=after_loss),
tf.Summary.Value(tag="Tacotron_eval_model/eval_stats/stop_token_loss",
simple_value=stop_token_loss),
tf.Summary.Value(tag="Tacotron_eval_model/eval_stats/eval_loss", simple_value=loss),
]
if linear_loss is not None:
values.append(tf.Summary.Value(tag="Tacotron_eval_model/eval_stats/eval_linear_loss",
simple_value=linear_loss))
test_summary = tf.Summary(value=values)
summary_writer.add_summary(test_summary, step)
def time_string():
return datetime.now().strftime("%Y-%m-%d %H:%M")
def model_train_mode(args, feeder, hparams, global_step):
with tf.variable_scope("Tacotron_model", reuse=tf.AUTO_REUSE) as scope:
| tensorflow.Summary.Value | 3,802 |
import tensorflow as tf
tf.app.flags.DEFINE_integer('ws_nb_iters_ft', 400, 'WS: # of iterations for global fine-tuning')
tf.app.flags.DEFINE_integer('ws_nb_iters_feval', 25, 'WS: # of iterations for fast evaluation')
tf.app.flags.DEFINE_float('ws_prune_ratio_exp', 3.0, 'WS: pruning ratio\'s exponent term')
tf.app.flags.DEFINE_float('ws_iter_ratio_beg', 0.1, 'WS: iteration ratio (at starting time)')
tf.app.flags.DEFINE_float('ws_iter_ratio_end', 0.5, 'WS: iteration ratio (at ending time)')
tf.app.flags.DEFINE_float('ws_mask_update_step', 500, 'WS: step size for updating the pruning mask')
| tensorflow.app.flags.DEFINE_float | 3,803 |
import tensorflow as tf
"""
Function that applies an affine transformation
to the input tensor using the variables
from the dict layer.
:type input_tensor: tf tensor
:type layer: dictionary
:rtype: tf tensor
"""
return tf.add(tf.matmul(input_tensor, layer['weights']),
layer['bias'])
| tensorflow.matmul | 3,804 |
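A short sketch of the affine transform above with illustrative weights, so the shapes are visible:

```python
import tensorflow as tf

# y = xW + b, using the same layer-dict convention as the snippet above.
layer = {"weights": tf.constant([[1.0], [0.5]]),   # shape [2, 1]
         "bias": tf.constant([0.1])}               # shape [1]
input_tensor = tf.constant([[1.0, 2.0]])           # shape [1, 2]
output = tf.add(tf.matmul(input_tensor, layer["weights"]), layer["bias"])  # shape [1, 1]
```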
import tensorflow as tf
tf.logging.info("*** Features ***")
for name in sorted(features.keys()):
tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape))
input_ids = features["input_ids"]
input_mask = features["input_mask"]
segment_ids = features["segment_ids"]
label_ids = features["label_ids"]
is_real_example = None
if "is_real_example" in features:
is_real_example = tf.cast(features["is_real_example"], dtype=tf.float32)
else:
is_real_example = tf.ones(label_ids.shape[0], dtype=tf.float32)
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
(total_loss, per_example_loss, logits, probabilities) = create_model(
bert_config, is_training, input_ids, input_mask, segment_ids, label_ids,
num_labels, use_one_hot_embeddings)
tvars = tf.trainable_variables()
initialized_variable_names = {}
scaffold_fn = None
if init_checkpoint:
| tensorflow.ones | 3,805 |
import tensorflow as tf
d = tf.data.Dataset.from_tensor_slices({
"input_ids":
tf.constant(
all_input_ids, shape=[num_examples, seq_length],
dtype=tf.int32),
"input_mask":
tf.constant(
all_input_mask,
shape=[num_examples, seq_length],
dtype=tf.int32),
"segment_ids":
tf.constant(
all_segment_ids,
shape=[num_examples, seq_length],
dtype=tf.int32),
"label_ids":
tf.constant(all_label_ids, shape=[num_examples], dtype=tf.int32),
})
if is_training:
d = d.repeat()
d = d.shuffle(buffer_size=100)
| tensorflow.constant | 3,806 |
import tensorflow as tf
# The names are different and will work.
slice_saver = tf.train.Saver({"first": v1, "second": v2})
tf.initialize_all_variables().run()
# Exports to meta_graph
| tensorflow.initialize_all_variables | 3,807 |
import tensorflow as tf
output = f(tf.constant([1, 2]))
coord = tf.train.Coordinator()
tf.train.start_queue_runners(coord=coord)
with self.assertRaises(tf.errors.CancelledError):
session.run(output)
| tensorflow.train.start_queue_runners | 3,808 |
import tensorflow as tf
Define RNN graph
"""
def build_multilayer_rnn_graph_with_dynamic_rnn(cell_type, activation,state_size, num_steps, num_layers, input_size_x, input_size_y , learning_rate, lambda_l2_reg,random_seed=0):
reset_graph()
tf.set_random_seed(random_seed) #make reproducible results
input_size_x += input_size_y
"""Define the graph inputs"""
batch_size = tf.placeholder(tf.int32, [], name='batch_size')
x = tf.placeholder(tf.float32, [None, num_steps, input_size_x], name='x')
y = tf.placeholder(tf.float32, [None, num_steps, input_size_y], name='y')
input_prob = tf.placeholder(tf.float32, name='input_prob')
state_prob = tf.placeholder(tf.float32,name='state_prob')
output_prob = tf.placeholder(tf.float32,name='output_prob')
rnn_inputs = x
"""Define a single cell with variational dropout"""
def get_a_cell(state_size,input_prob,state_prob,num_input):
| tensorflow.placeholder | 3,809 |
import tensorflow as tf
class AssignOpTest(tf.test.TestCase):
def _initAssignFetch(self, x, y, use_gpu=False):
"""Initialize a param to init and update it with y."""
super(AssignOpTest, self).setUp()
with self.test_session(use_gpu=use_gpu):
p = tf.Variable(x)
assign = tf.assign(p, y)
p.initializer.run()
new_value = assign.eval()
return p.eval(), new_value
def _initAssignAddFetch(self, x, y, use_gpu=False):
"""Initialize a param to init, and compute param += y."""
| tensorflow.Variable | 3,810 |
from tensorflow.contrib.eager.python.examples.l2hmc import l2hmc
samples = tf.random_normal(shape=[hparams.n_samples, hparams.x_dim])
x_, v_, x_accept_prob, x_out = dynamics.apply_transition(samples)
self.assertEqual(x_.shape, v_.shape)
self.assertEqual(x_out.shape, samples.shape)
self.assertEqual(x_.shape, x_out.shape)
self.assertEqual(x_accept_prob.shape, (hparams.n_samples,))
# Graph mode testing
with tf.Graph().as_default():
energy_fn, _, _ = l2hmc.get_scg_energy_fn()
dynamics = l2hmc.Dynamics(
x_dim=hparams.x_dim,
minus_loglikelihood_fn=energy_fn,
n_steps=hparams.n_steps,
eps=hparams.eps)
x = tf.placeholder(tf.float32, shape=[None, hparams.x_dim])
x_, v_, x_accept_prob, x_out = dynamics.apply_transition(x)
samples = npr.normal(size=[hparams.n_samples, hparams.x_dim])
| tensorflow.contrib.eager.python.examples.l2hmc.l2hmc.get_scg_energy_fn | 3,811 |
import tensorflow as tf
horizon_pred, horizon_tgt = horizon_sumV1(pred, horizon), horizon_sumV1(tgt, horizon)
# horizon_pred, horizon_tgt = horizon_sumV2(pred, tgt, horizon)
pred_flat1, pred_flat2 = tf.reshape(horizon_pred, [-1, 1]), tf.reshape(horizon_pred, [1, -1])
tgt_flat1, tgt_flat2 = tf.reshape(horizon_tgt, [-1, 1]), tf.reshape(horizon_tgt, [1, -1])
tgt_dif = tgt_flat1 - tgt_flat2
pred_dif = pred_flat1 - pred_flat2
geq = tf.cast(tgt_dif > 0, tf.bool)
| tensorflow.reshape | 3,812 |
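A minimal sketch of the broadcasting trick in the seed above: reshaping a vector into a column and a row yields all pairwise differences.

```python
import tensorflow as tf

# diff[i, j] = v[i] - v[j] for every pair, via [-1, 1] x [1, -1] broadcasting.
v = tf.constant([1.0, 2.0, 4.0])
col = tf.reshape(v, [-1, 1])        # shape [3, 1]
row = tf.reshape(v, [1, -1])        # shape [1, 3]
diff = col - row                    # shape [3, 3]
geq = tf.cast(diff > 0, tf.bool)    # pairwise ordering mask, as in the ranking loss
```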
import tensorflow as tf
self._weight_contrastive_loss_d = weight_contrastive_loss_d
self._aug_color_jitter_prob = aug_color_jitter_prob
self._aug_color_drop_prob = aug_color_drop_prob
# To save memory, ModularGAN supports feeding real and fake samples
# separately through the discriminator. CLGAN does not support this, to
# avoid additional complexity in create_loss().
assert not self._deprecated_split_disc_calls, \
"Splitting discriminator calls is not supported in CLGAN."
def _latent_projections(self, latents):
bs, dim = latents.get_shape().as_list()
with tf.variable_scope("discriminator_z_projection", reuse=tf.AUTO_REUSE) as scope:
k1 = tf.get_variable("kernel1", [dim, dim * 4])
k2 = tf.get_variable("kernel2", [dim * 4, dim])
z_proj = tf.matmul(tf.nn.leaky_relu(tf.matmul(latents, k1), name=scope.name), k2)
z_proj = z_proj / tf.reshape(tf.norm(z_proj, ord=2, axis=-1), [bs, 1])
return z_proj
def create_loss(self, features, labels, params, is_training=True):
"""Build the loss tensors for discriminator and generator.
This method will set self.d_loss and self.g_loss.
Args:
features: Optional dictionary with inputs to the model ("images" should
contain the real images and "z" the noise for the generator).
| tensorflow.get_variable | 3,813 |
import tensorflow as tf
n_bins = 2**n_bits
rgb = tf.image.decode_png(tf.read_file(img_path), channels=3, dtype=tf.uint8)
| tensorflow.read_file | 3,814 |
import tensorflow as tf
def get_slow_antecedent_scores(self, top_span_emb, top_antecedents, top_antecedent_emb, top_antecedent_offsets, top_span_speaker_ids, genre_emb):
k = util.shape(top_span_emb, 0)
c = util.shape(top_antecedents, 1)
feature_emb_list = []
if self.config["use_metadata"]:
top_antecedent_speaker_ids = tf.gather(top_span_speaker_ids, top_antecedents) # [k, c]
same_speaker = tf.equal(tf.expand_dims(top_span_speaker_ids, 1), top_antecedent_speaker_ids) # [k, c]
speaker_pair_emb = tf.gather(tf.get_variable("same_speaker_emb", [2, self.config["feature_size"]]), tf.to_int32(same_speaker)) # [k, c, emb]
feature_emb_list.append(speaker_pair_emb)
tiled_genre_emb = tf.tile(tf.expand_dims(tf.expand_dims(genre_emb, 0), 0), [k, c, 1]) # [k, c, emb]
feature_emb_list.append(tiled_genre_emb)
| tensorflow.gather | 3,815 |
import tensorflow as tf
self.assertAllEqual(true_kl, kl_)
self.assertAllClose(true_kl, kl_sample_, atol=0., rtol=1e-2)
zero_kl = tfd.kl_divergence(a, a)
true_zero_kl_, zero_kl_ = self.evaluate([tf.zeros_like(true_kl), zero_kl])
self.assertAllEqual(true_zero_kl_, zero_kl_)
def testParetoParetoKLInfinite(self):
| tensorflow.zeros_like | 3,816 |
import tensorflow as tf
train_writer.close()
validation_writer.close()
def _feature_matching_loss(self, real_data_features, fake_data_features):
real_data_mean = tf.reduce_mean(real_data_features, axis=0)
fake_data_mean = tf.reduce_mean(fake_data_features, axis=0)
feature_loss = tf.reduce_mean(tf.abs(tf.subtract(real_data_mean, fake_data_mean)))
return feature_loss
| tensorflow.reduce_mean | 3,817 |
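A compact sketch of the feature-matching loss above, with small illustrative feature matrices:

```python
import tensorflow as tf

# Mean absolute difference between per-feature means of real and fake batches.
real_feats = tf.constant([[1.0, 2.0], [3.0, 4.0]])
fake_feats = tf.constant([[1.5, 1.5], [2.5, 3.5]])
real_mean = tf.reduce_mean(real_feats, axis=0)
fake_mean = tf.reduce_mean(fake_feats, axis=0)
feature_loss = tf.reduce_mean(tf.abs(tf.subtract(real_mean, fake_mean)))
```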
import tensorflow as tf
scale_mode=scale_mode,
name=name,
)
@tf.keras.utils.register_keras_serializable(package="Addons")
class ExponentialCyclicalLearningRate(CyclicalLearningRate):
def __init__(
self,
initial_learning_rate,
| tensorflow.keras.utils.register_keras_serializable | 3,818 |
import tensorflow as tf
# Weights has shape [batch_size]. Reshape to [batch_size, 1].
weights = tf.reshape(weights, [-1, 1])
| tensorflow.reshape | 3,819 |
import tensorflow as tf
features["input_mask"] = create_int_feature(feature.input_mask)
features["segment_ids"] = create_int_feature(feature.segment_ids)
features["label_ids"] = create_int_feature(feature.label_ids)
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
writer.write(tf_example.SerializeToString())
def file_based_input_fn_builder(input_file, seq_length, is_training, drop_remainder):
name_to_features = {
"input_ids": tf.FixedLenFeature([seq_length], tf.int64),
"input_mask": tf.FixedLenFeature([seq_length], tf.int64),
"segment_ids": tf.FixedLenFeature([seq_length], tf.int64),
"label_ids": tf.FixedLenFeature([seq_length], tf.int64),
}
def _decode_record(record, name_to_features):
example = tf.parse_single_example(record, name_to_features)
for name in list(example.keys()):
t = example[name]
if t.dtype == tf.int64:
t = tf.to_int32(t)
example[name] = t
return example
| tensorflow.FixedLenFeature | 3,820 |
import tensorflow as tf
scores = tf.nn.softmax(scores) # [B, 1, T]
# Weighted sum
if mode == 'SUM':
output = tf.matmul(scores, facts) # [B, 1, H]
# output = tf.reshape(output, [-1, tf.shape(facts)[-1]])
else:
scores = tf.reshape(scores, [-1, tf.shape(facts)[1]])
output = facts * tf.expand_dims(scores, -1)
output = tf.reshape(output, tf.shape(facts))
if return_alphas:
return output, scores
return output
def self_attention(facts, ATTENTION_SIZE, mask, stag='null'):
if len(facts.get_shape().as_list()) == 2:
facts = tf.expand_dims(facts, 1)
| tensorflow.shape | 3,821 |
import tensorflow as tf
config = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False, device_count={'GPU': gpu})
config.gpu_options.allow_growth = True
config.gpu_options.per_process_gpu_memory_fraction = 0.5
# Placeholders
self.sess = tf.Session(config=config)
self.s_dim, self.a_dim = env.observation_space.shape, env.action_space.shape[0]
self.a_bound = (env.action_space.high - env.action_space.low) / 2
self.actions = tf.placeholder(tf.float32, [None, self.a_dim], 'action')
self.state = tf.placeholder(tf.float32, [None, self.s_dim[0]], 'state')
self.advantage = tf.placeholder(tf.float32, [None, 1], 'advantage')
self.rewards = tf.placeholder(tf.float32, [None, 1], 'rewards')
self.keep_prob = tf.placeholder(tf.float32, name='dropout_keep_prob')
# Dataset with experience replay
self.dataset = tf.data.Dataset.from_tensor_slices({'state': self.state, 'actions': self.actions,
'rewards': self.rewards, 'advantage': self.advantage})
self.dataset = self.dataset.batch(self.MINIBATCH, drop_remainder=True)
self.data_iter = self.dataset.make_initializable_iterator()
batch = self.data_iter.get_next()
# Call ppo net
pi_old, pi_old_params, _, _ = self.build_anet(batch['state'], 'oldpi')
| tensorflow.placeholder | 3,822 |
import tensorflow as tf
summary = xlnet_model.get_pooled_out(FLAGS.summary_type, FLAGS.use_summ_proj)
with tf.variable_scope("model", reuse=tf.AUTO_REUSE):
| tensorflow.variable_scope | 3,823 |
import tensorflow as tf
grid = tf.meshgrid(tf.range(grid_w), tf.range(grid_h))
grid = tf.expand_dims(tf.stack(grid, axis=-1), axis=2) # [gx, gy, 1, 2]
box_xy = (box_xy + tf.cast(grid, dtype)) * stride
box_wh = tf.exp(box_wh) * anchors
box_x1y1 = box_xy - box_wh / 2.
box_x2y2 = box_xy + box_wh / 2.
box = tf.concat([box_x1y1, box_x2y2], axis=-1)
boxes.append(tf.reshape(box, (x_shape[0], -1, 1, 4)))
objects.append(tf.reshape(obj, (x_shape[0], -1, 1)))
classes.append(tf.reshape(cls, (x_shape[0], -1, num_classes)))
boxes = tf.concat(boxes, axis=1)
objects = tf.concat(objects, axis=1)
classes = tf.concat(classes, axis=1)
scores = objects * classes
boxes, scores, classes, valid = tf.image.combined_non_max_suppression(
boxes=boxes,
scores=scores,
max_output_size_per_class=max_outputs,
max_total_size=max_outputs,
iou_threshold=iou_threshold,
| tensorflow.reshape | 3,824 |
from tensorflow.python.framework import ops
@ops.RegisterShape("Div")
@ops.RegisterShape("Equal")
@ops.RegisterShape("Greater")
@ops.RegisterShape("GreaterEqual")
@ops.RegisterShape("Less")
@ops.RegisterShape("LessEqual")
@ops.RegisterShape("LogicalAnd")
@ops.RegisterShape("LogicalOr")
| tensorflow.python.framework.ops.RegisterShape | 3,825 |
import tensorflow as tf
reg = 0
# L1 weight regularization
reg += self.L1_in * tf.reduce_mean(tf.abs(self.W_in) * self.input_Connectivity)
reg += self.L1_rec * tf.reduce_mean(tf.abs(self.W_rec) * self.rec_Connectivity)
if self.dale_ratio:
reg += self.L1_out * tf.reduce_mean(tf.matmul(tf.abs(self.W_out) * self.output_Connectivity, self.Dale_out))
else:
reg += self.L1_out * tf.reduce_mean(tf.abs(self.W_out) * self.output_Connectivity)
# L2 weight regularization
reg += self.L2_in * tf.reduce_mean(tf.square(tf.abs(self.W_in) * self.input_Connectivity))
| tensorflow.abs | 3,826 |
import tensorflow as tf
with tf.variable_scope('layer1'):
w1 = tf.get_variable('weight1', [784, 1024], initializer=tf.random_normal_initializer())
b1 = tf.get_variable('bias1', [1024], initializer=tf.constant_initializer(0.0))
h1 = tf.nn.relu(tf.matmul(x, w1) + b1)
with tf.variable_scope('layer2'):
w2 = tf.get_variable('weight2', [1024, 1024], initializer=tf.random_normal_initializer())
b2 = tf.get_variable('bias2', [1024], initializer=tf.constant_initializer(0.0))
h2 = tf.nn.relu(tf.matmul(h1, w2) + b2)
with tf.variable_scope('layer3'):
w3 = tf.get_variable('weight3', [1024, 10], initializer=tf.random_normal_initializer())
b3 = tf.get_variable('bias3', [10], initializer=tf.constant_initializer(0.0))
y = tf.matmul(h2, w3) + b3
# losses
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=gt, logits=y))
# optimizer
optimizer = tf.train.GradientDescentOptimizer(args.lr)
# define one-step train ops
train_op = optimizer.minimize(cross_entropy)
return x, y, gt, train_op
| tensorflow.constant_initializer | 3,827 |
import tensorflow as tf
b_out_initializer = tf.constant_initializer(0.0)
else:
print("Loading Weights")
weights = np.load(self.load_weights_path)
init_state_initializer = tf.constant_initializer(weights['init_state'])
W_in_initializer = tf.constant_initializer(weights['W_in'])
W_rec_initializer = tf.constant_initializer(weights['W_rec'])
W_out_initializer = tf.constant_initializer(weights['W_out'])
b_rec_initializer = tf.constant_initializer(weights['b_rec'])
b_out_initializer = tf.constant_initializer(weights['b_out'])
| tensorflow.constant_initializer | 3,828 |
import tensorflow as tf
"""Flatten the image."""
img = features['image']
flat = tf.cast(tf.reshape(img, [-1]), tf.int64)
tgt = tf.expand_dims(targets, axis=0)
| tensorflow.reshape | 3,829 |
import tensorflow as tf
'c3',
nf=64,
rf=3,
stride=1,
init_scale=np.sqrt(2)))
nh = np.prod([v.value for v in c3.get_shape()[1:]])
h3 = tf.reshape(c3, [-1, nh])
pre_s = tf.nn.relu(self.fc(h3, 'fc1', nh=512, init_scale=np.sqrt(2)))
# Critic
# Define variables
# self.tfs = tf.placeholder(tf.float32, [None, image_features], 'state')
self.tfdc_r = tf.placeholder(tf.float32, [None, 1], 'discounted_r')
# Build the network layers
l1 = tf.layers.dense(
inputs=pre_s,
units=100, # number of hidden units
activation=tf.nn.relu,
name='l1'
)
self.v = tf.layers.dense(
inputs=l1,
units=1, # output units
| tensorflow.placeholder | 3,830 |
import tensorflow.contrib.layers as layers
out = layers.flatten(out)
with tf.variable_scope("action_value"):
out = layers.fully_connected(out, num_outputs=num_actions, activation_fn=None)
| tensorflow.contrib.layers.fully_connected | 3,831 |
import tensorflow as tf
"""
shortcut = inputs
if projection_shortcut is not None:
shortcut = projection_shortcut(inputs)
shortcut = batch_norm(inputs=shortcut, training=training,
data_format=data_format)
inputs = conv2d_fixed_padding(
inputs=inputs, filters=filters, kernel_size=3, strides=strides,
data_format=data_format)
inputs = batch_norm(inputs, training, data_format)
inputs = tf.nn.relu(inputs)
inputs = conv2d_fixed_padding(
inputs=inputs, filters=filters, kernel_size=3, strides=1,
data_format=data_format)
inputs = batch_norm(inputs, training, data_format)
inputs += shortcut
inputs = tf.nn.relu(inputs)
return inputs
| tensorflow.nn.relu | 3,832 |
import tensorflow as tf
correct = tf.equal(
tf.cast(tf.ones_like(label_ids, dtype=tf.int32), tf.int32),
tf.cast(pred_label, tf.int32)
)
st_accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
pred_label = tf.argmax(distillation_loss["te_logits"], axis=-1, output_type=tf.int32)
correct = tf.equal(
tf.cast(tf.zeros_like(label_ids, dtype=tf.int32), tf.int32),
tf.cast(pred_label, tf.int32)
)
te_accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
| tensorflow.argmax | 3,833 |
import tensorflow as tf
# Initialize NN
self.weights, self.biases = self.initialize_NN(layers)
# Initialize parameters
self.lambda_1 = tf.Variable([0.0], dtype=tf.float32)
self.lambda_2 = tf.Variable([-6.0], dtype=tf.float32)
# Load IRK weights
tmp = np.float32(np.loadtxt('../../Utilities/IRK_weights/Butcher_IRK%d.txt' % (q), ndmin = 2))
weights = np.reshape(tmp[0:q**2+q], (q+1,q))
self.IRK_alpha = weights[0:-1,:]
| tensorflow.Variable | 3,834 |
import tensorflow as tf
self,
'fb_bias_%s' % idx,
tf.get_variable(
name='%s_fb_bias_%s' % (self.layer_name, idx),
dtype=self.dtype,
initializer=tf.ones([lower_feats], dtype=self.dtype),
trainable=True))
setattr(
self,
'ff_kernel_%s' % idx,
| tensorflow.ones | 3,835 |
import tensorflow as tf
result_branch = _evaluate_spherical_harmonics_branch(
degree_l, order_m, theta, phi, sign_m, var_type)
return tf.where(tf.equal(order_m, zeros), result_m_zero, result_branch)
| tensorflow.equal | 3,836 |
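A minimal sketch of the tf.equal / tf.where selection used above, with illustrative tensors in place of the spherical-harmonics results:

```python
import tensorflow as tf

# Elementwise pick: where order_m == 0 take result_m_zero, otherwise result_branch.
order_m = tf.constant([0, 1, 0])
result_m_zero = tf.constant([10.0, 20.0, 30.0])
result_branch = tf.constant([1.0, 2.0, 3.0])
out = tf.where(tf.equal(order_m, tf.zeros_like(order_m)), result_m_zero, result_branch)
# out evaluates to [10.0, 2.0, 30.0]
```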
from tensorflow.python.ops import math_ops
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
with variable_scope.variable_scope(name, 'recall_at_thresholds',
[predictions, labels]):
(true_positives, false_negatives, _, _, true_positives_compute_op,
false_negatives_compute_op, _, _,) = _tp_fn_tn_fp(
predictions, labels, thresholds, weights)
# avoid division by zero
epsilon = 1e-7
def compute_recall(name):
recall = math_ops.div(true_positives,
epsilon + true_positives + false_negatives,
name='recall_' + name)
return recall
recall = compute_recall('value')
with ops.control_dependencies([true_positives_compute_op,
false_negatives_compute_op]):
update_op = compute_recall('update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, recall)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
| tensorflow.python.ops.math_ops.div | 3,837 |
import tensorflow as tf
logits, feat = resnet_model_fn(x, training=training_flag)
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=one_hot_labels, logits=logits))
Focal_loss = tf.reduce_mean(focal_loss(one_hot_labels, logits, alpha=0.5))
l2_loss = weight_decay * tf.add_n([tf.nn.l2_loss(v) for v in tf.trainable_variables()])
Center_loss, Centers = center_loss(feat, tf.cast(label, dtype=tf.int32), 0.95, class_num)
Total_loss = cost + l2_loss
optimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate, momentum=momentum, use_nesterov=True)
# Batch norm requires update_ops to be added as a train_op dependency.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
train_op = optimizer.minimize(Total_loss)
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(one_hot_labels, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# val_dir = '/data0/AIChallenger/ai_challenger_scene_validation_20170908/scene_validation_images_20170908/'
# annotations = '/data0/AIChallenger/ai_challenger_scene_validation_20170908/scene_validation_annotations_20170908.json'
# # a DataFlow you implement to produce [tensor1, tensor2, ..] lists from whatever sources:
# df = MyDataFlow(val_dir, annotations, is_training=False, batch_size=batch_size, img_size=image_size)
# # start 3 processes to run the dataflow in parallel
# df = PrefetchDataZMQ(df, nr_proc=10)
# df.reset_state()
# scene_data_val = df.get_data()
train_dir = '/data0/AIChallenger/data_256'
annotations = '/data0/AIChallenger/data_256.json'
# a DataFlow you implement to produce [tensor1, tensor2, ..] lists from whatever sources:
| tensorflow.argmax | 3,838 |
import tensorflow as tf
model_lam=3e-4,
activation=output_activation,
name="Out")
x = out_layer(x)
regularization += out_layer.regularization
return x, regularization
def mlp_dropout(x, hidden_sizes=(32,), activation=tf.tanh, output_activation=None, dropout_rate=0):
for h in hidden_sizes[:-1]:
x = tf.layers.dense(x, units=h, activation=activation)
x = tf.layers.dropout(x, rate=dropout_rate, training=True)
x = tf.layers.dropout(x, rate=dropout_rate, training=True)
return tf.layers.dense(x, units=hidden_sizes[-1], activation=output_activation)
def mlp(x, hidden_sizes=(32,), activation=tf.tanh, output_activation=None):
for h in hidden_sizes[:-1]:
x = tf.layers.dense(x, units=h, activation=activation)
return tf.layers.dense(x, units=hidden_sizes[-1], activation=output_activation)
| tensorflow.layers.dense | 3,839 |
import tensorflow as tf
landm_true = tf.reshape(y_true[..., 4:14], [num_batch * num_prior, 10])
landm_valid = tf.reshape(y_true[..., 14], [num_batch * num_prior, 1])
class_true = tf.reshape(y_true[..., 15], [num_batch * num_prior, 1])
# define filter mask: class_true = 1 (pos), 0 (neg), -1 (ignore)
# landm_valid = 1 (w landm), 0 (w/o landm)
mask_pos = tf.equal(class_true, 1)
mask_neg = tf.equal(class_true, 0)
mask_landm = tf.logical_and(tf.equal(landm_valid, 1), mask_pos)
# landm loss (smooth L1)
mask_landm_b = tf.broadcast_to(mask_landm, tf.shape(landm_true))
loss_landm = _smooth_l1_loss(tf.boolean_mask(landm_true, mask_landm_b),
tf.boolean_mask(landm_pred, mask_landm_b))
| tensorflow.equal | 3,840 |
import tensorflow as tf
bboxes = tf.placeholder(tf.float32)
bboxes_val = [[10, 10, 20, 22]]
gt_boxes = tf.placeholder(tf.float32)
gt_boxes_val = [[11, 13, 34, 31]]
imshape = tf.placeholder(tf.int32)
imshape_val = (100, 100)
deltas = encode(bboxes, gt_boxes)
decoded_bboxes = decode(bboxes, deltas)
final_decoded_bboxes = clip_boxes(decoded_bboxes, imshape)
with tf.Session() as sess:
final_decoded_bboxes = sess.run(final_decoded_bboxes, feed_dict={
bboxes: bboxes_val,
gt_boxes: gt_boxes_val,
imshape: imshape_val,
})
assert np.all(gt_boxes_val == final_decoded_bboxes)
| tensorflow.Session | 3,841 |
from tensorflow.python.framework import ops
x = ops.convert_to_tensor(x, name="x")
weights = ops.convert_to_tensor(weights, name="weights")
biases = ops.convert_to_tensor(biases, name="biases")
mm = math_ops.matmul(x, weights)
| tensorflow.python.framework.ops.convert_to_tensor | 3,842 |
import tensorflow as tf
if time_major:
# (T,B,D) => (B,T,D)
facts = tf.array_ops.transpose(facts, [1, 0, 2])
# Trainable parameters
mask = tf.equal(mask, tf.ones_like(mask))
facts_size = facts.get_shape().as_list()[-1] # D value - hidden size of the RNN layer
querry_size = query.get_shape().as_list()[-1]
query = tf.layers.dense(query, facts_size, activation=None, name='f1_trans_shine' + stag)
query = prelu(query)
queries = tf.tile(query, [1, tf.shape(facts)[1]])
queries = tf.reshape(queries, tf.shape(facts))
din_all = tf.concat([queries, facts, queries-facts, queries*facts], axis=-1)
d_layer_1_all = tf.layers.dense(din_all, facts_size, activation=tf.nn.sigmoid, name='f1_shine_att' + stag)
d_layer_2_all = tf.layers.dense(d_layer_1_all, facts_size, activation=tf.nn.sigmoid, name='f2_shine_att' + stag)
d_layer_2_all = tf.reshape(d_layer_2_all, tf.shape(facts))
output = d_layer_2_all
return output
| tensorflow.layers.dense | 3,843 |
import tensorflow as tf
output_propensity_list = []
for i in range(list_size):
# Add position information (one-hot vector)
click_feature = [tf.expand_dims(tf.zeros_like(self.labels[i]) , -1) for _ in range(4*list_size)]
click_feature[i] = tf.expand_dims(tf.ones_like(self.labels[i]) , -1)
# click_feature[list_size:]=[tf.expand_dims(tf.zeros_like(self.labels[i]) , -1) for _ in range(3*list_size)]
click_feature[list_size:list_size+i] =[tf.expand_dims(self.labels[k] , -1) for k in range(i-1,-1,-1)]
| tensorflow.zeros_like | 3,844 |
import tensorflow as tf
global _CurrentTowerContext
assert _CurrentTowerContext is None, "Cannot nest TowerContext!"
_CurrentTowerContext = self
if self.is_training:
curr_vs = tf.get_variable_scope()
assert curr_vs.name == '', "In training, cannot nest TowerContext with an existing variable scope!"
self._ctxs = self._get_scopes()
self._ctxs.append(self._collection_guard)
for c in self._ctxs:
c.__enter__()
if get_tf_version_number() >= 1.2:
# check that ns_name is always the same as _name
ns = tf.get_default_graph().get_name_scope()
assert ns == self._name, \
"Name conflict: name_scope inside tower '{}' becomes '{}'!".format(self._name, ns) \
+ " You may need a different name for the tower!"
return self
def __exit__(self, exc_type, exc_val, exc_tb):
global _CurrentTowerContext
_CurrentTowerContext = None
if not self.has_own_variables:
diff_trainable_vars = self._collection_guard.get_collection_in_tower(tf.GraphKeys.TRAINABLE_VARIABLES)
assert len(diff_trainable_vars) == 0, \
"New TRAINABLE_VARIABLES shouldn't be created in {}: ".format(
| tensorflow.get_default_graph | 3,845 |
from tensorflow.python.framework import ops
if i != dimension:
returned_shape.append(dim)
return [tensor_shape.TensorShape(returned_shape)]
else:
raise ValueError(
"dimension (%d) must be in the range [0, %d), where %d is the number "
"of dimensions in the input"
% (dimension, input_shape.ndims, input_shape.ndims))
@ops.RegisterShape("All")
@ops.RegisterShape("Any")
@ops.RegisterShape("Max")
@ops.RegisterShape("Mean")
@ops.RegisterShape("Min")
@ops.RegisterShape("Prod")
@ops.RegisterShape("Sum")
def _ReductionShape(op):
"""Common shape function for reduction ops."""
input_shape = op.inputs[0].get_shape()
reduction_indices = tensor_util.ConstantValue(op.inputs[1])
keep_dims = op.get_attr("keep_dims")
if reduction_indices is None or input_shape.ndims is None:
if keep_dims:
return [tensor_shape.unknown_shape(ndims=input_shape.ndims)]
else:
return [tensor_shape.unknown_shape()]
| tensorflow.python.framework.ops.RegisterShape | 3,846 |
import tensorflow as tf
strides = [1, 1, stride, stride]
bshape = [1, nf, 1, 1]
else:
raise NotImplementedError
bias_var_shape = [nf] if one_dim_bias else [1, nf, 1, 1]
nin = x.get_shape()[channel_ax].value
wshape = [rf, rf, nin, nf]
with tf.variable_scope(scope):
w = tf.get_variable("w", wshape, initializer=ortho_init(init_scale))
b = tf.get_variable("b", bias_var_shape, initializer=tf.constant_initializer(0.0))
if not one_dim_bias and data_format == 'NHWC':
b = tf.reshape(b, bshape)
return tf.nn.conv2d(x, w, strides=strides, padding=pad, data_format=data_format) + b
def fc(x, scope, nh, *, init_scale=1.0, init_bias=0.0):
with tf.variable_scope(scope):
nin = x.get_shape()[1].value
w = tf.get_variable("w", [nin, nh], initializer=ortho_init(init_scale))
b = tf.get_variable("b", [nh], initializer=tf.constant_initializer(init_bias))
return tf.matmul(x, w)+b
def batch_to_seq(h, nbatch, nsteps, flat=False):
if flat:
h = tf.reshape(h, [nbatch, nsteps])
else:
h = tf.reshape(h, [nbatch, nsteps, -1])
return [tf.squeeze(v, [1]) for v in tf.split(axis=1, num_or_size_splits=nsteps, value=h)]
def seq_to_batch(h, flat = False):
shape = h[0].get_shape().as_list()
| tensorflow.variable_scope | 3,847 |
import tensorflow as tf
output_z_ = lrelu(linear(trans_z, self.gf_dim*8*s_h16*s_w16, 'd_h0_lin'))
output_h0 = tf.reshape(output_z_, [-1, s_h16, s_w16, self.gf_dim * 8])
output_h1 = lrelu(deconv2d(tf.concat([output_h0, tgtctx_h3], 3),
| tensorflow.reshape | 3,848 |
from tensorflow.python.ops import array_ops
if (weights_shape.is_fully_defined() and
values_shape.is_fully_defined() and
weights_shape.is_compatible_with(values_shape)):
return weights
return math_ops.mul(
weights, array_ops.ones_like(values), name='broadcast_weights')
def streaming_mean(values, weights=None, metrics_collections=None,
updates_collections=None, name=None):
| tensorflow.python.ops.array_ops.ones_like | 3,849 |
import tensorflow as tf
# word_strings = self.id2word(word_ids, name='word_strings')
# print(word_strings)
if self.mode == tf.estimator.ModeKeys.PREDICT:
predictions = {"pred_ids": pred_ids, "tags": pred_strings}
return tf.estimator.EstimatorSpec(self.mode, predictions=predictions)
else:
# true_tag_ids = self.labels
true_tag_ids = self.tag2id(self.labels, "labels")
# print(pred_strings)
# print(self.labels)
| tensorflow.estimator.EstimatorSpec | 3,850 |
import tensorflow as tf
x = tf.constant(x_np, tf.float32)
x_blend = networks.blend_images(
x,
progress=tf.constant(0.0),
resolution_schedule=networks.ResolutionSchedule(
scale_base=2, num_resolutions=2),
num_blocks=2)
with self.test_session(use_gpu=True) as sess:
x_blend_np = sess.run(x_blend)
x_blend_expected_np = sess.run(layers.upscale(layers.downscale(x, 2), 2))
self.assertNDArrayNear(x_blend_np, x_blend_expected_np, 1.0e-6)
def test_blend_images_in_transition_stage(self):
x_np = np.random.normal(size=[2, 8, 8, 3])
x = tf.constant(x_np, tf.float32)
x_blend = networks.blend_images(
x,
tf.constant(0.2),
resolution_schedule=networks.ResolutionSchedule(
scale_base=2, num_resolutions=2),
num_blocks=2)
with self.test_session(use_gpu=True) as sess:
x_blend_np = sess.run(x_blend)
x_blend_expected_np = 0.8 * sess.run(
layers.upscale(layers.downscale(x, 2), 2)) + 0.2 * x_np
self.assertNDArrayNear(x_blend_np, x_blend_expected_np, 1.0e-6)
def test_num_filters(self):
| tensorflow.constant | 3,851 |
import tensorflow as tf
red_ind = list(range(1, len(x.get_shape())))
avoid_zero_div = 1e-8
square = tf.maximum(avoid_zero_div,
reduce_sum(tf.square(grad),
reduction_indices=red_ind,
keepdims=True))
normalized_grad = old_div(grad, tf.sqrt(square))
else:
normalized_grad = tf.sign(grad)
normalized_grad = tf.stop_gradient(normalized_grad)
scaled_grad = eps * normalized_grad
| tensorflow.sqrt | 3,852 |
import tensorflow as tf
inputs,
vocabulary_size,
embedding_size,
pad_value=0,
embeddings_initializer=tf.random_uniform_initializer(-0.1, 0.1),
embeddings_kwargs=None,
name='average_embedding',
):
| tensorflow.random_uniform_initializer | 3,853 |
import tensorflow as tf
h_in = tf.placeholder(tf.float32, [1, lstm_cell.state_size.h])
state_in = (c_in, h_in)
rnn_in = tf.expand_dims(self.h3, [0])
step_size = tf.shape(inputs)[:1]
state_in = tf.nn.rnn_cell.LSTMStateTuple(c_in, h_in)
lstm_outputs, lstm_state = tf.nn.dynamic_rnn(
lstm_cell, rnn_in, initial_state=state_in, sequence_length=step_size,
time_major=False)
| tensorflow.nn.rnn_cell.LSTMStateTuple | 3,854 |
import tensorflow as tf
if tf.executing_eagerly():
return
# Test case 3.
x = tf.placeholder_with_default(input=1, shape=None)
is_scalar = normal._is_scalar_helper(x.shape, lambda: tf.shape(x))
self.assertTrue(self.evaluate(is_scalar))
| tensorflow.placeholder_with_default | 3,855 |
import tensorflow as tf
# Set up functionality to re-compute `param_noise_scale`. This perturbs yet another copy
# of the network and measures the effect of that perturbation in action space. If the perturbation
# is too big, reduce scale of perturbation, otherwise increase.
with tf.variable_scope("adaptive_model", reuse=False):
adaptive_policy = q_func(sess, ob_space, ac_space, 1, 1, None, obs_phs=obs_phs)
perturb_for_adaption = perturb_vars(original_scope="model", perturbed_scope="adaptive_model/model")
| tensorflow.variable_scope | 3,856 |
import tensorflow as tf
for grad, var in avg_grads
]
else:
clipped_grads = avg_grads
if FLAGS.optimizer == 'momentum':
opt = tf.train.MomentumOptimizer(
learning_rate, FLAGS.momentum, use_nesterov=True)
elif FLAGS.optimizer == 'sgd':
opt = tf.train.GradientDescentOptimizer(learning_rate)
elif FLAGS.optimizer == 'rmsprop':
opt = tf.train.RMSPropOptimizer(learning_rate, FLAGS.rmsprop_decay,
momentum=FLAGS.rmsprop_momentum,
epsilon=FLAGS.rmsprop_epsilon)
else:
raise ValueError('Optimizer "%s" was not recognized', FLAGS.optimizer)
self.variable_mgr.append_apply_gradients_ops(
gradient_state, opt, clipped_grads, training_ops)
train_op = tf.group(*(training_ops + update_ops + extra_nccl_ops))
with tf.device(self.cpu_device):
| tensorflow.train.RMSPropOptimizer | 3,857 |
import tensorflow as tf
flags.DEFINE_integer("max_eval_steps", 100, "Maximum number of eval steps.")
flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")
tf.flags.DEFINE_string(
"tpu_name", None,
"The Cloud TPU to use for training. This should be either the name "
"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
"url.")
| tensorflow.flags.DEFINE_string | 3,858 |
import tensorflow as tf
# Set up functionality to re-compute `param_noise_scale`. This perturbs yet another copy
# of the network and measures the effect of that perturbation in action space. If the perturbation
# is too big, reduce scale of perturbation, otherwise increase.
q_values_adaptive = q_func(observations_ph.get(), num_actions, scope="adaptive_q_func")
perturb_for_adaption = perturb_vars(original_scope="q_func", perturbed_scope="adaptive_q_func")
kl = tf.reduce_sum(tf.nn.softmax(q_values) * (tf.log(tf.nn.softmax(q_values)) - tf.log(tf.nn.softmax(q_values_adaptive))), axis=-1)
mean_kl = tf.reduce_mean(kl)
def update_scale():
with tf.control_dependencies([perturb_for_adaption]):
update_scale_expr = tf.cond(mean_kl < param_noise_threshold,
lambda: param_noise_scale.assign(param_noise_scale * 1.01),
lambda: param_noise_scale.assign(param_noise_scale / 1.01),
)
| tensorflow.reduce_mean | 3,859 |
import tensorflow as tf
for v in tf.trainable_variables():
if 'batch_normalization' not in v.name and 'rl_controller' not in v.name:
l2_loss.append(tf.nn.l2_loss(v))
l2_loss = FLAGS.dst_weight_decay * tf.add_n(l2_loss)
| tensorflow.nn.l2_loss | 3,860 |
import tensorflow as tf
# Training
start_time = time.time()
for i in range(hparams.n_iters):
_, loss_np = sess.run([train_op, loss])
print("Iteration %d: loss %.4f" % (i, loss_np))
wall_time = (time.time() - start_time) / hparams.n_iters
examples_per_sec = hparams.n_samples / wall_time
self.report_benchmark(
name="graph_train_%s_%d" %
("gpu" if tf.test.is_gpu_available() else "cpu", sample_size),
iters=hparams.n_iters,
extras={"examples_per_sec": examples_per_sec},
wall_time=wall_time)
def benchmark_eager(self):
self._benchmark_eager()
def benchmark_eager_defun(self):
self._benchmark_eager(defun=True)
| tensorflow.test.is_gpu_available | 3,861 |
import tensorflow as tf
# Trainable parameters
w1 = tf.Variable(tf.random_normal([hidden_size, attention_size], stddev=0.1))
w2 = tf.Variable(tf.random_normal([input_size, attention_size], stddev=0.1))
b = tf.Variable(tf.random_normal([attention_size], stddev=0.1))
v = tf.Variable(tf.random_normal([attention_size], stddev=0.1))
with tf.name_scope('v'):
# Applying fully connected layer with non-linear activation to each of the B*T timestamps;
# the shape of `tmp` is (B,T,D)*(D,A)=(B,T,A), where A=attention_size
tmp1 = tf.tensordot(facts, w1, axes=1)
tmp2 = tf.tensordot(query, w2, axes=1)
tmp2 = tf.reshape(tmp2, [-1, 1, tf.shape(tmp2)[-1]])
tmp = tf.tanh((tmp1 + tmp2) + b)
# For each of the timestamps its vector of size A from `tmp` is reduced with `v` vector
v_dot_tmp = tf.tensordot(tmp, v, axes=1, name='v_dot_tmp') # (B,T) shape
key_masks = mask # [B, 1, T]
# key_masks = tf.expand_dims(mask, 1) # [B, 1, T]
paddings = tf.ones_like(v_dot_tmp) * (-2 ** 32 + 1)
v_dot_tmp = tf.where(key_masks, v_dot_tmp, paddings) # [B, 1, T]
alphas = tf.nn.softmax(v_dot_tmp, name='alphas') # (B,T) shape
| tensorflow.shape | 3,862 |
from tensorflow.python.framework import tensor_util
"""
with self._name_scope(name, values=[x]):
ndims = self.get_ndims(x, name=name)
if self._is_all_constant_helper(ndims, self.batch_ndims,
self.event_ndims):
ndims = tensor_util.constant_value(ndims)
sample_ndims = (ndims - self._batch_ndims_static -
self._event_ndims_static)
if sample_ndims < 0:
raise ValueError(
| tensorflow.python.framework.tensor_util.constant_value | 3,863 |
import tensorflow as tf
GRUCell(half_depth),
GRUCell(half_depth),
rnn_input,
sequence_length=input_lengths,
dtype=tf.float32)
return tf.concat(outputs, axis=2) # Concat forward and backward
def highwaynet(inputs, scope, depth):
with tf.variable_scope(scope):
H = tf.layers.dense(
inputs,
units=depth,
activation=tf.nn.relu,
name='H')
T = tf.layers.dense(
inputs,
units=depth,
activation=tf.nn.sigmoid,
name='T',
| tensorflow.layers.dense | 3,864 |
import tensorflow as tf
# x.get_shape()[-1].value : 3
in_channels = x.get_shape()[-1].value
with tf.variable_scope(layer_name):
w = tf.get_variable(name='weights',
trainable=is_pretrain,
shape=[kernel_size[0],kernel_size[1],in_channels,out_channels],
initializer=tf.contrib.layers.xavier_initializer())
b = tf.get_variable(name='bias',
trainable=is_pretrain,
shape=[out_channels],
initializer=tf.constant_initializer(0.0))
x = tf.nn.conv2d(x,w,strides,padding='SAME',name='conv')
x = tf.nn.bias_add(x,b,name='bias_add')
x = tf.nn.relu(x,name='relu')
return x
def pool(layer_name, x, kernel_size=[1,2,2,1], strides=[1,2,2,1], is_max_pool=True):
'''
Pooling op
Args:
Returns:
| tensorflow.nn.bias_add | 3,865 |
import tensorflow as tf
gc = tf.get_variable("gc", [nh], initializer=tf.constant_initializer(1.0))
bc = tf.get_variable("bc", [nh], initializer=tf.constant_initializer(0.0))
c, h = tf.split(axis=1, num_or_size_splits=2, value=s)
for idx, (x, m) in enumerate(zip(xs, ms)):
c = c*(1-m)
h = h*(1-m)
z = _ln(tf.matmul(x, wx), gx, bx) + _ln(tf.matmul(h, wh), gh, bh) + b
i, f, o, u = tf.split(axis=1, num_or_size_splits=4, value=z)
i = tf.nn.sigmoid(i)
f = tf.nn.sigmoid(f)
o = tf.nn.sigmoid(o)
u = tf.tanh(u)
c = f*c + i*u
h = o*tf.tanh(_ln(c, gc, bc))
xs[idx] = h
s = tf.concat(axis=1, values=[c, h])
return xs, s
| tensorflow.nn.sigmoid | 3,866 |
import tensorflow as tf
fc1 = tf.layers.dense(
inputs=v,
units=1024,
activation=tf.nn.relu,
use_bias=True,
name='fc1'
)
fc2 = tf.layers.dense(
inputs=fc1,
units=1024,
activation=tf.nn.relu,
use_bias=True,
name='fc2'
)
out = tf.layers.dense(
| tensorflow.layers.dense | 3,867 |
import tensorflow as tf
- removed imitation gradients and losses
- removed valid_loss
- removed train_valid
- commented out policy loss (since, discrete)
- next_loc_loss is now new policy loss
- responsible_next_loc is NOW policy
'''
self.value, self.next_loc_mean, self.loc_std, self.next_loc, self.state_out, self.state_in, self.state_init = self._build_net(self.inputs, self.prev_loc, RNN_SIZE, TRAINING, a_size) # self.goal_pos
if TRAINING:
self.target_v = tf.placeholder(tf.float32, [None], 'Vtarget')
self.advantages = tf.placeholder(shape=[None], dtype=tf.float32)
self.sampled_next_locs = tf.placeholder(tf.float32, [None,2]) # sampled action is stored here
self.policy = gaussian_pdf(self.next_loc_mean, self.loc_std, self.sampled_next_locs) # Distribution == Policy
# Loss Functions
self.value_loss = 0.5*tf.reduce_sum(tf.square(self.target_v - tf.reshape(self.value, shape=[-1])))
# H(x) = Sum[p(x)*log(p(x))]
self.entropy = - 0.01 * tf.reduce_sum(self.policy * tf.log(tf.clip_by_value(self.policy,1e-10,1.0)))
self.policy_loss = - 0.2 * tf.reduce_sum( tf.log(tf.clip_by_value(self.policy[:,0],1e-15,1.0)) * self.advantages + tf.log(tf.clip_by_value(self.policy[:,1],1e-15,1.0)) * self.advantages)
#For Normal RL Part
self.loss = self.value_loss + self.policy_loss - self.entropy # removed self.blocking_loss, valid_loss, discrete_policy _loss #+ 0.5*self.mypos_loss + 0.5*self.goalpos_loss
| tensorflow.placeholder | 3,868 |
import tensorflow as tf
anchor_positive_mining_distances=anchor_positive_mining_distances,
anchor_match_mining_distance_matrix=(
anchor_match_mining_distance_matrix)))
def compute_triplet_loss(positive_distances, negative_distances):
losses = tf.nn.relu(positive_distances + margin - negative_distances)
losses = tf.where(
tf.stop_gradient(losses < losses.dtype.max), losses,
tf.zeros_like(losses))
num_nonzero_losses = tf.math.count_nonzero(losses)
loss = tf.math.reduce_mean(losses)
return loss, num_nonzero_losses
loss, num_active_triplets = compute_triplet_loss(anchor_positive_distances,
anchor_negative_distances)
mining_loss, num_active_mining_triplets = compute_triplet_loss(
anchor_positive_mining_distances, anchor_negative_mining_distances)
| tensorflow.math.count_nonzero | 3,869 |
import tensorflow as tf
cluster_ids = cluster_ids[gold_spans]
return tokens, context_word_emb, head_word_emb, lm_emb, char_index, text_len, speaker_ids, genre, is_training, gold_starts, gold_ends, cluster_ids
def get_candidate_labels(self, candidate_starts, candidate_ends, labeled_starts, labeled_ends, labels):
same_start = tf.equal(tf.expand_dims(labeled_starts, 1), tf.expand_dims(candidate_starts, 0)) # [num_labeled, num_candidates]
same_end = tf.equal(tf.expand_dims(labeled_ends, 1), tf.expand_dims(candidate_ends, 0)) # [num_labeled, num_candidates]
same_span = tf.logical_and(same_start, same_end) # [num_labeled, num_candidates]
candidate_labels = tf.matmul(tf.expand_dims(labels, 0), tf.to_int32(same_span)) # [1, num_candidates]
candidate_labels = tf.squeeze(candidate_labels, 0) # [num_candidates]
return candidate_labels
def get_dropout(self, dropout_rate, is_training):
return 1 - (tf.to_float(is_training) * dropout_rate)
def coarse_to_fine_pruning(self, top_span_emb, top_span_mention_scores, c):
| tensorflow.to_int32 | 3,870 |
import tensorflow as tf
output_weights = tf.get_variable(
"output_weights", [num_labels, hidden_size],
initializer=tf.truncated_normal_initializer(stddev=0.02))
| tensorflow.truncated_normal_initializer | 3,871 |
import tensorflow as tf
epsilon = tf.constant(value=1e-7)
labels = tf.to_float(labels)
# labels = tf.to_float(tf.reshape(labels, (-1, num_classes)))
softmax = tf.nn.softmax(logits) + epsilon
if head is not None:
cross_entropy = -tf.reduce_sum(tf.mul(labels * tf.log(softmax), head), axis=[1])
else:
cross_entropy = -tf.reduce_sum(labels * tf.log(softmax), axis=[1])
cross_entropy_mean = tf.reduce_mean(cross_entropy, name='xentropy_mean')
return cross_entropy_mean
def triplet_loss(anchor, positive, negative, alpha=0.2, name='triplet_loss'):
"""Calculate the triplet loss according to the FaceNet paper.
| tensorflow.log | 3,872 |
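A standalone sketch of the explicit softmax cross entropy above (TF 1.x ops such as tf.log assumed), with illustrative logits and one-hot labels:

```python
import tensorflow as tf

epsilon = tf.constant(value=1e-7)
logits = tf.constant([[2.0, 0.5], [0.1, 1.0]])
labels = tf.constant([[1.0, 0.0], [0.0, 1.0]])   # one-hot targets
softmax = tf.nn.softmax(logits) + epsilon
cross_entropy = -tf.reduce_sum(labels * tf.log(softmax), axis=[1])
cross_entropy_mean = tf.reduce_mean(cross_entropy, name='xentropy_mean')
```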
import tensorflow as tf
gradient_norms = [tf.norm(grad) for grad in model.gradients]
tf.summary.histogram("gradient_norm", gradient_norms)
tf.summary.scalar("max_gradient_norm", tf.reduce_max(gradient_norms)) # visualize
# gradients (in case of explosion)
return tf.summary.merge_all()
def add_eval_stats(summary_writer, step, linear_loss, before_loss, after_loss, stop_token_loss,
| tensorflow.summary.merge_all | 3,873 |
import tensorflow as tf
return out
def _normalize(self, x, mean, mean_sq, message):
# make sure this is called with a variable scope
shape = x.get_shape().as_list()
assert len(shape) == 4
self.gamma = safe_get("gamma", [shape[-1]],
initializer=tf.random_normal_initializer(1., 0.02))
gamma = tf.reshape(self.gamma, [1, 1, 1, -1])
self.beta = safe_get("beta", [shape[-1]],
initializer=tf.constant_initializer(0.))
beta = tf.reshape(self.beta, [1, 1, 1, -1])
assert self.epsilon is not None
assert mean_sq is not None
assert mean is not None
std = tf.sqrt(self.epsilon + mean_sq - tf.square(mean))
out = x - mean
out = out / std
out = out * gamma
out = out + beta
| tensorflow.constant_initializer | 3,874 |
import tensorflow as tf
x_count, x_mean, x_variance = (
tf_utils.reduce_batch_count_mean_and_var(x, reduce_instance_dims))
combine_inputs = _WeightedMeanAndVarAccumulator(
count=x_count,
mean=x_mean,
variance=x_variance,
weight=tf.zeros([], tf.float32))
output_shape = ()
if not reduce_instance_dims:
# We need to use tf.expand_dims to artificially add a batch dimension.
output_shape = _get_output_shape_from_input(
tf.expand_dims(x_count, axis=0))
| tensorflow.zeros | 3,875 |
import tensorflow as tf
tvars = tf.trainable_variables()
initialized_variable_names = {}
scaffold_fn = None
if init_checkpoint:
(assignment_map, initialized_variable_names
) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
if use_tpu:
def tpu_scaffold():
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
return tf.train.Scaffold()
scaffold_fn = tpu_scaffold
elif not do_serve:
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
tf.logging.info("**** Trainable Variables ****")
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
| tensorflow.train.Scaffold | 3,876 |
import tensorflow as tf
batch_size = tf.placeholder(tf.int32, [], name='batch_size')
x = tf.placeholder(tf.float32, [None, num_steps, input_size_x], name='x')
| tensorflow.placeholder | 3,877 |
import tensorflow as tf
padding=padding)
subsamp_sum = tf.multiply(subsamp, kH*kW)
if pnorm == 2:
out = tf.sqrt(subsamp_sum)
else:
out = tf.pow(subsamp_sum, 1/pnorm)
| tensorflow.sqrt | 3,878 |
from tensorflow.python.framework import constant_op
features = {"x": constant_op.constant([[1.], [2.], [2.]])}
label = constant_op.constant([[0], [1], [1]], dtype=dtypes.int32)
return features, label
def _infer_ranking_train_input_fn():
features = {
"f1": constant_op.constant([[3.], [2], [1.]]),
"f2": constant_op.constant([[0.1], [3.], [1.]])
}
return features, None
class BoostedTreeEstimatorTest(test_util.TensorFlowTestCase):
| tensorflow.python.framework.constant_op.constant | 3,879 |
import tensorflow as tf
def _decode_record(record, name_to_features):
"""Decodes a record to a TensorFlow example."""
example = tf.parse_single_example(record, name_to_features)
# tf.Example only supports tf.int64, but the TPU only supports tf.int32.
| tensorflow.parse_single_example | 3,880 |
import tensorflow as tf
w = tf.get_variable(
"w", wshape, initializer=self.ortho_init(init_scale))
b = tf.get_variable(
"b",
bias_var_shape,
initializer=tf.constant_initializer(0.0))
if not one_dim_bias and data_format == 'NHWC':
b = tf.reshape(b, bshape)
return tf.nn.conv2d(
x,
| tensorflow.constant_initializer | 3,881 |
from tensorflow.python.client import session
def _BenchmarkOp(self, op, desc):
burn_in_steps = 10
benchmark_steps = 40
with session.Session() as sess:
sess.run(variables.global_variables_initializer())
for i in xrange(burn_in_steps + benchmark_steps):
| tensorflow.python.client.session.Session | 3,882 |
import tensorflow as tf
ul_logit = vat.forward(ul_x, is_training=False, update_batch_stats=False)
vat_loss = vat.virtual_adversarial_loss(ul_x, ul_u, ul_logit, is_training=False)
losses['VAT_loss'] = vat_loss
return losses
def main(_):
print(FLAGS.epsilon, FLAGS.top_bn)
np.random.seed(seed=FLAGS.seed)
tf.set_random_seed(np.random.randint(1234))
with tf.Graph().as_default() as g:
with tf.device("/cpu:0"):
images, labels = inputs(batch_size=FLAGS.batch_size,
train=True,
validation=FLAGS.validation,
shuffle=True)
ul_images = tf.placeholder(shape=images.shape, dtype=tf.float32)
'''unlabeled_inputs(batch_size=FLAGS.ul_batch_size,
validation=FLAGS.validation,
shuffle=True)'''
| tensorflow.Graph | 3,883 |
import tensorflow as tf
Args:
labels: A `Tensor` of shape [batch_size] or [batch_size, num_labels].
logits: A `Tensor` of shape [batch_size, num_labels] or
[batch_size, num_labels, num_anchors]. If the third dimension is present,
the lower bound is computed on each slice [:, :, k] independently.
weights: Per-example loss coefficients, with shape broadcast-compatible with
that of `labels`.
surrogate_type: Either 'xent' or 'hinge', specifying which upper bound
should be used for indicator functions.
Returns:
A `Tensor` of shape [num_labels] or [num_labels, num_anchors].
"""
maybe_log2 = tf.log(2.0) if surrogate_type == 'xent' else 1.0
maybe_log2 = tf.cast(maybe_log2, logits.dtype.base_dtype)
if logits.get_shape().ndims == 3 and labels.get_shape().ndims < 3:
labels = tf.expand_dims(labels, 2)
loss_on_positives = losses_utils.weighted_surrogate_loss(
labels, logits, surrogate_type, negative_weights=0.0) / maybe_log2
return tf.reduce_sum(weights * (labels - loss_on_positives), 0)
def false_positives_upper_bound(labels, logits, weights, surrogate_type):
"""Calculate an upper bound on the number of false positives.
This upper bound on the number of false positives given `logits` and `labels`
| tensorflow.log | 3,884 |
from tensorflow.python.summary import summary
elif callable(clip_gradients):
gradients = clip_gradients(gradients)
elif clip_gradients is not None:
raise ValueError("Unknown type %s for clip_gradients" %
type(clip_gradients))
# Add scalar summary for loss.
if "loss" in summaries:
summary.scalar("loss", loss)
# Add histograms for variables, gradients and gradient norms.
for gradient, variable in gradients:
if isinstance(gradient, ops.IndexedSlices):
grad_values = gradient.values
else:
grad_values = gradient
| tensorflow.python.summary.summary.scalar | 3,885 |
import tensorflow as tf
self.tensor_structure,
[tf.placeholder(name=t.name,
dtype=t.dtype,
shape=[self.batch_size // self.total_core_num] + list(t.shape))
for t in nest.flatten(self.tensor_structure)])
for tensor in nest.flatten(tensors):
tf.get_default_graph().clear_collection(tensor.name)
tf.add_to_collection(tensor.name, self)
self._original_tensors = tensors
self._tensors = tensors
if not self.has_batch:
| tensorflow.get_default_graph | 3,886 |
import tensorflow as tf
clear_ops = [tf.assign(s, tf.zeros_like(s)) for s in slots]
return tf.group(*clear_ops, name='update_grad')
pred = tf.equal(tf.mod(counter, self._niter), 0)
with tf.control_dependencies([update_slot_op]):
if name is None:
name = 'cond_update_grad'
op = tf.cond(pred, update_grad, tf.no_op, name=name).op
return op
if __name__ == '__main__':
# run it with "python -m tensorpack.tfutils.optimizer"
x = tf.get_variable('x', shape=[6])
cost = tf.reduce_sum(tf.abs(x), name='cost')
opt = tf.train.GradientDescentOptimizer(0.01)
opt = AccumGradOptimizer(opt, 5)
min_op = opt.minimize(cost)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
with sess.as_default():
for k in range(20):
min_op.run()
print(x.eval())
| tensorflow.abs | 3,887 |
import tensorflow as tf
def _forward(self, x1, x2, **kwargs):
"""
A log normal RV X = exp(mu + sigma*Z) where Z ~ N(0,I).
The forward pass scales to a standard log normal with mu=0, sigma=1 by computing:
exp(Z) = (X / exp(mu))^(1/sigma)
"""
params = self.parameterizer(x1)
mus, log_sigmas = params[:,:,:,0::2], params[:,:,:,1::2]
# compute softplus activation
z2, ldj = log_gaussianize(x2, mus, log_sigmas)
z2 = tf.where(x2 > self.epsilon, z2, x2)
ldj = tf.where(x2 > self.epsilon, ldj, tf.zeros_like(ldj))
return z2, tf.math.reduce_sum(ldj, axis=[1,2,3])
def _inverse(self, x1, z2, **kwargs):
params = self.parameterizer(x1)
mus, log_sigmas = params[:,:,:,0::2], params[:,:,:,1::2]
x2, ldj = log_gaussianize(z2, mus, log_sigmas, inverse=tf.constant(True))
x2 = tf.where(z2 > self.epsilon, x2, z2)
ldj = tf.where(z2 > self.epsilon, ldj, tf.zeros_like(ldj))
return x2, tf.math.reduce_sum(ldj, axis=[1,2,3])
| tensorflow.where | 3,888 |
import tensorflow as tf
average_across_batch=False)
res = sess.run(total_loss)
self.assertAllClose(9.656628, res)
def testSequenceLossByExample(self):
with self.test_session() as sess:
output_classes = 5
logits = [tf.constant(i + 0.5, shape=[2, output_classes])
for i in range(3)]
targets = [tf.constant(i, tf.int32, shape=[2]) for i in range(3)]
weights = [tf.constant(1.0, shape=[2]) for i in range(3)]
average_loss_per_example = tf.nn.seq2seq.sequence_loss_by_example(
logits, targets, weights,
average_across_timesteps=True)
res = sess.run(average_loss_per_example)
self.assertAllClose(np.asarray([1.609438, 1.609438]), res)
loss_per_sequence = tf.nn.seq2seq.sequence_loss_by_example(
logits, targets, weights,
| tensorflow.constant | 3,889 |
import tensorflow as tf
if not tf.io.gfile.exists(base_dir):
tf.io.gfile.makedirs(base_dir)
tag = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
tb_logdir = osp.join(base_dir, tag, 'tb')
save_dir = osp.join(base_dir, tag, 'train')
tf.io.gfile.makedirs(tb_logdir)
tf.io.gfile.makedirs(save_dir)
writer = tf.contrib.summary.create_file_writer(tb_logdir)
writer.set_as_default()
return writer, save_dir
def record_point_mass_episode(tf_env, tf_policy, savepath=None):
"""Records summaries."""
| tensorflow.contrib.summary.create_file_writer | 3,890 |
import tensorflow as tf
(total_loss, per_example_loss, logits, probabilities) = create_model(
bert_config, is_training, input_ids, input_mask, segment_ids, label_ids,
num_labels, use_one_hot_embeddings)
tvars = tf.trainable_variables()
initialized_variable_names = {}
scaffold_fn = None
if init_checkpoint:
(assignment_map, initialized_variable_names
) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
if use_tpu:
def tpu_scaffold():
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
return tf.train.Scaffold()
scaffold_fn = tpu_scaffold
else:
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
tf.logging.info("**** Trainable Variables ****")
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
| tensorflow.train.init_from_checkpoint | 3,891 |
import tensorflow as tf
def _mask_by_length(t, length):
maxlen = t.get_shape().as_list()[1]
mask = tf.sequence_mask(length, maxlen=maxlen)
mask = tf.expand_dims(tf.cast(mask, tf.float32), -1)
return t * mask
| tensorflow.sequence_mask | 3,892 |
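A minimal sketch of the length-masking pattern from _mask_by_length above, with a small illustrative batch:

```python
import tensorflow as tf

t = tf.ones([2, 4, 3])                                   # [batch, time, dim]
length = tf.constant([2, 4])                             # valid timesteps per example
mask = tf.sequence_mask(length, maxlen=4)                # [2, 4] boolean
mask = tf.expand_dims(tf.cast(mask, tf.float32), -1)     # [2, 4, 1]
masked = t * mask                                        # timesteps beyond `length` become 0
```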
import tensorflow as tf
types[rconst.VALID_POINT_MASK] = np.bool
shapes[rconst.VALID_POINT_MASK] = tf.TensorShape([batch_size])
types = (types, np.bool)
shapes = (shapes, tf.TensorShape([batch_size]))
else:
types[rconst.DUPLICATE_MASK] = np.bool
shapes[rconst.DUPLICATE_MASK] = tf.TensorShape([batch_size])
data_generator = functools.partial(
self.data_generator, epochs_between_evals=epochs_between_evals)
dataset = tf.data.Dataset.from_generator(
generator=data_generator, output_types=types,
output_shapes=shapes)
| tensorflow.TensorShape | 3,893 |
import tensorflow as tf
self.q_maxlen = tf.reduce_max(self.q_len)
self.c = tf.slice(self.c, [0, 0], [N, self.c_maxlen])
| tensorflow.slice | 3,894 |
import tensorflow as tf
self.assertEqual(save_path + "-?????-of-00002", val)
meta_graph_filename = save._MetaGraphFilename(val)
self.assertEqual(save_path + ".meta", meta_graph_filename)
# Restore a different "v0" from shard 0 of the saved files.
with tf.Session(
target="",
config=tf.ConfigProto(device_count={"CPU": 2})) as sess:
with sess.graph.device("/cpu:0"):
v0 = tf.Variable(111, name="v0")
save = tf.train.Saver({"v0": v0}, sharded=True)
tf.initialize_all_variables().run()
self.assertEqual(111, v0.eval())
save.restore(sess, save_path + "-00000-of-00002")
self.assertEqual(10, v0.eval())
# Restore a different "v1" from shard 1 of the saved files.
with tf.Session(
target="",
config=tf.ConfigProto(device_count={"CPU": 2})) as sess:
with sess.graph.device("/cpu:0"):
v1 = tf.Variable(222)
| tensorflow.initialize_all_variables | 3,895 |
import tensorflow as tf
return tf.estimator.EstimatorSpec(
mode, loss=total_loss,
eval_metric_ops=eval_metric_ops
)
assert mode == tf.estimator.ModeKeys.TRAIN
with tf.variable_scope('learning_rate'):
global_step = tf.train.get_global_step()
learning_rate = tf.train.cosine_decay(
params['initial_learning_rate'],
global_step, decay_steps=params['num_steps']
)
tf.summary.scalar('learning_rate', learning_rate)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops), tf.variable_scope('optimizer'):
optimizer = tf.train.AdamOptimizer(learning_rate)
grads_and_vars = optimizer.compute_gradients(total_loss)
train_op = optimizer.apply_gradients(grads_and_vars, global_step)
for g, v in grads_and_vars:
tf.summary.histogram(v.name[:-2] + '_hist', v)
tf.summary.histogram(v.name[:-2] + '_grad_hist', g)
with tf.control_dependencies([train_op]), tf.name_scope('ema'):
ema = tf.train.ExponentialMovingAverage(decay=MOVING_AVERAGE_DECAY, num_updates=global_step)
train_op = ema.apply(tf.trainable_variables())
return tf.estimator.EstimatorSpec(mode, loss=total_loss, train_op=train_op)
| tensorflow.get_collection | 3,896 |
from tensorflow.contrib.layers.python.layers import feature_column
}
labels = constant_op.constant(((1,), (0,), (0,)))
return features, labels
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
sparse_feature = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=2e7)
embedding_feature = feature_column.embedding_column(
sparse_feature, dimension=1)
tf_config = {
| tensorflow.contrib.layers.python.layers.feature_column.sparse_column_with_hash_bucket | 3,897 |
import tensorflow as tf
b1 = self.b1.read_value()
params = (w0, b0, w1, b1)
layer0 = tf.matmul(x, w0) + b0
layer1 = tf.nn.sigmoid(layer0)
layer2 = tf.matmul(layer1, w1) + b1
predictions = layer2
loss = tf.reduce_mean(tf.losses.sparse_softmax_cross_entropy(logits=predictions, labels=y))
grads = tf.gradients(ys=loss, xs=params)
| tensorflow.matmul | 3,898 |
import tensorflow as tf
with tf.variable_scope('VF'):
hidden = tf.layers.dense(inputs=_input,
units=self.vf_hidden_size,
activation=tf.nn.elu)
w = tf.get_variable("weights", (self.vf_hidden_size, 1))
return tf.matmul(hidden, w)
def build_loss(self):
cutoff_vf_manager = tf.reshape(tf.stop_gradient(self.manager_vf), [-1])
dot = tf.reduce_sum(tf.multiply(self.s_diff, self.g), axis=1)
gcut = tf.stop_gradient(self.g)
mag = tf.norm(self.s_diff, axis=1) * tf.norm(gcut, axis=1) + .0001
dcos = dot / mag
manager_loss = -tf.reduce_sum((self.r - cutoff_vf_manager) * dcos)
cutoff_vf_worker = tf.reshape(tf.stop_gradient(self.worker_vf), [-1])
log_p = tf.reduce_sum(self.log_pi * self.ac, [1])
worker_loss = (self.r + self.alpha * self.ri - cutoff_vf_worker) * log_p
worker_loss = -tf.reduce_sum(worker_loss, axis=0)
Am = self.r - self.manager_vf
manager_vf_loss = .5 * tf.reduce_sum(tf.square(Am))
| tensorflow.norm | 3,899 |
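A small sketch of the cosine-similarity term in the manager loss above (the 1e-4 epsilon guards against a zero denominator), with illustrative tensors standing in for s_diff and g:

```python
import tensorflow as tf

s_diff = tf.constant([[1.0, 0.0], [0.5, 0.5]])
g = tf.constant([[1.0, 1.0], [0.5, 0.5]])
dot = tf.reduce_sum(tf.multiply(s_diff, g), axis=1)        # per-row dot product
mag = tf.norm(s_diff, axis=1) * tf.norm(g, axis=1) + 1e-4  # product of norms, stabilized
dcos = dot / mag                                           # cosine similarity per row
```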