seed (stringlengths 25-2.89k) | seed_api (stringlengths 14-102) | index (int64 0-14.8k) |
---|---|---|
import tensorflow as tf
X = tf.identity(X)
else:
# For prediction, do batch norm with computed moving mean & variance from training
# Don't update moving averages if predicting
(X, _, _) = tf.nn.fused_batch_norm(X, scale, offset, mean=moving_mean, variance=moving_variance,
epsilon=epsilon, is_training=False)
else:
(X, _, _) = tf.nn.fused_batch_norm(X, scale, offset, epsilon=epsilon, is_training=True)
| tensorflow.nn.fused_batch_norm | 7,000 |
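For quick reference, a minimal standalone sketch of the `tf.nn.fused_batch_norm` call this row targets, assuming the TF 1.x graph API; the input shape, `scale`/`offset` values, and epsilon are illustrative placeholders, not taken from the snippet above:

```python
import tensorflow as tf  # TF 1.x graph API assumed

x = tf.random_normal([8, 32, 32, 16])   # NHWC input with 16 channels
scale = tf.ones([16])                    # gamma, one value per channel
offset = tf.zeros([16])                  # beta, one value per channel

# Training mode: batch statistics are computed from `x` and returned,
# so the caller can maintain its own moving averages for inference.
y, batch_mean, batch_var = tf.nn.fused_batch_norm(
    x, scale, offset, epsilon=1e-3, is_training=True)
```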
import tensorflow as tf
if dropout > 0:
out = tf.layers.dropout(out, rate=dropout, training=training)
out = conv(out, [2*dim[0], dim[1], dim[2]], scope="%s_conv_out"%scope, training=training, ema=ema, init=init)
h_stack1, h_stack2 = tf.split(out, 2, 3)
sigmoid_out = tf.sigmoid(h_stack2)
out = (h_stack1 * sigmoid_out)
out_shp = out.get_shape().as_list()
if out_shp[1:-1] < in_shp[1:-1]:
| tensorflow.sigmoid | 7,001 |
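The split-and-sigmoid pattern above is a gated linear unit; a minimal sketch under the same TF 1.x assumption, with an illustrative feature-map shape:

```python
import tensorflow as tf

out = tf.random_normal([2, 8, 8, 64])            # NHWC feature map
h_stack1, h_stack2 = tf.split(out, 2, axis=3)    # split the channels in half
gated = h_stack1 * tf.sigmoid(h_stack2)          # gate one half with the other
```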
import tensorflow as tf
# negtive examples are those max_overlap is still lower than neg_threshold, note that some positive may also has lower jaccard
# note those gscores is 0 is either be ignored during anchors encode or anchors have 0 overlap with all ground truth
#negtive_mask = tf.logical_and(tf.logical_and(tf.logical_not(tf.logical_or(positive_mask, glabels < 0)), gscores < params['neg_threshold']), gscores > 0.)
negtive_mask = tf.logical_and(tf.equal(glabels, 0), gscores > 0.)
#negtive_mask = tf.logical_and(tf.logical_and(tf.logical_not(positive_mask), gscores < params['neg_threshold']), gscores > 0.)
#negtive_mask = tf.logical_and(gscores < params['neg_threshold'], tf.logical_not(positive_mask))
fnegtive_mask = tf.cast(negtive_mask, tf.float32)
n_negtives = tf.reduce_sum(fnegtive_mask)
n_neg_to_select = tf.cast(params['negative_ratio'] * n_positives, tf.int32)
n_neg_to_select = tf.minimum(n_neg_to_select, tf.cast(n_negtives, tf.int32))
# hard negative mining for classification
predictions_for_bg = tf.nn.softmax(cls_pred)[:, 0]
prob_for_negtives = tf.where(negtive_mask,
0. - predictions_for_bg,
# ignore all the positives
0. - tf.ones_like(predictions_for_bg))
| tensorflow.cast | 7,002 |
import tensorflow as tf
data_format='channels_last', padding= "same",
strides=(2, 1),
activation=tf.nn.relu)
pool3 = conv3
conv4 = tf.layers.conv2d(pool3, filters=256*amp_factor, kernel_size=[5, 1],
data_format='channels_last', padding= "same",
strides=(2, 1),
activation=tf.nn.relu)
pool4 = conv4
conv5 = tf.layers.conv2d(pool4, filters=256*amp_factor, kernel_size=[5, 1],
data_format='channels_last', padding= "same",
strides=(2, 1),
activation=tf.nn.relu)
pool5 = conv5
pool5 = tf.transpose(pool5, [0, 3, 1, 2])
size = pool5.shape[-1] * pool5.shape[-2] * pool5.shape[-3]
logits = tf.layers.dense(tf.reshape(pool5,(-1, size)), units=256*amp_factor)
return logits
| tensorflow.layers.conv2d | 7,003 |
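A standalone sketch of one strided `tf.layers.conv2d` block from the stack above (TF 1.x `tf.layers` API; the batch size, sequence length, and channel counts are illustrative):

```python
import tensorflow as tf

x = tf.random_normal([4, 512, 3, 1])     # (batch, time, sensor channels, 1)
conv = tf.layers.conv2d(x, filters=64, kernel_size=[5, 1], strides=(2, 1),
                        padding='same', data_format='channels_last',
                        activation=tf.nn.relu)
print(conv.shape)                        # (4, 256, 3, 64): time halved by the stride
```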
import tensorflow.contrib as contrib
normalizer_fn=contrib.layers.batch_norm,
normalizer_params={
"is_training": is_training,
"scale": True,
"updates_collections": None
}
):
fc1_1 = contrib.layers.fully_connected(X, 32, scope="fc1_1")
fc1_2 = contrib.layers.fully_connected(X, 32, scope="fc1_2")
if cross_stitch_enabled:
with tf.variable_scope("cross_stitch_1"):
stitch1_1, stitch1_2 = apply_cross_stitch(fc1_1, fc1_2)
else:
stitch1_1, stitch1_2 = fc1_1, fc1_2
| tensorflow.contrib.layers.fully_connected | 7,004 |
import tensorflow as tf
Res.build_graph()
saver = tf.train.Saver()
# Open session and restore checkpoint
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
tf.train.start_queue_runners(sess)
sess.run(tf.global_variables_initializer())
| tensorflow.ConfigProto | 7,005 |
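A minimal sketch of the `tf.ConfigProto` / session setup used above, assuming TF 1.x; the `allow_growth` option is an extra illustrative setting:

```python
import tensorflow as tf

config = tf.ConfigProto(allow_soft_placement=True)  # fall back to CPU when an op has no GPU kernel
config.gpu_options.allow_growth = True              # grab GPU memory on demand instead of all at once
sess = tf.Session(config=config)
sess.run(tf.global_variables_initializer())
```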
import tensorflow as tf
slots = self._create_accum_slots(vs)
slots_and_vars = [(s, gv[1]) for s, gv in zip(slots, grads_and_vars)]
# Create the counter on the same device as the first variable.
with tf.variable_scope(self._name), \
vs[0].graph.colocate_with(vs[0]):
counter = tf.Variable(
0, name="counter", trainable=False, dtype=tf.int32)
with tf.name_scope('AccumGradOptimizer'):
ops = []
for s, gv in zip(slots, grads_and_vars):
| tensorflow.Variable | 7,006 |
import tensorflow as tf
assignment = tf.assign(
ref=self.episode_indices[self.episode_count: self.episode_count + num_episodes],
value=tf.boolean_mask(tensor=indices, mask=terminal)
)
# Increment episode count.
with tf.control_dependencies(control_inputs=(assignment,)):
assignment = tf.assign_add(ref=self.episode_count, value=num_episodes)
# Increment memory index.
with tf.control_dependencies(control_inputs=(assignment,)):
assignment = tf.assign(
ref=self.episode_indices[-1],
value=tf.where(self.memory_index + num_instances > self.capacity,
self.episode_indices[self.episode_count - 1], self.capacity - 1)
)
with tf.control_dependencies(control_inputs=(assignment,)):
assignment = tf.assign(ref=self.memory_index, value=((self.memory_index + num_instances) % self.capacity))
with tf.control_dependencies(control_inputs=(assignment,)):
return tf.no_op()
def tf_retrieve_indices(self, indices):
"""
Fetches experiences for given indices.
| tensorflow.where | 7,007 |
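The three-argument `tf.where` used above selects element-wise between two tensors; a tiny self-contained sketch:

```python
import tensorflow as tf

cond = tf.constant([True, False, True])
a = tf.constant([1, 2, 3])
b = tf.constant([10, 20, 30])
picked = tf.where(cond, a, b)    # -> [1, 20, 3]
```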
import tensorflow as tf
if FLAGS.enbl_dst:
logits_dst = self.helper_dst.calc_logits(self.sess_eval, images)
# model definition - weight-sparsified model
with tf.variable_scope(self.model_scope):
# loss & extra evaluation metrics
logits = self.forward_eval(images)
loss, metrics = self.calc_loss(labels, logits, self.trainable_vars)
| tensorflow.variable_scope | 7,008 |
import tensorflow as tf
loc_layer2 = Dense(units=loc_layer_size)(loc_layer1)
# Concatenationation of above layers, followed by FC layer
concat = tf.concat([flat1b, loc_layer2],1) # goal_layer2
h1 = Dense(units=RNN_SIZE)(concat)
h2 = Dense(units=RNN_SIZE)(h1)
self.h3 = tf.nn.relu(h2+concat)
#Recurrent network for temporal dependencies
lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(RNN_SIZE,state_is_tuple=True)
c_init = np.zeros((1, lstm_cell.state_size.c), np.float32)
h_init = np.zeros((1, lstm_cell.state_size.h), np.float32)
state_init = [c_init, h_init]
c_in = tf.placeholder(tf.float32, [1, lstm_cell.state_size.c])
h_in = tf.placeholder(tf.float32, [1, lstm_cell.state_size.h])
state_in = (c_in, h_in)
rnn_in = tf.expand_dims(self.h3, [0])
step_size = tf.shape(inputs)[:1]
state_in = tf.nn.rnn_cell.LSTMStateTuple(c_in, h_in)
lstm_outputs, lstm_state = tf.nn.dynamic_rnn(
lstm_cell, rnn_in, initial_state=state_in, sequence_length=step_size,
time_major=False)
lstm_c, lstm_h = lstm_state
state_out = (lstm_c[:1, :], lstm_h[:1, :])
self.rnn_out = tf.reshape(lstm_outputs, [-1, RNN_SIZE])
| tensorflow.placeholder | 7,009 |
import tensorflow as tf
b = tf.constant([2.0, 4.0, 6.0], shape=[3, 1])
with tf.device('/gpu:1'):
c = tf.matmul(a,b)
c = tf.reshape(c, [-1])
with tf.device('/gpu:2'):
d = tf.matmul(b,a)
flat_d = tf.reshape(d, [-1])
combined = tf.mul(c, flat_d)
print(sess.run(combined))
| tensorflow.mul | 7,010 |
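Note that `tf.mul` exists only in pre-1.0 releases; from TF 1.0 onward the element-wise product is `tf.multiply`. A minimal sketch of the same pattern with the renamed op (the device placement above is specific to that row and omitted here):

```python
import tensorflow as tf

c = tf.constant([1.0, 2.0, 3.0])
d = tf.constant([4.0, 5.0, 6.0])
combined = tf.multiply(c, d)             # element-wise product, formerly tf.mul

with tf.Session() as sess:
    print(sess.run(combined))            # [ 4. 10. 18.]
```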
import tensorflow as tf
[1, 0, 1, 0, 1]], dtype=tf.int32)
iou = isu.points_mask_iou(masks1=masks1, masks2=masks2)
expected_iou = tf.constant([[0, 0, 0],
[0, 1, 0.6],
[0, 0.6, 1.0],
[0, 0.4, 0]], dtype=tf.float32)
self.assertAllClose(iou.numpy(), expected_iou.numpy())
def test_points_mask_pairwise_iou(self):
masks1 = tf.constant([[0, 0, 0, 0, 0],
[1, 1, 1, 1, 1],
[1, 0, 1, 0, 1],
[0, 1, 0, 1, 0]], dtype=tf.int32)
masks2 = tf.constant([[0, 0, 0, 0, 0],
[1, 1, 1, 1, 1],
[1, 1, 1, 1, 0],
[1, 0, 1, 1, 1]], dtype=tf.int32)
pairwise_iou = isu.points_mask_pairwise_iou(masks1=masks1, masks2=masks2)
| tensorflow.constant | 7,011 |
import tensorflow as tf
state_keep_prob=state_prob)
return cell_drop
"""Wrap the cell in multilayer"""
cell=tf.nn.rnn_cell.MultiRNNCell([get_a_cell(state_size,input_prob,state_prob,input_size_x if layer==0 else state_size) for layer in range(num_layers)],state_is_tuple=True)
cell=tf.nn.rnn_cell.DropoutWrapper(cell,variational_recurrent=True,dtype=tf.float32,input_size=input_size_x,output_keep_prob=output_prob)
init_state = cell.zero_state(batch_size, dtype=tf.float32)
"""Build dynamic graph"""
rnn_outputs, final_state = tf.nn.dynamic_rnn(cell=cell, inputs=rnn_inputs,initial_state=init_state)
"""Add prediction layer"""
| tensorflow.nn.rnn_cell.DropoutWrapper | 7,012 |
import tensorflow as tf
print(update_ops)
with tf.control_dependencies(update_ops):
"""
def get_variables(self):
return {}
class Lrelu(object):
def __init__(self,leak=0.2,name='lrelu') :
self.leak = leak
self.name = name
def __call__(self, x, **kwargs) :
return tf.maximum(x, self.leak*x, name=self.name)
def get_variables(self):
return {}
class ResidualBlock() :
def __init__(self,name,filters,filter_size=3,non_linearity=Lrelu,normal_method=InstanceNorm) :
self.conv_1 = Conv2d(name+'_1',filters,filters,filter_size,filter_size,1,1)
self.normal = normal_method(name+'_norm')
self.nl = non_linearity()
self.conv_2 = Conv2d(name+'_2',filters,filters,filter_size,filter_size,1,1)
def __call__(self,input_var) :
| tensorflow.maximum | 7,013 |
import tensorflow as tf
def _double_factorial_loop_body(n, result, two):
result = tf.where(tf.greater_equal(n, two), result * n, result)
return n - two, result, two
def _double_factorial_loop_condition(n, result, two):
return tf.cast(tf.math.count_nonzero(tf.greater_equal(n, two)), tf.bool)
def double_factorial(n: TensorLike) -> TensorLike:
n = tf.convert_to_tensor(value=n)
two = tf.ones_like(n) * 2
result = tf.ones_like(n)
_, result, _ = tf.while_loop(
cond=_double_factorial_loop_condition,
body=_double_factorial_loop_body,
loop_vars=[n, result, two])
return result
def factorial(n: TensorLike) -> TensorLike:
n = tf.convert_to_tensor(value=n)
return tf.exp(tf.math.lgamma(n + 1))
| tensorflow.ones_like | 7,014 |
from tensorflow.python.ops import math_ops
ValueError: If `weights` is not `None` and has an incomptable shape.
"""
default_name = _at_k_name('false_positive', k, class_id=class_id)
with ops.name_scope(name, default_name, (predictions_idx, labels)) as scope:
fp = _sparse_false_positive_at_k(
predictions_idx=predictions_idx, labels=labels, class_id=class_id,
weights=weights)
batch_total_fp = math_ops.to_double(math_ops.reduce_sum(fp))
var = contrib_variables.local_variable(
array_ops.zeros([], dtype=dtypes.float64), name=scope)
return var, state_ops.assign_add(var, batch_total_fp, name='update')
| tensorflow.python.ops.math_ops.reduce_sum | 7,015 |
import tensorflow as tf
bert_config.initializer_range))
input_tensor = modeling.layer_norm(input_tensor)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
output_bias = tf.get_variable(
"output_bias",
shape=[bert_config.vocab_size],
initializer=tf.zeros_initializer())
logits = tf.matmul(input_tensor, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
log_probs = tf.nn.log_softmax(logits, axis=-1)
label_ids = tf.reshape(label_ids, [-1])
one_hot_labels = tf.one_hot(
label_ids, depth=bert_config.vocab_size, dtype=tf.float32)
per_example_loss = -tf.reduce_sum(log_probs * one_hot_labels, axis=[-1])
loss = tf.reshape(per_example_loss, [-1, tf.shape(positions)[1]])
# TODO: dynamic gather from per_example_loss
return loss
def gather_indexes(sequence_tensor, positions):
"""Gathers the vectors at the specific positions over a minibatch."""
sequence_shape = modeling.get_shape_list(sequence_tensor, expected_rank=3)
batch_size = sequence_shape[0]
seq_length = sequence_shape[1]
width = sequence_shape[2]
| tensorflow.one_hot | 7,016 |
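A compact sketch of the one-hot cross-entropy pattern above; the logits, label ids, and depth are illustrative stand-ins for the vocabulary-sized tensors in the row:

```python
import tensorflow as tf

log_probs = tf.nn.log_softmax(tf.constant([[2.0, 0.5, 0.1],
                                           [0.2, 1.5, 0.3]]))
label_ids = tf.constant([0, 1])
one_hot_labels = tf.one_hot(label_ids, depth=3, dtype=tf.float32)
per_example_loss = -tf.reduce_sum(log_probs * one_hot_labels, axis=-1)
```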
import tensorflow as tf
def _build_net(self, s, a, scope, trainable):
with tf.variable_scope(scope):
init_w = tf.random_normal_initializer(0., 0.01)
init_b = tf.constant_initializer(0.01)
with tf.variable_scope('l1'):
n_l1 = 700
# combine the action and states together in this way
w1_s = tf.get_variable('w1_s', [self.s_dim, n_l1], initializer=init_w, trainable=trainable)
w1_a = tf.get_variable('w1_a', [self.a_dim, n_l1], initializer=init_w, trainable=trainable)
b1 = tf.get_variable('b1', [1, n_l1], initializer=init_b, trainable=trainable)
net = tf.nn.relu(tf.matmul(s, w1_s) + tf.matmul(a, w1_a) + b1)
with tf.variable_scope('l2'):
net = tf.layers.dense(net, 20, activation=tf.nn.relu, kernel_initializer=init_w,
bias_initializer=init_b, name='l2', trainable=trainable)
with tf.variable_scope('q'):
q = tf.layers.dense(net, 1, kernel_initializer=init_w, bias_initializer=init_b, trainable=trainable) # Q(s,a)
| tensorflow.get_variable | 7,017 |
import tensorflow as tf
l = x.get_shape().as_list()[0]
return tf.reshape(tf.sqrt(tf.reduce_sum(x ** 2, axis=1)), (l, 1))
| tensorflow.reduce_sum | 7,018 |
import tensorflow as tf
predict_input_fn = file_based_input_fn_builder(
input_file=predict_file,
seq_length=FLAGS.max_seq_length,
is_training=False,
drop_remainder=predict_drop_remainder)
result = estimator.predict(input_fn=predict_input_fn)
output_predict_file = os.path.join(FLAGS.output_dir, "test_results.tsv")
with tf.gfile.GFile(output_predict_file, "w") as writer:
num_written_lines = 0
tf.logging.info("***** Predict results *****")
for (i, prediction) in enumerate(result):
if i >= num_actual_predict_examples:
break
probabilities = prediction["probabilities"]
texta=predict_examples[i].text_a
texta=tokenizer.tokenize(texta)
phrase=[texta[j] if probabilities[j]>=0.5 else ' ' for j in range(min(len(texta),128))]
| tensorflow.gfile.GFile | 7,019 |
import tensorflow as tf
def _build_rnn_graph_cudnn(self, inputs, config, is_training):
"""Build the inference graph using CUDNN cell."""
inputs = tf.transpose(inputs, [1, 0, 2])
self._cell = tf.contrib.cudnn_rnn.CudnnLSTM(
num_layers=config.num_layers,
num_units=config.hidden_size,
input_size=config.hidden_size,
dropout=1 - config.keep_prob if is_training else 0)
params_size_t = self._cell.params_size()
self._rnn_params = tf.get_variable(
"lstm_params",
initializer=tf.random_uniform(
[params_size_t], -config.init_scale, config.init_scale),
validate_shape=False)
c = tf.zeros([config.num_layers, self.batch_size, config.hidden_size],
tf.float32)
h = tf.zeros([config.num_layers, self.batch_size, config.hidden_size],
tf.float32)
self._initial_state = (tf.contrib.rnn.LSTMStateTuple(h=h, c=c),)
outputs, h, c = self._cell(inputs, h, c, self._rnn_params, is_training)
outputs = tf.transpose(outputs, [1, 0, 2])
outputs = tf.reshape(outputs, [-1, config.hidden_size])
return outputs, (tf.contrib.rnn.LSTMStateTuple(h=h, c=c),)
def _get_lstm_cell(self, config, is_training):
#if config.rnn_mode == BASIC:
# return tf.contrib.rnn.BasicLSTMCell(
# config.hidden_size, forget_bias=0.0, state_is_tuple=True,
# reuse=not is_training)
| tensorflow.zeros | 7,020 |
import tensorflow as tf
if is_training:
steps = int(n_examples // batch_size * self._config.num_train_epochs)
else:
steps = n_examples // batch_size
return input_fn, steps
def serialize_examples(self, examples, is_training, output_file, batch_size):
"""Convert a set of `InputExample`s to a TFRecord file."""
n_examples = 0
with tf.python_io.TFRecordWriter(output_file) as writer:
for (ex_index, example) in enumerate(examples):
if ex_index % 2000 == 0:
utils.log("Writing example {:} of {:}".format(ex_index, len(examples)))
for tf_example in self._example_to_tf_example(
example, is_training, log=self._config.log_examples and ex_index < 1
):
writer.write(tf_example.SerializeToString())
n_examples += 1
| tensorflow.python_io.TFRecordWriter | 7,021 |
import tensorflow as tf
x = tf.expand_dims(tf.range(ksize, delta=1, dtype=tf.float32), axis=1)
y = tf.transpose(x, [1, 0])
kernel_matrix = tf.exp(- ((x - ksize/2.) ** 2 + (y - ksize/2.) ** 2) / (2 * sigma ** 2))
#print(kernel_matrix)
| tensorflow.exp | 7,022 |
import tensorflow as tf
xent_loss = sequence_loss(logits=outputs, targets=targets[:, 1:],
weights=target_weights)
if chaining_loss is not None and chaining_loss_ratio:
xent_loss += chaining_loss_ratio * chaining_loss
losses = [xent_loss, None, None]
return losses, [outputs], encoder_state, attention_states, attention_weights, samples, beam_fun, initial_data
def softmax(logits, dim=-1, mask=None):
e = tf.exp(logits)
if mask is not None:
e *= mask
return e / tf.clip_by_value(tf.reduce_sum(e, axis=dim, keep_dims=True), 10e-37, 10e+37)
def sequence_loss(logits, targets, weights, average_across_timesteps=False, average_across_batch=True, rewards=None):
batch_size = tf.shape(targets)[0]
time_steps = tf.shape(targets)[1]
logits_ = tf.reshape(logits, tf.stack([time_steps * batch_size, logits.get_shape()[2].value]))
targets_ = tf.reshape(targets, tf.stack([time_steps * batch_size]))
| tensorflow.exp | 7,023 |
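The masked softmax above is a hand-rolled variant of `tf.nn.softmax`; a minimal TF 1.x sketch of the unmasked case (`keep_dims` is the 1.x spelling of `keepdims`):

```python
import tensorflow as tf

logits = tf.constant([[1.0, 2.0, 3.0]])
e = tf.exp(logits)
probs = e / tf.reduce_sum(e, axis=-1, keep_dims=True)   # matches tf.nn.softmax(logits)
```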
import tensorflow as tf
with tf.variable_scope("transformer", reuse=tf.AUTO_REUSE):
for layer_idx in range(num_hidden_layers):
group_idx = int(layer_idx / num_hidden_layers * num_hidden_groups)
with tf.variable_scope("group_%d" % group_idx):
with tf.name_scope("layer_%d" % layer_idx):
layer_output = prev_output
for inner_group_idx in range(inner_group_num):
with tf.variable_scope("inner_group_%d" % inner_group_idx):
| tensorflow.name_scope | 7,024 |
from tensorflow.python.ops import math_ops
@distribution_util.AppendDocstring(
"""Variance for inverse gamma is defined only for `alpha > 2`. If
`self.allow_nan_stats` is `False`, an exception will be raised rather
than returning `NaN`.""")
def _variance(self):
var = (math_ops.square(self.beta) /
(math_ops.square(self.alpha - 1.) * (self.alpha - 2.)))
if self.allow_nan_stats:
nan = np.array(np.nan, dtype=self.dtype.as_numpy_dtype())
return array_ops.where(
self.alpha > 2., var,
array_ops.fill(self.batch_shape(), nan, name="nan"))
else:
| tensorflow.python.ops.math_ops.square | 7,025 |
import tensorflow as tf
num_dims = len(output.shape.as_list())
# Only the last two dimensions are relevant (`seq_length` and `width`), so
# we broadcast among the first dimensions, which is typically just
# the batch size.
position_broadcast_shape = []
for _ in range(num_dims - 2):
position_broadcast_shape.append(1)
position_broadcast_shape.extend([seq_length, width])
position_embeddings = tf.reshape(position_embeddings,
position_broadcast_shape)
output += position_embeddings
output = layer_norm_and_dropout(output, dropout_prob)
return output
def dense_layer_3d(input_tensor,
| tensorflow.reshape | 7,026 |
import tensorflow as tf
initializer=tf.truncated_normal_initializer(stddev=0.02))
output_bias = tf.get_variable(
"output_bias",[], initializer=tf.zeros_initializer())
with tf.variable_scope("loss"):
if is_training:
# I.e., 0.1 dropout
output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)
logits = tf.reduce_sum(tf.multiply(output_layer,output_weights),-1)
logits = tf.add(logits, output_bias)
probabilities=tf.sigmoid(logits)
# labels=tf.constant(labels,dtype=tf.int32)
per_example_loss=tf.losses.sigmoid_cross_entropy(multi_class_labels=labels, logits=logits,reduction=Reduction.NONE)
per_example_loss=tf.reduce_sum(per_example_loss,axis=-1)
loss = tf.reduce_mean(per_example_loss,name='train_loss')
return (loss, per_example_loss, logits, probabilities)
def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate,
num_train_steps, num_warmup_steps, use_tpu,
use_one_hot_embeddings):
| tensorflow.sigmoid | 7,027 |
import tensorflow as tf
data_set = tf.data.TFRecordDataset(TFRECORD_PATH)
data_set = data_set.map(parse_function)
data_set = data_set.shuffle(buffer_size=9)
data_set = data_set.batch(3)
iterator = data_set.make_initializable_iterator()
next_element = iterator.get_next()
with tf.Session() as sess:
sess.run(iterator.initializer)
results, imgs = sess.run(next_element)
print('names: {}'.format(results['member/name']))
print('ages: {}'.format(results['member/age']))
| tensorflow.Session | 7,028 |
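A self-contained sketch of the same initializable-iterator pattern with an in-memory dataset, assuming TF 1.x; the TFRecord path and parse function in the row are replaced by a toy range dataset:

```python
import tensorflow as tf

data_set = tf.data.Dataset.from_tensor_slices(tf.range(9))
data_set = data_set.shuffle(buffer_size=9).batch(3)
iterator = data_set.make_initializable_iterator()
next_element = iterator.get_next()

with tf.Session() as sess:
    sess.run(iterator.initializer)
    print(sess.run(next_element))   # one shuffled batch of 3 elements
```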
from tensorflow.contrib.framework import tensor_util
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
predictions, labels = tensor_util.remove_squeezable_dimensions(
predictions, labels)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
| tensorflow.contrib.framework.tensor_util.remove_squeezable_dimensions | 7,029 |
import tensorflow as tf
a = 7.5
return __phi_f(tf.minimum(x, a)) - __phi_f(a) + __phi_g(tf.maximum(x, a))
| tensorflow.minimum | 7,030 |
import tensorflow as tf
Returns
-------
act: (tf.Variable, bool, float) -> tf.Variable
function to select and action given observation.
` See the top of the file for details.
"""
with tf.variable_scope(scope, reuse=reuse):
observations_ph = U.ensure_tf_input(make_obs_ph("observation"))
stochastic_ph = tf.placeholder(tf.bool, (), name="stochastic")
update_eps_ph = tf.placeholder(tf.float32, (), name="update_eps")
eps = tf.get_variable("eps", (), initializer=tf.constant_initializer(0.0))
| tensorflow.variable_scope | 7,031 |
from tensorflow.contrib.learn.python.learn.preprocessing.text import CategoricalVocabulary
min_frequency: Minimum frequency of words in the vocabulary.
vocabulary: CategoricalVocabulary object.
Attributes:
vocabulary_: CategoricalVocabulary object.
"""
self.min_frequency = min_frequency
if vocabulary:
self.vocabulary_ = vocabulary
else:
self.vocabulary_ = CategoricalVocabulary(support_reverse=True)
if tokenizer_fn:
self._tokenizer = tokenizer_fn
else:
self._tokenizer = tokenizer
def fit(self, raw_documents, unused_y=None):
"""Learn a vocabulary dictionary of all tokens in the raw documents.
Args:
| tensorflow.contrib.learn.python.learn.preprocessing.text.CategoricalVocabulary | 7,032 |
import tensorflow as tf
cur_batch_size,
config.class_num_joints[(params['model_scope'] if 'all' not in params['model_scope'] else '*')],
params['heatmap_size'],
params['train_image_size'])
# last_pred_mse = tf.metrics.mean_squared_error(score_map, targets,
# weights=1.0 / tf.cast(cur_batch_size, tf.float32),
# name='last_pred_mse')
# filter all invisible keypoint maybe better for this task
# all_visible = tf.logical_and(key_v>0, isvalid>0)
# targets_list = [tf.boolean_mask(targets_list[ind], all_visible) for ind in list(range(len(targets_list)))]
# pred_outputs = [tf.boolean_mask(pred_outputs[ind], all_visible, name='boolean_mask_{}'.format(ind)) for ind in list(range(len(pred_outputs)))]
all_visible = tf.expand_dims(tf.expand_dims(tf.cast(tf.logical_and(key_v>0, isvalid>0), tf.float32), axis=-1), axis=-1)
targets_list = [targets_list[ind] * all_visible for ind in list(range(len(targets_list)))]
pred_outputs = [pred_outputs[ind] * all_visible for ind in list(range(len(pred_outputs)))]
sq_diff = tf.reduce_sum(tf.squared_difference(targets, pred_outputs[-1]), axis=-1)
last_pred_mse = tf.metrics.mean_absolute_error(sq_diff, tf.zeros_like(sq_diff), name='last_pred_mse')
metrics = {'normalized_error': ne_mertric, 'last_pred_mse':last_pred_mse}
predictions = {'normalized_error': ne_mertric[1]}
ne_mertric = tf.identity(ne_mertric[1], name='ne_mertric')
base_learning_rate = params['learning_rate']
| tensorflow.logical_and | 7,033 |
import tensorflow as tf
cast_outputs_to_float(v)
else:
d[k] = tf.cast(v, tf.float32)
cast_outputs_to_float(model_outputs)
else:
model_outputs = _model_outputs()
# First check if it is in PREDICT mode.
if mode == tf.estimator.ModeKeys.PREDICT:
predictions = {}
predictions['detections'] = model_outputs['detections']
predictions['image_info'] = features['image_info']
if params['include_mask']:
predictions['mask_outputs'] = tf.nn.sigmoid(model_outputs['mask_outputs'])
if params['use_tpu']:
return tf.contrib.tpu.TPUEstimatorSpec(mode=mode, predictions=predictions)
return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
# Set up training loss and learning rate.
global_step = tf.train.get_or_create_global_step()
learning_rate = learning_rates.step_learning_rate_with_linear_warmup(
global_step,
params['init_learning_rate'],
params['warmup_learning_rate'],
params['warmup_steps'],
| tensorflow.nn.sigmoid | 7,034 |
from tensorflow.python.ops import array_ops
def logits_to_predictions(self, logits, proba=False):
if self.num_label_columns == 1:
return array_ops.squeeze(logits, squeeze_dims=[1])
return logits
| tensorflow.python.ops.array_ops.squeeze | 7,035 |
import tensorflow as tf
# Note: tf.nn.softmax_cross_entropy_with_logits
# expects logits, Keras expects probabilities.
if not from_logits:
epsilon = _to_tensor(_EPSILON, output.dtype.base_dtype)
output = tf.clip_by_value(output, epsilon, 1 - epsilon)
output = tf.log(output)
output_shape = output.get_shape()
| tensorflow.clip_by_value | 7,036 |
import tensorflow as tf
predicted_sdfs, predicted_classes, predicted_poses):
"""Update."""
if labeled_sdfs or labeled_classes:
print(labeled_sdfs)
mean_x = tf.reduce_mean(labeled_poses[1][:, 0])
mean_z = tf.reduce_mean(labeled_poses[1][:, 2])
samples_world = grid.generate(
(mean_x - 0.5, 0.0, mean_z - 0.5), (mean_x + 0.5, 1.0, mean_z + 0.5),
[self.resolution, self.resolution, self.resolution])
samples_world = tf.reshape(samples_world, [-1, 3])
| tensorflow.reduce_mean | 7,037 |
import tensorflow as tf
pred_norm = pred_ / \
(eps + tf.reshape(tf.reduce_sum(pred_, 1), [batch_size, 1]))
hist_rater_a = tf.reduce_sum(pred_norm, 0)
hist_rater_b = tf.reduce_sum(labels, 0)
conf_mat = tf.matmul(tf.transpose(pred_norm), labels)
nom = tf.reduce_sum(weights * conf_mat)
denom = tf.reduce_sum(weights * tf.matmul(
tf.reshape(hist_rater_a, [num_ratings, 1]), tf.reshape(hist_rater_b, [1, num_ratings])) /
tf.to_float(batch_size))
try:
return -(1 - nom / denom)
except Exception:
| tensorflow.reduce_sum | 7,038 |
from tensorflow.python.ops import array_ops
num_predictions = predictions_2d.get_shape().as_list()[0]
# Otherwise use dynamic shape.
if num_predictions is None:
num_predictions = array_ops.shape(predictions_2d)[0]
thresh_tiled = array_ops.tile(
array_ops.expand_dims(array_ops.constant(thresholds), [1]),
array_ops.pack([1, num_predictions]))
# Tile the predictions after thresholding them across different thresholds.
pred_is_pos = math_ops.greater(
array_ops.tile(array_ops.transpose(predictions_2d), [num_thresholds, 1]),
thresh_tiled)
pred_is_neg = math_ops.logical_not(pred_is_pos)
# Tile labels by number of thresholds
label_is_pos = array_ops.tile(labels_2d, [num_thresholds, 1])
label_is_neg = math_ops.logical_not(label_is_pos)
true_positives = _create_local('true_positives', shape=[num_thresholds])
false_negatives = _create_local('false_negatives', shape=[num_thresholds])
true_negatives = _create_local('true_negatives', shape=[num_thresholds])
| tensorflow.python.ops.array_ops.transpose | 7,039 |
from tensorflow.python.framework import ops
_OverrideBinaryOperatorHelper(floordiv, "floordiv")
_OverrideBinaryOperatorHelper(mod, "mod")
def logical_xor(x, y, name="LogicalXor"):
"""x ^ y = (x | y) & ~(x & y)."""
# TODO(alemi) Make this a cwise op if people end up relying on it.
return logical_and(logical_or(x, y), logical_not(logical_and(x, y)),
name=name)
_OverrideBinaryOperatorHelper(logical_and, "and")
_OverrideBinaryOperatorHelper(logical_or, "or")
_OverrideBinaryOperatorHelper(logical_xor, "xor")
ops.Tensor._override_operator("__lt__", less)
ops.Tensor._override_operator("__le__", less_equal)
ops.Tensor._override_operator("__gt__", greater)
ops.Tensor._override_operator("__ge__", greater_equal)
def range(start, limit, delta=1, name="range"):
"""Creates a sequence of integers.
This operation creates a sequence of integers that begins at `start` and
extends by increments of `delta` up to but not including `limit`.
For example:
```
# 'start' is 3
| tensorflow.python.framework.ops.Tensor._override_operator | 7,040 |
import tensorflow as tf
old_action_op, old_action_op_params = self._build_anet(
'old_action_op', trainable=False)
# Define the sampling output
self.sample_op = tf.squeeze(
action_op.sample(1),
axis=0) # operation of choosing action
# Update (sync the old policy's parameters)
self.update_old_action_op_op = [
olda.assign(a) for a, olda in zip(
action_op_params, old_action_op_params)]
# Define the input placeholders
self.tfa = tf.placeholder(tf.float32, [None, action_dim], 'action')
self.tfadv = tf.placeholder(tf.float32, [None, 1], 'advantage')
# Probability ratio between new and old policies
ratio = action_op.prob(self.tfa) / \
(old_action_op.prob(self.tfa) + 1e-5)
# Surrogate loss
surr = ratio * self.tfadv
# Minimize the surrogate loss
self.aloss = -tf.reduce_mean(tf.minimum(
surr,
tf.clip_by_value(ratio, 1. - EPSILON, 1. + EPSILON) * self.tfadv))
self.atrain_op = tf.train.AdamOptimizer(A_LR).minimize(self.aloss)
# log
self.train_writer = tf.summary.FileWriter("logs/", self.sess.graph)
| tensorflow.placeholder | 7,041 |
import tensorflow as tf
self.ch_len = tf.reshape(tf.reduce_sum(
tf.cast(tf.cast(self.ch, tf.bool), tf.int32), axis=2), [-1])
self.qh_len = tf.reshape(tf.reduce_sum(
tf.cast(tf.cast(self.qh, tf.bool), tf.int32), axis=2), [-1])
N, PL, QL, CL, d, dc, nh = self._params()
if self.config.fix_pretrained_vector:
dc = self.char_mat.get_shape()[-1]
with tf.variable_scope("Input_Embedding_Layer"):
ch_emb = tf.reshape(tf.nn.embedding_lookup(
self.char_mat, self.ch), [N * PL * self.max_p_num, CL, dc])
qh_emb = tf.reshape(tf.nn.embedding_lookup(
self.char_mat, self.qh), [N * QL * self.max_p_num, CL, dc])
ch_emb = tf.nn.dropout(ch_emb, 1.0 - 0.5 * self.dropout)
qh_emb = tf.nn.dropout(qh_emb, 1.0 - 0.5 * self.dropout)
ch_emb = conv(ch_emb, d,
| tensorflow.variable_scope | 7,042 |
from tensorflow.python.ops import array_ops
labels_sizes = set_ops.set_size(labels)
return math_ops.minimum(labels_sizes, k, name=scope)
# For dense Tensor, calculate scalar count based on last dimension, and
# tile across labels shape.
labels_shape = array_ops.shape(labels)
labels_size = labels_shape[-1]
num_relevant_scalar = math_ops.minimum(labels_size, k)
return array_ops.fill(labels_shape[0:-1], num_relevant_scalar, name=scope)
def expand_and_tile(tensor, multiple, dim=0, name=None):
"""Slice `tensor` shape in 2, then tile along the sliced dimension.
A new dimension is inserted in shape of `tensor` before `dim`, then values are
tiled `multiple` times along the new dimension.
| tensorflow.python.ops.array_ops.fill | 7,043 |
from tensorflow.python.ops import math_ops
# Local variable to accumulate the predictions in the confusion matrix.
cm_dtype = dtypes.int64 if weights is not None else dtypes.float64
total_cm = _create_local('total_confusion_matrix',
shape=[num_classes, num_classes], dtype=cm_dtype)
# Cast the type to int64 required by confusion_matrix_ops.
predictions = math_ops.to_int64(predictions)
labels = math_ops.to_int64(labels)
num_classes = math_ops.to_int64(num_classes)
# Flatten the input if its rank > 1.
predictions_rank = predictions.get_shape().ndims
| tensorflow.python.ops.math_ops.to_int64 | 7,044 |
import tensorflow as tf
reuse = True, # Share the weights between passage and question
bias = False,
dropout = self.dropout)
with tf.variable_scope("Context_to_Query_Attention_Layer"):
# C = tf.tile(tf.expand_dims(c,2),[1,1,self.q_maxlen,1])
# Q = tf.tile(tf.expand_dims(q,1),[1,self.c_maxlen,1,1])
# S = trilinear([C, Q, C*Q], input_keep_prob = 1.0 - self.dropout)
S = optimized_trilinear_for_attention([c, q], self.c_maxlen, self.q_maxlen, input_keep_prob = 1.0 - self.dropout)
mask_q = tf.expand_dims(self.q_mask, 1)
S_ = tf.nn.softmax(mask_logits(S, mask = mask_q))
mask_c = tf.expand_dims(self.c_mask, 2)
S_T = tf.transpose(tf.nn.softmax(mask_logits(S, mask = mask_c), dim = 1),(0,2,1))
self.c2q = tf.matmul(S_, q)
self.q2c = tf.matmul(tf.matmul(S_, S_T), c)
attention_outputs = [c, self.c2q, c * self.c2q, c * self.q2c]
with tf.variable_scope("Model_Encoder_Layer"):
inputs = tf.concat(attention_outputs, axis = -1)
self.enc = [conv(inputs, d, name = "input_projection")]
for i in range(3):
if i % 2 == 0: # dropout every 2 blocks
| tensorflow.expand_dims | 7,045 |
import tensorflow as tf
per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
loss = tf.reduce_mean(per_example_loss)
| tensorflow.reduce_mean | 7,046 |
import tensorflow as tf
y_w_diff = y[:, :, 1:] - y[:, :, :-1]
h_diff = tf.abs(tf.abs(x_h_diff) - tf.abs(y_h_diff))
w_diff = tf.abs(tf.abs(x_w_diff) - tf.abs(y_w_diff))
return h_diff + tf.transpose(w_diff)
def leaky_relu(x, leak=0.2, name='leaky_relu'):
| tensorflow.transpose | 7,047 |
import tensorflow as tf
t_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.name + '/target_net')
e_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.name + '/eval_net')
| tensorflow.get_collection | 7,048 |
import tensorflow as tf
in_dim = input_dims[-1]
flat_input = input_data
if initial_value is None:
fc_weight = tf.get_variable("weights", shape=[in_dim, out_dim], initializer=tf.random_normal_initializer(mean=0., stddev=0.01))
fc_bias = tf.get_variable("bias", shape=[out_dim], initializer=tf.constant_initializer(0.0))
else:
fc_weight = tf.get_variable("weights", initializer=initial_value[0])
fc_bias = tf.get_variable("bias", shape=[out_dim], initializer=initial_value[1])
if use_bias:
output = tf.add(tf.matmul(flat_input, fc_weight), fc_bias)
else:
output = tf.matmul(flat_input, fc_weight)
if non_linear_fn is None:
return output
else:
activation = non_linear_fn(output)
return activation
def batch_norm(x, b_train, scope, reuse=False):
with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
n_out = x.get_shape().as_list()[-1]
| tensorflow.matmul | 7,049 |
import tensorflow as tf
grad_and_var = (grad, var)
average_grads.append(grad_and_var)
return average_grads
def binary_mask(shape, p=0.7):
samples = tf.random_uniform(shape, minval=0.0, maxval=1.0)
mask = tf.less_equal(samples, p)
return tf.cast(mask, tf.float32)
def weighted_arithmetic_mean(w, x):
numer = tf.reduce_sum(w*x)
denom = tf.reduce_sum(w)
return tf.div(numer, denom)
| tensorflow.cast | 7,050 |
import tensorflow as tf
ch_emb = tf.reduce_max(ch_emb, axis = 1)
qh_emb = tf.reduce_max(qh_emb, axis = 1)
| tensorflow.reduce_max | 7,051 |
import tensorflow as tf
output = tf.matmul(scores, facts) # [B, 1, H]
# output = tf.reshape(output, [-1, tf.shape(facts)[-1]])
else:
scores = tf.reshape(scores, [-1, tf.shape(facts)[1]])
output = facts * tf.expand_dims(scores, -1)
output = tf.reshape(output, tf.shape(facts))
return output
def din_fcn_attention(query, facts, attention_size, mask, stag='null', mode='SUM', softmax_stag=1, time_major=False, return_alphas=False, forCnn=False):
if isinstance(facts, tuple):
| tensorflow.shape | 7,052 |
import tensorflow as tf
with self.test_session() as session:
@dynamic_batching.batch_fn
def f(a, b):
batch_size = tf.shape(a)[0]
return a + b, tf.tile([batch_size], [batch_size])
output = f(tf.constant([[1, 3]]), tf.constant([2]))
tf.train.start_queue_runners()
result, batch_size = session.run(output)
self.assertAllEqual([[3, 5]], result)
self.assertAllEqual([1], batch_size)
def test_two(self):
| tensorflow.train.start_queue_runners | 7,053 |
import tensorflow as tf
offset: (height, width)
Returns:
"""
with tf.variable_scope('anchor_generator'):
if offset is None:
offset = [stride[0]/2, stride[1]/2]
features_width = tf.cast(features_width, tf.int32)
features_height = tf.cast(features_height, tf.int32)
scales = tf.convert_to_tensor(scales, dtype=tf.float32)
ratios = tf.convert_to_tensor(ratios, dtype=tf.float32)
offset = tf.convert_to_tensor(offset, dtype=tf.float32)
scales_grid, ratios_grid = tf.meshgrid(scales,
ratios)
scales_grid = tf.reshape(scales_grid, [-1, 1])
ratios_grid = tf.reshape(ratios_grid, [-1, 1])
ratio_sqrts = tf.sqrt(ratios_grid)
| tensorflow.convert_to_tensor | 7,054 |
from tensorflow.python.ops import math_ops
Returns:
value_tensor: A tensor representing the current value of the metric.
update_op: An operation that accumulates the error from a batch of data.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
with variable_scope.variable_scope(
name, 'true_positives', [predictions, labels]):
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
is_true_positive = math_ops.logical_and(math_ops.equal(labels, 1),
math_ops.equal(predictions, 1))
return _count_condition(is_true_positive, weights, metrics_collections,
updates_collections)
def _streaming_false_positives(predictions, labels, weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Sum the weights of false positives.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
| tensorflow.python.ops.math_ops.equal | 7,055 |
import tensorflow as tf
return tf.boolean_mask(flattened_emb, tf.reshape(text_len_mask, [num_sentences * max_sentence_length]))
def lstm_contextualize(self, text_emb, text_len, text_len_mask):
num_sentences = tf.shape(text_emb)[0]
current_inputs = text_emb # [num_sentences, max_sentence_length, emb]
| tensorflow.shape | 7,056 |
import tensorflow as tf
for l in range(1, L):
parameters['w' + str(l)] = tf.get_variable('w' + str(l), [layers[l], layers[l-1]],dtype= 'float64' , initializer= tf.contrib.layers.xavier_initializer(seed=1) )
parameters['b' + str(l)] = tf.get_variable('b' + str(l), [layers[l], 1],dtype= 'float64', initializer = tf.zeros_initializer())
parameters['beta' + str(l)] = tf.get_variable('beta'+ str(l), [layers[l], 1], dtype= 'float64', initializer = init, trainable= train )
| tensorflow.zeros_initializer | 7,057 |
import tensorflow as tf
Parameters
----------
x: input tensor.
axis: axis along which to perform normalization.
Returns
-------
A tensor.
"""
if axis < 0:
axis = axis % len(x.get_shape())
return tf.nn.l2_normalize(x, dim=axis)
def categorical_crossentropy(output, target, from_logits=False):
"""Categorical crossentropy between an output tensor
and a target tensor, where the target is a tensor of the same
shape as the output.
# TODO(rbharath): Should probably swap this over to tf mode.
"""
# Note: tf.nn.softmax_cross_entropy_with_logits
# expects logits, Keras expects probabilities.
| tensorflow.nn.l2_normalize | 7,058 |
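A tiny sketch of `tf.nn.l2_normalize` on a concrete row vector (the `dim` keyword used above is the older spelling of `axis`):

```python
import tensorflow as tf

x = tf.constant([[3.0, 4.0]])
unit = tf.nn.l2_normalize(x, axis=1)   # -> [[0.6, 0.8]], unit L2 norm per row
```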
import tensorflow as tf
def _init():
v_norm = tf.nn.l2_normalize(self.v,axis=[0,1,3])
t = tf.nn.conv2d_transpose(input_var,v_norm,
output_shape=shapes,
strides=self.strides,
padding='SAME',
data_format='NHWC')
mu,var = tf.nn.moments(t,axes=[0,1,2])
std = tf.sqrt(var+self.epsilon)
return [tf.assign(self.g,1/std),tf.assign(self.b,-1.*mu/std)]
require_init = tf.reduce_any(tf.is_nan(self.g))
init_ops = tf.cond(require_init,_init,lambda : [self.g,self.b])
| tensorflow.nn.moments | 7,059 |
import tensorflow as tf
x=all_images, y=all_y, is_training=is_training)
z_projs = self._latent_projections(d_latents)
d_real, d_fake, _, _ = tf.split(d_all, 4)
d_real_logits, d_fake_logits, _, _ = tf.split(d_all_logits, 4)
z_projs_real, z_projs_fake, z_aug_projs_real, z_aug_projs_fake = tf.split(z_projs, 4)
self.d_loss, _, _, self.g_loss = loss_lib.get_losses(
d_real=d_real, d_fake=d_fake, d_real_logits=d_real_logits,
| tensorflow.split | 7,060 |
import tensorflow as tf
hooks = []
if FLAGS.use_hvd:
hooks.append(hvd.BroadcastGlobalVariablesHook(0))
if hvd.rank() == -1: #if debug, set 0
CLIDebugHook = tf_debug.LocalCLIDebugHook(ui_type='readline')
CLIDebugHook.add_tensor_filter("has_inf_or_nan", tf_debug.has_inf_or_nan)
hooks.append(CLIDebugHook)
if FLAGS.profile and hvd.rank() == 0:
ProfilerHook = tf.train.ProfilerHook(save_steps=FLAGS.hooking_frequence, output_dir=FLAGS.output_dir, show_dataflow=True, show_memory=True)
hooks.append(ProfilerHook)
# If TPU is not available, this will fall back to normal Estimator on CPU
# or GPU.
estimator = tf.estimator.Estimator(
model_fn=model_fn,
config=run_config)
if FLAGS.do_train:
tf.logging.info("***** Running training *****")
| tensorflow.train.ProfilerHook | 7,061 |
import tensorflow as tf
# add final layer bias
b = tf.Variable(tf.constant(0.1, shape=[nb_classes]), name="b")
# calc l2 losses
l2_loss += tf.nn.l2_loss(W)
l2_loss += tf.nn.l2_loss(b)
| tensorflow.nn.l2_loss | 7,062 |
import tensorflow as tf
next_indices = tf.stack([neighbor.indices[:, 0], next_idx], 1)
next_values = weight.values
next_shape = tf.stack([tf.size(nodes), tf.size(next_nodes)])
next_shape = tf.cast(next_shape, tf.int64)
next_adj = tf.SparseTensor(next_indices, next_values, next_shape)
next_adj = tf.sparse_reorder(next_adj)
nodes_list.append(next_nodes)
adj_list.append(next_adj)
nodes = next_nodes
return nodes_list, adj_list
| tensorflow.sparse_reorder | 7,063 |
from tensorflow.python.framework import ops
self._sig.name = func_name
def _to_argdef_list(args):
names = [n for n, t in args]
if len(names) != len(set(names)):
raise ValueError("Expected names to all be unique: %s" % str(names))
return [op_def_pb2.OpDef.ArgDef(type=t.as_datatype_enum, name=n)
for n, t in args]
self._sig.input_arg.extend(_to_argdef_list(inputs))
self._sig.output_arg.extend(_to_argdef_list(outputs))
def __call__(self, *inputs, **kwargs):
inputs = [ops.convert_to_tensor(_) for _ in inputs]
return _call(self._sig, *inputs, **kwargs)
| tensorflow.python.framework.ops.convert_to_tensor | 7,064 |
from tensorflow.python.framework import ops
input_tensor = op.inputs[0]
guide_tensor = op.inputs[1]
return [guide_tensor.get_shape().concatenate(input_tensor.get_shape()[-1])]
@ops.RegisterShape('BilateralSliceApply')
def _bilateral_slice_shape(op):
grid_tensor = op.inputs[0]
guide_tensor = op.inputs[1]
input_tensor = op.inputs[2]
| tensorflow.python.framework.ops.RegisterShape | 7,065 |
import tensorflow as tf
def process(policy, dataloader, top_k):
mean_kacc = np.zeros(len(top_k))
n_samples_processed = 0
for batch in dataloader:
if policy['type'] == 'gcnn':
c, ei, ev, v, n_cs, n_vs, n_cands, cands, best_cands, cand_scores = batch
pred_scores = policy['model']((c, ei, ev, v, tf.reduce_sum(n_cs, keepdims=True), tf.reduce_sum(n_vs, keepdims=True)), tf.convert_to_tensor(False))
# filter candidate variables
pred_scores = tf.expand_dims(tf.gather(tf.squeeze(pred_scores, 0), cands), 0)
elif policy['type'] == 'ml-competitor':
cand_feats, n_cands, best_cands, cand_scores = batch
# move to numpy
cand_feats = cand_feats.numpy()
| tensorflow.reduce_sum | 7,066 |
from tensorflow.python.platform import gfile
self.assertEqual([s1], save.last_checkpoints)
self.assertTrue(gfile.Exists(s1))
s2 = save.save(sess, os.path.join(save_dir, "s2"))
self.assertEqual([s1, s2], save.last_checkpoints)
self.assertTrue(gfile.Exists(s1))
self.assertTrue(gfile.Exists(s2))
s3 = save.save(sess, os.path.join(save_dir, "s3"))
self.assertEqual([s2, s3], save.last_checkpoints)
self.assertFalse(gfile.Exists(s1))
self.assertTrue(gfile.Exists(s2))
self.assertTrue(gfile.Exists(s3))
# Create a second helper, identical to the first.
save2 = tf.train.Saver(saver_def=save.as_saver_def())
save2.set_last_checkpoints(save.last_checkpoints)
# Create a third helper, with the same configuration but no knowledge of
# previous checkpoints.
| tensorflow.python.platform.gfile.Exists | 7,067 |
from tensorflow.python.ops import gen_nn_ops
Returns:
A `Tensor` with the same type as `value`. The average pooled output tensor.
"""
with ops.op_scope([value], name, "AvgPool") as name:
value = ops.convert_to_tensor(value, name="input")
return gen_nn_ops._avg_pool(value, ksize=ksize, strides=strides,
padding=padding,
data_format=data_format,
name=name)
| tensorflow.python.ops.gen_nn_ops._avg_pool | 7,068 |
import tensorflow as tf
# Pad input.
x_ = _pad_input(
x, ksize, strides, padding, bsize=[1, blk_shape[1], blk_shape[2], 1], bstrides=bstrides)
# Convolution when number of indices is larger than zero.
def _conv_nonzero():
# Gather patches.
p = tf.gather_nd(x_, blk_indices_)
# Reshape patches.
p = tf.reshape(p, [blk_shape[0], blk_shape[1], blk_shape[2], -1])
# Convolution on patches.
q = tf.nn.conv2d(p, w, strides, 'VALID', use_cudnn_on_gpu=True)
| tensorflow.gather_nd | 7,069 |
import tensorflow as tf
w = tf.reshape(w, [hidden_size, num_attention_heads, head_size])
b = tf.get_variable(
name="bias",
shape=[num_attention_heads * head_size],
initializer=tf.zeros_initializer)
b = tf.reshape(b, [num_attention_heads, head_size])
ret = tf.einsum("BFH,HND->BFND", input_tensor, w)
ret += b
if activation is not None:
return activation(ret)
else:
return ret
| tensorflow.einsum | 7,070 |
from tensorflow.contrib.rnn import GRUCell
highway_input = tf.layers.dense(highway_input, half_depth)
# 4-layer HighwayNet:
for i in range(4):
highway_input = highwaynet(highway_input, 'highway_%d' % (i + 1), half_depth)
rnn_input = highway_input
# Bidirectional RNN
outputs, states = tf.nn.bidirectional_dynamic_rnn(
GRUCell(half_depth),
GRUCell(half_depth),
rnn_input,
sequence_length=input_lengths,
dtype=tf.float32)
return tf.concat(outputs, axis=2) # Concat forward and backward
def highwaynet(inputs, scope, depth):
with tf.variable_scope(scope):
H = tf.layers.dense(
| tensorflow.contrib.rnn.GRUCell | 7,071 |
import tensorflow as tf
units=10,
activation=None,
use_bias=False,
name='out'
)
return out, img_summary
def main(_):
tf.reset_default_graph()
# Import data
cifar = cf.cifar10(batchSize=FLAGS.batch_size, downloadDir=FLAGS.data_dir)
with tf.variable_scope('inputs'):
# Create the model
x = tf.placeholder(tf.float32, [None, FLAGS.img_width * FLAGS.img_height * FLAGS.img_channels])
# Define loss and optimizer
y_ = tf.placeholder(tf.float32, [None, FLAGS.num_classes])
# Whether model is training
train = tf.placeholder(tf.bool, [])
# Build the graph for the deep net
y_conv, img_summary = deepnn(x, train)
# Define your loss function - softmax_cross_entropy
with tf.variable_scope('x_entropy'):
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))
| tensorflow.variable_scope | 7,072 |
import tensorflow as tf
tgt = tf.expand_dims(targets, axis=0)
flat_with_target = tf.concat([flat, tgt], axis=0)
| tensorflow.concat | 7,073 |
import tensorflow as tf
# products of decays isn't ideal numerically, in particular if any of the
# decays are zero it results in NaNs.
with tf.name_scope(name, values=[sequence, decay, initial_value]):
if sequence_lengths is not None:
# Zero out sequence and decay beyond sequence_lengths.
with tf.control_dependencies(
[tf.assert_equal(sequence.shape[0], decay.shape[0])]):
mask = tf.sequence_mask(sequence_lengths, maxlen=sequence.shape[0],
dtype=sequence.dtype)
mask = tf.transpose(mask)
# Adding trailing dimensions to mask to allow for broadcasting.
| tensorflow.assert_equal | 7,074 |
import tensorflow as tf
if var_shape == saved_shapes[key]:
var_list[key] = var
var_name = var.name[:var.name.index(':')]
msg.append('- restoring variable: {}'.format(var_name)
if var_name == key else
'- restoring variable {} from {}'.format(var_name, key))
else:
msg.append(Color.yellow(
'- variable({}) with inconsistent shape: {}(graph) != {}(ckpt)'.format(
key, var_shape, saved_shapes[key])
))
if len(var_list) != 0:
msg += ['- total variable count: {}'.format(len(var_list))]
logger.info('\n'.join(msg))
saver = tf.train.Saver(var_list, name='tk_restore')
saver.restore(sess, self.restore_ckpt_file)
return True
else:
logger.warn(Color.yellow('No vars need to restore from file: {}'.format(self.restore_ckpt_file)))
return False
def __str__(self):
content = 'RESTORE_OPTIMISTIC: %s' \
'\nRESTORE_CHECKPOINT_FILE: %s' % (self._restore_optimistic, self.restore_ckpt_file)
return content
| tensorflow.train.Saver | 7,075 |
from tensorflow.contrib.framework.python.framework import checkpoint_utils
def weights(self):
"""Returns the cluster weights."""
return checkpoint_utils.load_variable(
self.model_dir, gmm_ops.GmmAlgorithm.CLUSTERS_WEIGHT)
def clusters(self):
"""Returns cluster centers."""
clusters = checkpoint_utils.load_variable(
self.model_dir, gmm_ops.GmmAlgorithm.CLUSTERS_VARIABLE)
return np.squeeze(clusters, 1)
def covariances(self):
"""Returns the covariances."""
return checkpoint_utils.load_variable(
self.model_dir, gmm_ops.GmmAlgorithm.CLUSTERS_COVS_VARIABLE)
| tensorflow.contrib.framework.python.framework.checkpoint_utils.load_variable | 7,076 |
import tensorflow as tf
return x
def lstm():
'''
Build LSTM cell
'''
pass
def loss(logits, labels):
'''
Compute loss
'''
with tf.name_scope('loss') as scope:
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels, name='cross-entropy')
loss = tf.reduce_mean(cross_entropy, name='loss')
tf.summary.scalar(scope+'/loss', loss)
return loss
def accuracy(logits, labels):
'''
Evaluate the quality of the logits at predicting the label
'''
# for summary
with tf.name_scope('accuracy') as scope:
correct = tf.equal(tf.arg_max(logits,1), tf.arg_max(labels,1))
correct = tf.cast(correct, tf.float32)
| tensorflow.reduce_mean | 7,077 |
import tensorflow as tf
self.Y_hat = training_decoder_output.rnn_output
out_decoder2 = tf.reshape(self.Y_hat, [tf.shape(self.Y_hat)[0], -1, n_mels])
dec = conv1d_banks(out_decoder2, K=decoder_num_banks, is_training=self.training)
dec = tf.layers.max_pooling1d(dec, pool_size=2, strides=1, padding="same")
dec = tf.layers.conv1d(dec, embed_size // 2, 3, name="decoder-conv1-1", padding="SAME")
dec = tf.nn.relu(tf.layers.batch_normalization(dec, training=self.training))
dec = tf.layers.conv1d(dec, embed_size // 2, 3, name="decoder-conv1-2", padding="SAME")
dec = tf.layers.batch_normalization(dec, training=self.training)
dec = tf.layers.dense(dec, embed_size // 2)
for i in range(4):
dec = highwaynet(
dec, num_units=embed_size // 2, scope="decoder-highwaynet-{}".format(i)
)
with tf.variable_scope("decoder-gru", reuse=False):
cell = tf.contrib.rnn.GRUCell(embed_size // 2)
| tensorflow.layers.batch_normalization | 7,078 |
import tensorflow as tf
in_channel = in_shape[3]
assert in_channel is not None, "[Conv2D] Input cannot have unknown channel!"
padding = padding.upper()
if isinstance(k_size, list):
filter_shape = [k_size[0], k_size[1]] + [in_channel, out_dims]
else:
filter_shape = [k_size, k_size] + [in_channel, out_dims]
if w_init is None:
w_init = tf.contrib.layers.variance_scaling_initializer()
if b_init is None:
b_init = tf.constant_initializer()
w = tf.get_variable('W', filter_shape, initializer=w_init)
b = None
if use_bias:
b = tf.get_variable('b', [out_dims], initializer=b_init)
conv = tf.nn.atrous_conv2d(value=input_tensor, filters=w, rate=rate,
padding=padding, name='dilation_conv')
if use_bias:
ret = tf.add(conv, b)
| tensorflow.constant_initializer | 7,079 |
import tensorflow as tf
output = facts * tf.expand_dims(scores, -1)
output = tf.reshape(output, tf.shape(facts))
if return_alphas:
return output, scores
return output
def self_attention(facts, ATTENTION_SIZE, mask, stag='null'):
if len(facts.get_shape().as_list()) == 2:
facts = tf.expand_dims(facts, 1)
def cond(batch, output, i):
return tf.less(i, tf.shape(batch)[1])
def body(batch, output, i):
self_attention_tmp = din_fcn_attention(batch[:, i, :], batch[:, 0:i+1, :],
ATTENTION_SIZE, mask[:, 0:i+1], softmax_stag=1, stag=stag,
mode='LIST')
self_attention_tmp = tf.reduce_sum(self_attention_tmp, 1)
output = output.write(i, self_attention_tmp)
return batch, output, i + 1
output_ta = tf.TensorArray(dtype=tf.float32,
| tensorflow.shape | 7,080 |
import tensorflow as tf
word_probs = _clip_and_normalize(word_probs, epsilon)
one_hot_spare_rep = tf.one_hot(answers, vsize)
xent = -tf.reduce_sum(one_hot_spare_rep * tf.log(word_probs), axis=-1) # [batch_size, max_dec_steps]
if loss_weights != None:
xent = xent * loss_weights
xent = tf.reduce_sum(xent, axis=-1)
| tensorflow.log | 7,081 |
import tensorflow as tf
if self.config.decay is not None:
self.var_ema = tf.train.ExponentialMovingAverage(self.config.decay)
ema_op = self.var_ema.apply(tf.trainable_variables())
with tf.control_dependencies([ema_op]):
self.loss = tf.identity(self.loss)
self.shadow_vars = []
| tensorflow.control_dependencies | 7,082 |
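A minimal standalone sketch of the moving-average pattern above, assuming TF 1.x; the variable, loss, and decay value are illustrative:

```python
import tensorflow as tf

w = tf.Variable(1.0)
loss = tf.square(w - 2.0)

var_ema = tf.train.ExponentialMovingAverage(decay=0.99)
ema_op = var_ema.apply(tf.trainable_variables())
with tf.control_dependencies([ema_op]):
    loss = tf.identity(loss)   # fetching the loss now also updates the shadow variables
```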
import tensorflow as tf
result = estimator.predict(input_fn=predict_input_fn)
output_predict_file = os.path.join(FLAGS.output_dir, "test_results.tsv")
with tf.gfile.GFile(output_predict_file, "w") as writer:
num_written_lines = 0
tf.logging.info("***** Predict results *****")
for (i, prediction) in enumerate(result):
probabilities = prediction["probabilities"]
if i >= num_actual_predict_examples:
break
output_line = "\t".join(
| tensorflow.logging.info | 7,083 |
import tensorflow as tf
for p in warmup_vfn.parameters(): p.invalidate()
for p in warmup_model.parameters(): p.invalidate()
for p in policy.parameters(): p.invalidate()
task.parameters().invalidate()
pol_params, warm_params = tf.get_default_session().run([nn.utils.parameters_to_vector(policy.parameters()), nn.utils.parameters_to_vector(warmup_policy.parameters())])
print ("After WARMUP, pol_params_norm:", np.linalg.norm(pol_params), "warm_params_norm:", np.linalg.norm(warm_params))
mod, warm_mod = tf.get_default_session().run([nn.utils.parameters_to_vector(model.parameters()), nn.utils.parameters_to_vector(warmup_model.parameters())])
print ("mod_norm:", np.linalg.norm(mod), "warm_mod_norm:", np.linalg.norm(warm_mod))
eval_rollout(runners['train'], warmup_policy, 'Use warmup policy to collect data from virtual env')
warmup_collect_virt = []
eval_rollout(runners['train'], policy, 'Use policy to collect data from virtual env')
| tensorflow.get_default_session | 7,084 |
import tensorflow as tf
mask = tf.ones([cutout_size, cutout_size], dtype=tf.int32)
start_x = tf.random.uniform(shape=(1,), minval=0, maxval=im_width, dtype=tf.int32)
| tensorflow.random.uniform | 7,085 |
import tensorflow as tf
if __name__ == "__main__":
tf.test.main()
| tensorflow.test.main | 7,086 |
import tensorflow as tf
maybe_log2 = tf.log(2.0) if surrogate_type == 'xent' else 1.0
maybe_log2 = tf.cast(maybe_log2, logits.dtype.base_dtype)
lambda_term = lambdas * (1.0 - precision_values) * label_priors * maybe_log2
per_anchor_loss = loss - lambda_term
per_label_loss = delta * tf.reduce_sum(per_anchor_loss, 2)
# Normalize the AUC such that a perfect score function will have AUC 1.0.
# Because precision_range is discretized into num_anchors + 1 intervals
# but only num_anchors terms are included in the Riemann sum, the
| tensorflow.reduce_sum | 7,087 |
import tensorflow as tf
tf.TensorShape(spec['shape']))
# Used for input shapes of the prediction network
if self.data_shape is None:
self.data_shape = output_shapes
# Handle for the feedable iterator
self.handle = tf.placeholder(tf.string, shape=[])
iterator = tf.data.Iterator.from_string_handle(
self.handle, output_types, output_shapes)
data = iterator.get_next()
# Build the actual training and evaluation models
self._train_graph(data)
self._eval_graph(data)
self.summaries = tf.summary.merge_all()
| tensorflow.data.Iterator.from_string_handle | 7,088 |
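A runnable sketch of the feedable-iterator pattern above, assuming TF 1.x; a toy range dataset stands in for the real input pipeline:

```python
import tensorflow as tf

train_ds = tf.data.Dataset.range(10).batch(2)

handle = tf.placeholder(tf.string, shape=[])
iterator = tf.data.Iterator.from_string_handle(
    handle, train_ds.output_types, train_ds.output_shapes)
next_element = iterator.get_next()

with tf.Session() as sess:
    train_handle = sess.run(train_ds.make_one_shot_iterator().string_handle())
    print(sess.run(next_element, feed_dict={handle: train_handle}))   # [0 1]
```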
import tensorflow as tf
self.q_len = tf.reduce_sum(tf.cast(self.q_mask, tf.int32), axis=1)
self.dropout = tf.placeholder(tf.float32, name="dropout")
self.global_step = tf.Variable(0, name="global_step", trainable=False)
"""
:descrition: The embedding layer, question and passage share embeddings
"""
def _embed(self):
with tf.variable_scope('word_char_embedding'):
if self.config.fix_pretrained_vector:
self.pretrained_word_mat = tf.get_variable("word_emb_mat",
[self.vocab.word_size() - 2, self.vocab.word_embed_dim],
dtype=tf.float32,
initializer=tf.constant_initializer(
self.vocab.word_embeddings[2:],
dtype=tf.float32),
trainable=False)
| tensorflow.variable_scope | 7,089 |
import tensorflow as tf
reg = tf.contrib.layers.l2_regularizer(1e-3)
with tf.variable_scope(name, reuse=reuse):
layer_a1 = tf.layers.dense(state_in, 512, tf.nn.relu, kernel_regularizer=reg)
layer_a2 = tf.layers.dense(layer_a1, 256, tf.nn.relu, kernel_regularizer=reg)
mu = tf.layers.dense(layer_a2, self.a_dim, tf.nn.tanh, kernel_regularizer=reg)
# sigma = tf.layers.dense(layer_a2, self.a_dim, tf.nn.softplus, kernel_regularizer=reg)
sigma = tf.get_variable(name='pi_sigma', shape=self.a_dim, initializer=tf.constant_initializer(0.5))
sigma = tf.clip_by_value(sigma, 0.0, 1.0)
norm_dist = tf.distributions.Normal(loc=mu * self.a_bound, scale=sigma)
params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=name)
return norm_dist, params
def build_cnet(self, state_in, name, reuse=False):
reg = tf.contrib.layers.l2_regularizer(1e-3)
with tf.variable_scope(name, reuse=reuse):
layer_c1 = tf.layers.dense(state_in, 512, tf.nn.relu, kernel_regularizer=reg)
layer_c2 = tf.layers.dense(layer_c1, 256, tf.nn.relu, kernel_regularizer=reg)
vf = tf.layers.dense(layer_c2, 1, kernel_regularizer=reg)
| tensorflow.get_collection | 7,090 |
import tensorflow as tf
if target_weight_strategy == 'rect':
avg_neg_log_lhood = tf.reduce_mean(neg_log_lhoods)
else:
neg_log_lhoods = tf.multiply(neg_log_lhoods, target_weights)
# be careful to have at least one weight be nonzero
# should we be taking the mean elem-wise by batch? i think this is a big bug
avg_neg_log_lhood = tf.reduce_sum(neg_log_lhoods) / tf.reduce_sum(target_weights)
neg_log_lhoods_inspect = tf.reshape(neg_log_lhoods, [batch_size, rnn_nunroll])
# Train op
if mode == 'train':
lr = tf.Variable(0.0, trainable=False)
self._lr = lr
self._lr_summary = tf.summary.scalar('learning_rate', self._lr)
tvars = tf.trainable_variables()
grads = tf.gradients(avg_neg_log_lhood, tvars)
if grad_clip > 0.0:
grads, _ = tf.clip_by_global_norm(grads, grad_clip)
if opt == 'sgd':
optimizer = tf.train.GradientDescentOptimizer(lr)
else:
raise NotImplementedError()
train_op = optimizer.apply_gradients(zip(grads, tvars), global_step=tf.contrib.framework.get_or_create_global_step())
# Tensor exports
self.feats_audio = feats_audio_nunroll
self.feats_other = feats_other_nunroll
| tensorflow.trainable_variables | 7,091 |
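A compact sketch of the clip-by-global-norm training op built above, with a toy loss; the learning rate and clip value are illustrative:

```python
import tensorflow as tf

x = tf.Variable([3.0, 4.0])
loss = tf.reduce_sum(tf.square(x))

tvars = tf.trainable_variables()
grads = tf.gradients(loss, tvars)
grads, _ = tf.clip_by_global_norm(grads, clip_norm=1.0)   # rescale so the global norm is at most 1
train_op = tf.train.GradientDescentOptimizer(0.1).apply_gradients(zip(grads, tvars))
```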
from tensorflow.contrib.framework import deprecated_arg_values
# configuration values to **kwargs so that updates to the export_estimator
# function don't have to be reflected here.
@deprecated_arg_values(
"2016-09-23",
| tensorflow.contrib.framework.deprecated_arg_values | 7,092 |
import tensorflow as tf
is_training=False,
drop_remainder=eval_drop_remainder)
result = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps)
output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt")
with tf.gfile.GFile(output_eval_file, "w") as writer:
tf.logging.info("***** Eval results *****")
for key in sorted(result.keys()):
tf.logging.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
| tensorflow.gfile.GFile | 7,093 |
import tensorflow as tf
@dynamic_batching.batch_fn
def f(a, b):
return a + b
output0 = f(tf.constant([1]), tf.constant([2]))
output1 = f(tf.constant([[2]]), tf.constant([3]))
tp = pool.ThreadPool(2)
f0 = tp.apply_async(session.run, [output0])
f1 = tp.apply_async(session.run, [output1])
| tensorflow.constant | 7,094 |
import tensorflow as tf
*sampled_idx_indicator*: boolean tensor of shape [N], True for entries which are sampled.
"""
negative_idx = tf.logical_not(labels)
positive_idx = tf.logical_and(labels, indicator)
negative_idx = tf.logical_and(negative_idx, indicator)
# Sample positive and negative samples separately
if sample_size is None:
max_num_pos = tf.reduce_sum(tf.cast(positive_idx, dtype=tf.int32))
else:
| tensorflow.logical_and | 7,095 |
import tensorflow as tf
l = tf.reshape(l[:, :, :, nr_mix:], xs + [nr_mix * 3])
# sample mixture indicator from softmax
sel = tf.one_hot(tf.argmax(logit_probs - tf.log(-tf.log(tf.random_uniform(
tf.shape(logit_probs), minval=1e-5, maxval=1. - 1e-5))), 3), depth=nr_mix, dtype=tf.float32)
sel = tf.reshape(sel, xs[:-1] + [1, nr_mix])
# select logistic parameters
means = tf.reduce_sum(l[:, :, :, :, :nr_mix] * sel, 4)
log_scales = tf.maximum(tf.reduce_sum(
l[:, :, :, :, nr_mix:2 * nr_mix] * sel, 4), -7.)
coeffs = tf.reduce_sum(tf.nn.tanh(
l[:, :, :, :, 2 * nr_mix:3 * nr_mix]) * sel, 4)
# sample from logistic & clip to interval
| tensorflow.reduce_sum | 7,096 |
import tensorflow as tf
"""Train CIFAR-10 for a number of steps."""
with tf.Graph().as_default(), tf.device('/cpu:0'):
# Create a variable to count the number of train() calls. This equals the
# number of batches processed * FLAGS.num_gpus.
global_step = tf.get_variable(
'global_step', [],
initializer=tf.constant_initializer(0), trainable=False)
# Calculate the learning rate schedule.
num_batches_per_epoch = (cifar10.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN /
FLAGS.batch_size)
decay_steps = int(num_batches_per_epoch * cifar10.NUM_EPOCHS_PER_DECAY)
| tensorflow.constant_initializer | 7,097 |
import tensorflow as tf
self._sess.run(m.vars_assign_op, feed_dict=var_feeddict)
def _make_placeholders(self):
w = self._train_params['image_size']
h = self._train_params['image_size']
in_ch = 3 # Num channels of input images
train_images_ph = tf.placeholder(tf.int32, name='train_images_ph', shape=(None, w, h, in_ch)) # Train images
pred_images_ph = tf.placeholder(tf.int32, name='pred_images_ph', shape=(None, w, h, in_ch)) # Predict images
train_classes_ph = tf.placeholder(tf.int32, name='train_classes_ph', shape=(None,)) # Train classes
pred_classes_ph = tf.placeholder(tf.int32, name='pred_classes_ph', shape=(None,)) # Predict classes
normal_arch_ph = tf.placeholder(tf.int32, name='normal_arch_ph', shape=(CELL_NUM_BLOCKS, 4))
reduction_arch_ph = tf.placeholder(tf.int32, name='reduction_arch_ph', shape=(CELL_NUM_BLOCKS, 4))
return _ModelPlaceholder(train_images_ph, train_classes_ph, pred_images_ph, pred_classes_ph,
normal_arch_ph, reduction_arch_ph)
def _forward(self, X, step, normal_arch, reduction_arch, is_train=False, **knobs):
K = self._train_params['K'] # No. of classes
in_ch = 3 # Num channels of input images
| tensorflow.placeholder | 7,098 |
import tensorflow as tf
tf.logging.info(" Num examples = %d (%d actual, %d padding)",
len(predict_examples), num_actual_predict_examples,
len(predict_examples) - num_actual_predict_examples)
tf.logging.info(" Batch size = %d", FLAGS.predict_batch_size)
predict_drop_remainder = True if FLAGS.use_tpu else False
| tensorflow.logging.info | 7,099 |