seed (string, 25–2.89k chars) | seed_api (string, 14–102 chars) | index (int64, 0–14.8k) |
---|---|---|
import tensorflow as tf
else:
block_conv_input = bottom
input_filter = bottom.get_shape().as_list()[-1]
block_conv_1 = self.conv_layer(bottom, 1, input_filter, channel_list[0], 1, name + "_branch2a")
block_norm_1 = tf.layers.batch_normalization(inputs=block_conv_1, axis = 3, momentum=configs['_BATCH_NORM_DECAY'], epsilon=configs['_BATCH_NORM_EPSILON'], center=True, scale=True, training=self.is_training, fused=True)
block_relu_1 = tf.nn.relu(block_norm_1)
block_conv_2 = self.conv_layer(block_relu_1, 3, channel_list[0], channel_list[1], 1, name + "_branch2b")
block_norm_2 = tf.layers.batch_normalization(inputs=block_conv_2, axis = 3, momentum=configs['_BATCH_NORM_DECAY'], epsilon=configs['_BATCH_NORM_EPSILON'], center=True, scale=True, training=self.is_training, fused=True)
block_relu_2 = tf.nn.relu(block_norm_2)
| tensorflow.nn.relu | 5,600 |
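The row above chains conv → batch norm → ReLU for a ResNet-style branch. A minimal, self-contained sketch of that pattern, assuming TensorFlow 1.x; the helper name and shapes below are illustrative and not taken from the seed's class:
import tensorflow as tf

def conv_bn_relu(x, filters, kernel_size, is_training, name):
    # conv -> batch norm -> ReLU, the ordering used in the seed's branch2a/branch2b layers
    with tf.variable_scope(name):
        y = tf.layers.conv2d(x, filters, kernel_size, strides=1, padding='same', use_bias=False)
        y = tf.layers.batch_normalization(y, axis=3, momentum=0.997, epsilon=1e-5,
                                          training=is_training, fused=True)
        return tf.nn.relu(y)

inputs = tf.placeholder(tf.float32, [None, 32, 32, 64])
out = conv_bn_relu(inputs, filters=128, kernel_size=3, is_training=True, name='block_branch2a')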
import tensorflow as tf
learning_rate,
num_train_steps,
num_warmup_steps,
use_tpu,
use_one_hot_embeddings,
):
"""Returns `model_fn` closure for TPUEstimator."""
def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
"""The `model_fn` for TPUEstimator."""
tf.logging.info("*** Features ***")
for name in sorted(features.keys()):
tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape))
input_ids = features["input_ids"]
input_mask = features["input_mask"]
segment_ids = features["segment_ids"]
masked_lm_positions = features["masked_lm_positions"]
masked_lm_ids = features["masked_lm_ids"]
masked_lm_weights = features["masked_lm_weights"]
next_sentence_labels = features["next_sentence_labels"]
| tensorflow.logging.info | 5,601 |
import tensorflow as tf
output_dtype: Optional[tf.DType] = None):
"""More efficient combined `mean` and `var`. See `var`."""
if output_dtype is None:
output_dtype = _FLOAT_OUTPUT_DTYPE_MAP.get(x.dtype)
if output_dtype is None:
raise TypeError('Tensor type %r is not supported' % x.dtype)
if not reduce_instance_dims and isinstance(x, tf.RaggedTensor):
raise NotImplementedError(
'Elementwise mean_and_var does not support RaggedTensors.')
with tf.compat.v1.name_scope('mean_and_var'):
x = tf.cast(x, output_dtype)
x_count, x_mean, x_variance = (
tf_utils.reduce_batch_count_mean_and_var(x, reduce_instance_dims))
combine_inputs = _WeightedMeanAndVarAccumulator(
count=x_count,
mean=x_mean,
| tensorflow.compat.v1.name_scope | 5,602 |
import tensorflow as tf
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
def create_model(config, is_training, input_ids, input_mask, segment_ids,
labels, num_labels, use_one_hot_embeddings, task_name,):
"""Creates a classification model from_scratch."""
_true_length = tf.cast(tf.reduce_sum(input_mask, axis=-1), dtype=tf.int32)
with tf.variable_scope("baseline"):
with tf.variable_scope("embeddings"):
# Perform embedding lookup on the word ids.
(word_embedding_output,
output_embedding_table) = modeling.embedding_lookup(
input_ids=input_ids,
vocab_size=config.vocab_size,
embedding_size=config.embedding_size,
initializer_range=config.initializer_range,
word_embedding_name="word_embeddings",
use_one_hot_embeddings=use_one_hot_embeddings)
# Add positional embeddings and token type embeddings, then layer
# normalize and perform dropout.
embedding_output = modeling.embedding_postprocessor(
input_tensor=word_embedding_output,
| tensorflow.variable_scope | 5,603 |
import tensorflow as tf
if( data_format =='NCHW' ):
self.strides = [1, 1, d_h, d_w]
else:
self.strides = [1, d_h, d_w, 1]
def __call__(self,input_var,name=None,**xargs):
shapes = tf.shape(input_var)
if( self.data_format == 'NCHW' ):
shapes = tf.stack([shapes[0],tf.shape(self.b)[0],shapes[2]*self.strides[2],shapes[3]*self.strides[3]])
else:
shapes = tf.stack([shapes[0],shapes[1]*self.strides[1],shapes[2]*self.strides[2],tf.shape(self.b)[0]])
return tf.nn.bias_add(
tf.nn.conv2d_transpose(input_var,self.w,output_shape=shapes,
data_format=self.data_format,
strides=self.strides,padding='SAME'),
self.b,data_format=self.data_format,name=name)
def get_variables(self):
return {'w':self.w,'b':self.b}
class WeightNormTransposedConv2d(object):
def __init__(self,name,input_dim,out_dim,
k_h=4,k_w=4,d_h=2,d_w=2,stddev=0.02,data_format='NHWC',epsilon=1e-9) :
with tf.variable_scope(name) :
assert data_format == 'NHWC'
self.v = tf.get_variable('v', [k_h, k_w, out_dim, input_dim],
| tensorflow.nn.conv2d_transpose | 5,604 |
import tensorflow as tf
def self_all_attention(facts, ATTENTION_SIZE, mask, stag='null'):
if len(facts.get_shape().as_list()) == 2:
facts = tf.expand_dims(facts, 1)
def cond(batch, output, i):
return tf.less(i, tf.shape(batch)[1])
def body(batch, output, i):
self_attention_tmp = din_fcn_attention(batch[:, i, :], batch,
ATTENTION_SIZE, mask, softmax_stag=1, stag=stag,
| tensorflow.shape | 5,605 |
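The cond/body pair in this seed is the standard setup for a tf.while_loop over the time axis. A stripped-down sketch of that control-flow wiring, assuming TF 1.x; the per-step computation here is a stand-in, not the seed's din_fcn_attention:
import tensorflow as tf

batch = tf.placeholder(tf.float32, [None, None, 8])  # [B, T, H]

def cond(batch, acc, i):
    return tf.less(i, tf.shape(batch)[1])

def body(batch, acc, i):
    acc = acc + tf.reduce_sum(batch[:, i, :], axis=0)  # stand-in for the per-step attention
    return batch, acc, i + 1

_, acc, _ = tf.while_loop(cond, body,
                          loop_vars=[batch, tf.zeros([8]), tf.constant(0)])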
import tensorflow as tf
msg = '`perm` must be a vector.'
if perm.shape.ndims is not None:
if perm.shape.ndims != 1:
raise ValueError(
msg[:-1] + ', saw rank: {}.'.format(perm.shape.ndims))
elif validate_args:
assertions += [tf.compat.v1.assert_rank(perm, 1, message=msg)]
perm_ = tf.get_static_value(perm)
msg = '`perm` must be a valid permutation vector.'
if perm_ is not None:
if not np.all(np.arange(np.size(perm_)) == np.sort(perm_)):
raise ValueError(msg[:-1] + ', saw: {}.'.format(perm_))
| tensorflow.compat.v1.assert_rank | 5,606 |
import tensorflow as tf
prediction = tf.nn.softmax(logits, name = name + '_softmax')
return logits, prediction
def general_conv2d(self, input_data, filters = 64, kernel_size = 7, stride = 1, stddev = 0.02, activation_function = "relu", padding = "VALID", do_norm=True, relu_factor = 0, name="conv2d"):
with tf.variable_scope(name):
conv = tf.layers.conv2d(input_data, filters, kernel_size, stride, padding, activation=None)
if do_norm:
conv = tf.layers.batch_normalization(conv, momentum=0.9)
if activation_function == "relu":
conv = tf.nn.relu(conv, name = 'relu')
if activation_function == "leakyrelu":
conv = tf.nn.leaky_relu(conv, alpha=relu_factor)
if activation_function == "elu":
conv = tf.nn.elu(conv, name = 'elu')
return conv
def general_deconv2d(self, input_data, filters = 64, kernel_size = 7, stride = 1, stddev = 0.02, activation_function = "relu", padding = "VALID", do_norm = True, relu_factor = 0, name="deconv2d"):
with tf.variable_scope(name):
deconv = tf.layers.conv2d_transpose(input_data, filters, kernel_size, (stride, stride), padding, activation = None)
| tensorflow.nn.relu | 5,607 |
import tensorflow as tf
decoder = tf_example_decoder.TfExampleDecoder(
load_instance_masks=False,
num_additional_channels=predict_input_config.num_additional_channels)
input_dict = transform_fn(decoder.decode(example))
images = tf.cast(input_dict[fields.InputDataFields.image], dtype=tf.float32)
images = tf.expand_dims(images, axis=0)
true_image_shape = tf.expand_dims(
input_dict[fields.InputDataFields.true_image_shape], axis=0)
return tf.estimator.export.ServingInputReceiver(
features={
fields.InputDataFields.image: images,
fields.InputDataFields.true_image_shape: true_image_shape},
receiver_tensors={SERVING_FED_EXAMPLE_KEY: example})
return _predict_input_fn
| tensorflow.estimator.export.ServingInputReceiver | 5,608 |
from tensorflow.python.ops import array_ops
grads, variables = zip(*grads_and_vars)
norm = clip_ops.global_norm(grads)
max_norm, log_mean = _adaptive_max_norm(norm, std_factor, decay,
global_step, epsilon, name)
# reports the max gradient norm for debugging
if report_summary:
summary.scalar("global_norm/adaptive_max_gradient_norm", max_norm)
# factor will be 1. if norm is smaller than max_norm
factor = array_ops.where(norm < max_norm, array_ops.ones_like(norm),
math_ops.exp(log_mean) / norm)
if static_max_norm is not None:
factor = math_ops.minimum(static_max_norm / norm, factor)
# apply factor
clipped_grads = []
for grad in grads:
if grad is None:
clipped_grads.append(None)
elif isinstance(grad, ops.IndexedSlices):
| tensorflow.python.ops.array_ops.ones_like | 5,609 |
import tensorflow as tf
ratio = tf.clip_by_value(ratio, 0, 10)
surr1 = batch['advantage'] * ratio
surr2 = batch['advantage'] * tf.clip_by_value(ratio, 1 - epsilon_decay, 1 + epsilon_decay)
loss_pg = - 2.0 * tf.reduce_mean(tf.minimum(surr1, surr2))
| tensorflow.clip_by_value | 5,610 |
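The seed computes the PPO clipped surrogate objective. A minimal sketch of the same computation with placeholder tensors (TF 1.x assumed; advantage, ratio and epsilon_decay below are stand-ins, not the seed's batch dict):
import tensorflow as tf

advantage = tf.placeholder(tf.float32, [None])
ratio = tf.placeholder(tf.float32, [None])  # new_policy_prob / old_policy_prob
epsilon_decay = 0.2

surr1 = advantage * ratio
surr2 = advantage * tf.clip_by_value(ratio, 1.0 - epsilon_decay, 1.0 + epsilon_decay)
loss_pg = -tf.reduce_mean(tf.minimum(surr1, surr2))  # negative because we maximize the surrogate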
import tensorflow as tf
def _word_embedding(self, inputs, reuse=False):
with tf.variable_scope('word_embedding', reuse=reuse):
w = tf.get_variable('w', [self.V, self.M], initializer=self.emb_initializer)
x = tf.nn.embedding_lookup(w, inputs, name='word_vector') # (N, T, M) or (N, M)
return x
def _project_features(self, features):
with tf.variable_scope('project_features'):
w = tf.get_variable('w', [self.D, self.D], initializer=self.weight_initializer)
features_flat = tf.reshape(features, [-1, self.D])
features_proj = tf.matmul(features_flat, w)
features_proj = tf.reshape(features_proj, [-1, self.L, self.D])
return features_proj
def _attention_layer(self, features, features_proj, h, reuse=False):
with tf.variable_scope('attention_layer', reuse=reuse):
w = tf.get_variable('w', [self.H, self.D], initializer=self.weight_initializer)
b = tf.get_variable('b', [self.D], initializer=self.const_initializer)
| tensorflow.reshape | 5,611 |
import tensorflow as tf
def testAddCollectionDef(self):
test_dir = self._TestDir("good_collection")
filename = os.path.join(test_dir, "metafile")
with self.test_session():
# Creates a graph.
v0 = tf.Variable(10.0, name="v0")
var = tf.Variable(tf.constant(0, dtype=tf.int64))
count_up_to = var.count_up_to(3)
input_queue = tf.FIFOQueue(30, tf.float32, shared_name="collection_queue")
qr = tf.train.QueueRunner(input_queue, [count_up_to])
tf.initialize_all_variables()
| tensorflow.Variable | 5,612 |
import tensorflow as tf
crit_dis = tf.reduce_mean(tf.square(crit_real - tf.ones_like(crit_real))) + tf.reduce_mean(tf.square(crit_fake - tf.zeros_like(crit_fake)))
crit_gen = tf.reduce_mean(tf.square(crit_fake - tf.ones_like(crit_fake)))
| tensorflow.ones_like | 5,613 |
import tensorflow as tf
self.strides = [1,1,1]
self.dilates = [d_t, d_h, d_w]
def __call__(self,input_var,name=None) :
k_t,k_h,k_w,_,_ = self.w.get_shape().as_list()
_t = tf.pad(input_var, [[0,0],[0,0],[k_h//2,k_h//2],[k_w//2,k_w//2],[0,0]], "SYMMETRIC")
return tf.nn.bias_add(
tf.nn.convolution(_t, self.w,
strides=self.strides, dilation_rate=self.dilates,
padding='VALID'),
self.b,name=name)
class Linear(object) :
def __init__(self,name,input_dim,output_dim,stddev=0.02) :
with tf.variable_scope(name) :
self.w = tf.get_variable('w',[input_dim, output_dim],
initializer=tf.random_normal_initializer(stddev=stddev))
self.b = tf.get_variable('b',[output_dim],
initializer=tf.constant_initializer(0.0))
def __call__(self,input_var,name=None,w=None,b=None,**kwargs) :
w = w if w is not None else self.w
b = b if b is not None else self.b
if( input_var.shape.ndims > 2 ) :
dims = tf.reduce_prod(tf.shape(input_var)[1:])
return tf.matmul(tf.reshape(input_var,[-1,dims]),w) + b
| tensorflow.variable_scope | 5,614 |
import tensorflow as tf
loss_np, _, label_np, image_np, inf_np = sess.run(
[loss, opti, batch_label, batch_image, inf],feed_dict=feed_dict)
if i > 0 and i % report_step == 0:
accuracy_np = sess.run([accuracy],feed_dict=feed_dict)
print(i, accuracy_np, loss_np)
if i > 0 and i % save_step == 0:
tf.train.Saver().save(sess, path)
tf.train.Saver().save(sess, path)
coord.request_stop()
coord.join(threads)
| tensorflow.train.Saver | 5,615 |
import tensorflow as tf
log("Tacotron training set to a maximum of {} steps".format(args.tacotron_train_steps))
# Memory allocation on the GPU as needed
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.allow_soft_placement = True
# Train
with tf.Session(config=config) as sess:
try:
summary_writer = tf.summary.FileWriter(tensorboard_dir, sess.graph)
sess.run(tf.global_variables_initializer())
# saved model restoring
if args.restore:
# Restore saved model if the user requested it, default = True
try:
checkpoint_state = tf.train.get_checkpoint_state(save_dir)
if checkpoint_state and checkpoint_state.model_checkpoint_path:
log("Loading checkpoint {}".format(checkpoint_state.model_checkpoint_path),
slack=True)
saver.restore(sess, checkpoint_state.model_checkpoint_path)
| tensorflow.global_variables_initializer | 5,616 |
import tensorflow as tf
queries = tf.reshape(queries, tf.shape(facts))
din_all = tf.concat([queries, facts, queries-facts, queries*facts], axis=-1)
d_layer_1_all = tf.layers.dense(din_all, 80, activation=tf.nn.sigmoid, name='f1_att' + stag)
d_layer_2_all = tf.layers.dense(d_layer_1_all, 40, activation=tf.nn.sigmoid, name='f2_att' + stag)
d_layer_3_all = tf.layers.dense(d_layer_2_all, 1, activation=None, name='f3_att' + stag)
d_layer_3_all = tf.reshape(d_layer_3_all, [-1, 1, tf.shape(facts)[1]])
scores = d_layer_3_all
# Mask
if mask is not None:
# key_masks = tf.sequence_mask(facts_length, tf.shape(facts)[1]) # [B, T]
key_masks = tf.expand_dims(mask, 1) # [B, 1, T]
paddings = tf.ones_like(scores) * (-2 ** 32 + 1)
if not forCnn:
scores = tf.where(key_masks, scores, paddings) # [B, 1, T]
# Scale
# scores = scores / (facts.get_shape().as_list()[-1] ** 0.5)
# Activation
if softmax_stag:
scores = tf.nn.softmax(scores) # [B, 1, T]
# Weighted sum
if mode == 'SUM':
output = tf.matmul(scores, facts) # [B, 1, H]
# output = tf.reshape(output, [-1, tf.shape(facts)[-1]])
| tensorflow.where | 5,617 |
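The masking trick in this seed, replacing padded positions with a very large negative number before the softmax, is worth isolating. A hedged minimal sketch (TF 1.x; shapes follow the seed's [B, 1, T] convention):
import tensorflow as tf

scores = tf.placeholder(tf.float32, [None, 1, None])  # [B, 1, T] raw attention logits
mask = tf.placeholder(tf.bool, [None, None])          # [B, T], True for valid positions

key_masks = tf.expand_dims(mask, 1)                   # [B, 1, T]
paddings = tf.ones_like(scores) * (-2 ** 32 + 1)      # effectively -inf once softmaxed
masked_scores = tf.where(key_masks, scores, paddings)
weights = tf.nn.softmax(masked_scores)                # padded positions get ~0 weight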
import tensorflow as tf
assert len(all_vars) == len(all_perturbed_vars)
perturb_ops = []
for var, perturbed_var in zip(all_vars, all_perturbed_vars):
if param_noise_filter_func(perturbed_var):
# Perturb this variable.
operation = tf.assign(perturbed_var,
var + tf.random_normal(shape=tf.shape(var), mean=0.,
stddev=param_noise_scale))
else:
# Do not perturb, just assign.
operation = tf.assign(perturbed_var, var)
perturb_ops.append(operation)
assert len(perturb_ops) == len(all_vars)
return tf.group(*perturb_ops)
# Set up functionality to re-compute `param_noise_scale`. This perturbs yet another copy
# of the network and measures the effect of that perturbation in action space. If the perturbation
# is too big, reduce scale of perturbation, otherwise increase.
with tf.variable_scope("adaptive_model", reuse=False):
adaptive_policy = q_func(sess, ob_space, ac_space, 1, 1, None, obs_phs=obs_phs)
perturb_for_adaption = perturb_vars(original_scope="model", perturbed_scope="adaptive_model/model")
kl_loss = tf.reduce_sum(
tf.nn.softmax(policy.q_values) *
(tf.log(tf.nn.softmax(policy.q_values)) - tf.log(tf.nn.softmax(adaptive_policy.q_values))),
axis=-1)
mean_kl = tf.reduce_mean(kl_loss)
| tensorflow.group | 5,618 |
import tensorflow as tf
Compute loss
'''
with tf.name_scope('loss') as scope:
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels, name='cross-entropy')
loss = tf.reduce_mean(cross_entropy, name='loss')
tf.summary.scalar(scope+'/loss', loss)
return loss
def accuracy(logits, labels):
'''
Evaluate the quality of the logits at predicting the label
'''
# for summary
with tf.name_scope('accuracy') as scope:
correct = tf.equal(tf.arg_max(logits,1), tf.arg_max(labels,1))
correct = tf.cast(correct, tf.float32)
accuracy = tf.reduce_mean(correct)*100.0
tf.summary.scalar(scope+'accuracy',accuracy)
return accuracy
def num_correct_prediction(logits, labels):
'''
Evaluate the quality of the logits at predicting the label
'''
correct = tf.equal(tf.arg_max(logits,1), tf.arg_max(labels,1))
correct = tf.cast(correct, tf.int32)
| tensorflow.name_scope | 5,619 |
import tensorflow as tf
out = activation(out)
if dropout > 0:
out = tf.layers.dropout(out, rate=dropout, training=training)
out = conv(out, [2*dim[0], dim[1], dim[2]], scope="%s_conv_out"%scope, training=training, ema=ema, init=init)
h_stack1, h_stack2 = tf.split(out, 2, 3)
sigmoid_out = tf.sigmoid(h_stack2)
out = (h_stack1 * sigmoid_out)
out_shp = out.get_shape().as_list()
if out_shp[1:-1] < in_shp[1:-1]:
x = tf.nn.avg_pool(x, [1, dim[2][0], dim[2][1], 1], strides=[1, dim[2][0], dim[2][1], 1], padding='SAME')
| tensorflow.split | 5,620 |
import tensorflow as tf
kernel_size=kernel_size,
activation=activation,
padding='same')
return tf.layers.batch_normalization(conv1d_output, training=is_training)
| tensorflow.layers.batch_normalization | 5,621 |
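One thing the conv1d/batch-norm helper above does not show is that tf.layers.batch_normalization in graph mode only updates its moving statistics when the ops in tf.GraphKeys.UPDATE_OPS are run. A minimal sketch of the usual pattern (TF 1.x; the loss and optimizer are placeholders for whatever the model actually trains):
import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 100, 16])
is_training = tf.placeholder(tf.bool, [])

h = tf.layers.conv1d(x, filters=32, kernel_size=5, padding='same', activation=tf.nn.relu)
h = tf.layers.batch_normalization(h, training=is_training)
loss = tf.reduce_mean(tf.square(h))  # stand-in loss

# The moving mean/variance updates live in UPDATE_OPS; tie them to the train op.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)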
import tensorflow as tf
return tf.concat(convolutions, 2)
embedding = make_convolutions(self.char_embedding)
# for highway and projection layers
n_highway = cnn_options.get('n_highway')
use_highway = n_highway is not None and n_highway > 0
use_proj = n_filters != projection_dim
if use_highway or use_proj:
# reshape from (batch_size, n_tokens, dim) to (-1, dim)
batch_size_n_tokens = tf.shape(embedding)[0:2]
embedding = tf.reshape(embedding, [-1, n_filters])
# set up weights for projection
if use_proj:
assert n_filters > projection_dim
with tf.variable_scope('CNN_proj') as scope:
W_proj_cnn = tf.get_variable(
"W_proj", [n_filters, projection_dim],
initializer=tf.random_normal_initializer(
mean=0.0, stddev=np.sqrt(1.0 / n_filters)),
dtype=DTYPE)
b_proj_cnn = tf.get_variable(
"b_proj", [projection_dim],
| tensorflow.reshape | 5,622 |
import tensorflow.contrib.eager as tfe
def data_format():
return "channels_first" if tfe.num_gpus() else "channels_last"
def random_dataset():
batch_size = 64
images = tf.random_normal([batch_size, 784])
labels = tf.random_uniform([batch_size], minval=0, maxval=10, dtype=tf.int32)
return tf.data.Dataset.from_tensors((images, labels))
def train(defun=False):
model = mnist.create_model(data_format())
if defun:
model.call = tfe.defun(model.call)
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
dataset = random_dataset()
with tf.device(device()):
mnist_eager.train(model, optimizer, dataset,
step_counter=tf.train.get_or_create_global_step())
def evaluate(defun=False):
model = mnist.create_model(data_format())
dataset = random_dataset()
if defun:
model.call = tfe.defun(model.call)
with tf.device(device()):
| tensorflow.contrib.eager.defun | 5,623 |
import tensorflow as tf
tf.app.flags.DEFINE_float('moving_average_decay', 0.997, '')
tf.app.flags.DEFINE_string('gpu_list', '1', '')
tf.app.flags.DEFINE_string('checkpoint_path', '/tmp/east_resnet_v1_50_rbox/', '')
tf.app.flags.DEFINE_boolean('restore', False, 'whether to restore from checkpoint')
tf.app.flags.DEFINE_integer('save_checkpoint_steps', 1000, '')
tf.app.flags.DEFINE_integer('save_summary_steps', 100, '')
tf.app.flags.DEFINE_string('pretrained_model_path', None, '')
tf.app.flags.DEFINE_boolean('allow_mix_precision', False, 'whether to allow mix precision')
tf.app.flags.DEFINE_boolean('auto_tune', False, 'whether to autotune')
tf.app.flags.DEFINE_boolean('use_processed_data', False, 'whether to use processed data')
tf.app.flags.DEFINE_string('processed_data', './processed_dataset/', 'where to save preprocessed datasets')
import model
import icdar
| tensorflow.app.flags.DEFINE_boolean | 5,624 |
import tensorflow as tf
return tf.nn.bias_add(ret*self.gamma,
self.beta,data_format=self.data_format)
def get_variables(self):
return {'gamma':self.gamma,'beta':self.beta} if self.gamma is not None else {}
class InstanceNorm():
def __init__(self,name,format='NCHW',epsilon=1e-5) :
assert(format=='NCHW' or format=='NHWC')
self.axis = [2,3] if format == 'NCHW' else [1,2]
self.epsilon = epsilon
self.name = name
def __call__(self,input_var) :
mean, var = tf.nn.moments(input_var, self.axis, keep_dims=True)
return (input_var - mean) / tf.sqrt(var+self.epsilon)
class BatchNorm(object):
def __init__(self,name,dims,axis=1,epsilon=1e-3,momentum=0.999,center=True,scale=True) :
self.momentum = momentum
self.epsilon = epsilon
self.axis = axis
self.center=center
self.scale=scale
with tf.variable_scope(name) as scope:
with tf.variable_scope('bn') :
self.gamma= tf.get_variable('gamma',[dims], initializer=tf.constant_initializer(1.0))
self.beta = tf.get_variable('beta',[dims], initializer=tf.constant_initializer(0.0))
self.moving_mean = tf.get_variable('moving_mean',[dims], initializer=tf.constant_initializer(0.0), trainable=False)
| tensorflow.nn.moments | 5,625 |
import tensorflow as tf
self.w_attn_2 = tf.get_variable("w_2", [self.lstm_size, self.lstm_size])
self.v_attn = tf.get_variable("v", [self.lstm_size, 1])
def _build_sampler(self, prev_c=None, prev_h=None, use_bias=False):
"""Build the sampler ops and the log_prob ops."""
print ("-" * 80)
print ("Build controller sampler")
anchors = tf.TensorArray(
tf.float32, size=self.num_cells + 2, clear_after_read=False)
anchors_w_1 = tf.TensorArray(
tf.float32, size=self.num_cells + 2, clear_after_read=False)
arc_seq = tf.TensorArray(tf.int32, size=self.num_cells * 4)
if prev_c is None:
assert prev_h is None, "prev_c and prev_h must both be None"
prev_c = [tf.zeros([1, self.lstm_size], tf.float32)
for _ in range(self.lstm_num_layers)]
| tensorflow.TensorArray | 5,626 |
import tensorflow as tf
# Extract the correct output (i.e., hidden state) for each example. All the
# character sequences in this batch were padded to the same fixed length so
# that they could be easily fed through the above RNN loop. The
# `sequence_length` vector tells us the true lengths of the character
# sequences, letting us obtain for each sequence the hidden state that was
# generated by its non-padding characters.
batch_range = [i for i in range(batch_size)]
indices = tf.stack([sequence_length - 1, batch_range], axis=1)
hidden_states = tf.gather_nd(chars, indices)
return self.relu(hidden_states)
def loss(labels, predictions):
"""Computes mean squared loss."""
| tensorflow.stack | 5,627 |
import tensorflow as tf
@private_method
def _PS(self, I, r):
X = tf.transpose(I, [2, 1, 0]) # (r, w, b)
X = tf.batch_to_space_nd(X, [r], [[0, 0]]) # (1, r*w, b)
X = tf.transpose(X, [2, 1, 0])
return X
| tensorflow.transpose | 5,628 |
import tensorflow as tf
reader = tf.TFRecordReader
# Features in Pascal VOC TFRecords.
keys_to_features = {
'image/encoded': tf.FixedLenFeature((), tf.string, default_value=''),
'image/format': tf.FixedLenFeature((), tf.string, default_value='jpeg'),
'image/height': tf.FixedLenFeature([1], tf.int64),
'image/width': tf.FixedLenFeature([1], tf.int64),
'image/channels': tf.FixedLenFeature([1], tf.int64),
'image/shape': tf.FixedLenFeature([3], tf.int64),
'image/object/bbox/xmin': tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/ymin': tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/xmax': tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/ymax': tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/label': tf.VarLenFeature(dtype=tf.int64),
'image/object/bbox/difficult': tf.VarLenFeature(dtype=tf.int64),
'image/object/bbox/truncated': tf.VarLenFeature(dtype=tf.int64),
}
items_to_handlers = {
| tensorflow.VarLenFeature | 5,629 |
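The keys_to_features dict above is only half of TFRecord parsing. A complete minimal example of decoding one serialized example with fixed- and variable-length features (TF 1.x; the feature names are trimmed from the seed, the rest is illustrative):
import tensorflow as tf

keys_to_features = {
    'image/encoded': tf.FixedLenFeature((), tf.string, default_value=''),
    'image/shape': tf.FixedLenFeature([3], tf.int64),
    'image/object/bbox/xmin': tf.VarLenFeature(dtype=tf.float32),
}

serialized = tf.placeholder(tf.string, [])
parsed = tf.parse_single_example(serialized, keys_to_features)

image = tf.image.decode_jpeg(parsed['image/encoded'], channels=3)
shape = parsed['image/shape']                                       # dense [3] tensor
xmin = tf.sparse_tensor_to_dense(parsed['image/object/bbox/xmin'])  # VarLenFeature parses to a SparseTensor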
import tensorflow as tf
self._lr = lr
self._lr_summary = tf.summary.scalar('learning_rate', self._lr)
tvars = tf.trainable_variables()
grads = tf.gradients(avg_neg_log_lhood, tvars)
if grad_clip > 0.0:
grads, _ = tf.clip_by_global_norm(grads, grad_clip)
if opt == 'sgd':
optimizer = tf.train.GradientDescentOptimizer(lr)
else:
raise NotImplementedError()
train_op = optimizer.apply_gradients(zip(grads, tvars), global_step=tf.contrib.framework.get_or_create_global_step())
# Tensor exports
self.feats_audio = feats_audio_nunroll
self.feats_other = feats_other_nunroll
| tensorflow.train.GradientDescentOptimizer | 5,630 |
from tensorflow.python.training import moving_averages
mean: The mean value to update with.
second_moment: The second_moment value to update with.
is_training: Boolean Tensor to indicate if we're currently in
training mode.
"""
def build_update_ops():
"""Builds the exponential moving average update ops."""
update_mean_op = moving_averages.assign_moving_average(
variable=self._moving_mean,
value=mean,
decay=self._decay_rate,
name="update_moving_mean").op
update_second_moment_op = moving_averages.assign_moving_average(
variable=self._moving_second_moment,
value=second_moment,
| tensorflow.python.training.moving_averages.assign_moving_average | 5,631 |
import tensorflow as tf
b = tf.get_variable("map_attns/bias", shape=shape[-1:])
x = tf.einsum('ijk,kl->ijl', x, w) + b
if chaining_non_linearity:
x = tf.nn.tanh(x)
attention_states[0] += x
| tensorflow.nn.tanh | 5,632 |
import tensorflow as tf
for step in num_inner_grad_steps:
sampler.sample()
algo.compute_updated_dists()
algo.optimize_policy()
sampler.update_goals()
"""
with self.sess.as_default() as sess:
# initialize uninitialized vars (only initialize vars that were not loaded)
uninit_vars = [var for var in tf.global_variables() if not sess.run(tf.is_variable_initialized(var))]
sess.run(tf.variables_initializer(uninit_vars))
start_time = time.time()
for itr in range(self.start_itr, self.n_itr):
itr_start_time = time.time()
logger.log("\n ---------------- Iteration %d ----------------" % itr)
logger.log("Sampling set of tasks/goals for this meta-batch...")
self.sampler.update_tasks()
self.policy.switch_to_pre_update() # Switch to pre-update policy
| tensorflow.variables_initializer | 5,633 |
import tensorflow as tf
def _setup_placeholders(self):
if self.demo:
self.c = tf.placeholder(tf.int32, [None, self.config.max_p_len], "context")
self.q = tf.placeholder(tf.int32, [None, self.config.max_q_len], "question")
self.ch = tf.placeholder(tf.int32, [None, self.config.max_p_len, self.config.max_ch_len], "context_char")
self.qh = tf.placeholder(tf.int32, [None, self.config.max_q_len, self.config.max_ch_len], "question_char")
self.start_label = tf.placeholder(tf.int32, [None], "answer_label1")
self.end_label = tf.placeholder(tf.int32, [None], "answer_label2")
else:
self.c = tf.placeholder(tf.int32, [self.config.batch_size * self.max_p_num, self.config.max_p_len],
"context")
self.q = tf.placeholder(tf.int32, [self.config.batch_size * self.max_p_num, self.config.max_q_len],
"question")
self.ch = tf.placeholder(tf.int32, [self.config.batch_size * self.max_p_num, self.config.max_p_len,
self.config.max_ch_len], "context_char")
self.qh = tf.placeholder(tf.int32, [self.config.batch_size * self.max_p_num, self.config.max_q_len,
self.config.max_ch_len], "question_char")
self.start_label = tf.placeholder(tf.int32, [self.config.batch_size], "answer_label1")
self.end_label = tf.placeholder(tf.int32, [self.config.batch_size], "answer_label2")
| tensorflow.placeholder | 5,634 |
import tensorflow as tf
def get_variables(self):
#TODO: self.v should be l2-normalized or not? / currently not.
return {'v':self.v,'b':self.b,'g':self.g}
class DepthConv2d(object) :
def __init__(self,name,input_dim,channel_multiplier,k_h=4,k_w=4,d_h=2,d_w=2,
stddev=0.02, data_format='NCHW', padding='SAME') :
with tf.variable_scope(name) :
assert(data_format == 'NCHW' or data_format == 'NHWC')
self.w = tf.get_variable('w', [k_h, k_w, input_dim, channel_multiplier],
initializer=tf.truncated_normal_initializer(stddev=stddev))
self.b = tf.get_variable('b',[input_dim*channel_multiplier], initializer=tf.constant_initializer(0.0))
if( data_format == 'NCHW' ) :
self.strides = [1, 1, d_h, d_w]
| tensorflow.variable_scope | 5,635 |
import tensorflow as tf
# Summaries #
def _add_image_summary(self, image, boxes):
# add back mean
'''
tf.stack() concatenates tensors into a stacked matrix; tf.unstack() splits a tensor back into its components.
'''
image += cfg.FLAGS2["pixel_means"]
# bgr to rgb (opencv uses bgr)
channels = tf.unstack(image, axis=-1)
image = tf.stack([channels[2], channels[1], channels[0]], axis=-1)
# dims for normalization
width = tf.to_float(tf.shape(image)[2])
height = tf.to_float(tf.shape(image)[1])
# from [x1, y1, x2, y2, cls] to normalized [y1, x1, y2, x2]
cols = tf.unstack(boxes, axis=1)
boxes = tf.stack([cols[1] / height,
cols[0] / width,
cols[3] / height,
cols[2] / width], axis=1)
# add batch dimension (assume batch_size==1)
#assert image.get_shape()[0] == 1
boxes = tf.expand_dims(boxes, dim=0)
image = tf.image.draw_bounding_boxes(image, boxes)  # draw the ground-truth boxes on the image
return tf.summary.image('ground_truth', image)
def _add_act_summary(self, tensor):
tf.summary.histogram('ACT/' + tensor.op.name + '/activations', tensor)
tf.summary.scalar('ACT/' + tensor.op.name + '/zero_fraction',
tf.nn.zero_fraction(tensor))
| tensorflow.stack | 5,636 |
import tensorflow as tf
return self._perm
@property
def rightmost_transposed_ndims(self):
return self._rightmost_transposed_ndims
def _forward(self, x):
return self._transpose(x, self.perm)
def _inverse(self, y):
return self._transpose(y, tf.argsort(self.perm))
def _inverse_log_det_jacobian(self, y):
return tf.constant(0, dtype=y.dtype)
def _forward_log_det_jacobian(self, x):
return tf.constant(0, dtype=x.dtype)
def _transpose(self, x, perm):
sample_batch_ndims = tf.rank(x) - self.rightmost_transposed_ndims
perm = tf.concat([
tf.range(sample_batch_ndims),
sample_batch_ndims + perm,
], axis=0)
return tf.transpose(a=x, perm=perm)
| tensorflow.constant | 5,637 |
import tensorflow.contrib.graph_editor as ge
if hasattr(ops, '__iter__') and not isinstance(ops, str):
l = [(op.name if hasattr(op, "name") else str(op)) for op in ops]
if sort_outputs:
return sorted(l)
return l
else:
return ops.name if hasattr(ops, "name") else str(ops)
def my_add_control_inputs(wait_to_do_ops, inputs_to_do_before):
for op in wait_to_do_ops:
ci = [i for i in inputs_to_do_before if op.control_inputs is None or i not in op.control_inputs]
ge.add_control_inputs(op, ci)
| tensorflow.contrib.graph_editor.add_control_inputs | 5,638 |
import tensorflow as tf
with tf.control_dependencies(control_inputs=(assignment,)):
assignment = tf.assign(ref=self.memory_index, value=((self.memory_index + num_instances) % self.capacity))
| tensorflow.assign | 5,639 |
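This seed advances a replay-memory write index modulo its capacity, ordered after the preceding assignment. A tiny self-contained sketch of that pattern (TF 1.x; capacity, batch size and the stand-in write op are made up):
import tensorflow as tf

capacity = 100
memory_index = tf.get_variable('memory_index', [], dtype=tf.int32,
                               initializer=tf.zeros_initializer(), trainable=False)
num_instances = tf.constant(8, dtype=tf.int32)

write_op = tf.no_op(name='write_batch')  # stand-in for the actual memory write
with tf.control_dependencies([write_op]):
    advance_index = tf.assign(memory_index, (memory_index + num_instances) % capacity)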
import tensorflow as tf
b1 = tf.matmul(state, hyper_b_1)
w1_reshaped = tf.reshape(w1, [-1, n_agents, n_h_mixer]) # reshape into batch of matrices
| tensorflow.reshape | 5,640 |
from tensorflow.python.feature_column import feature_column_lib as core_feature_column
learner_config.constraints.max_tree_depth = 1
model_dir = tempfile.mkdtemp()
config = run_config.RunConfig()
regressor = estimator.GradientBoostedDecisionTreeRegressor(
learner_config=learner_config,
num_trees=1,
examples_per_layer=3,
model_dir=model_dir,
config=config,
feature_columns=[core_feature_column.numeric_column("x")],
use_core_libs=True)
regressor.fit(input_fn=_train_input_fn, steps=15)
regressor.evaluate(input_fn=_eval_input_fn, steps=1)
regressor.export(self._export_dir_base)
def testRankingDontThrowExceptionForForEstimator(self):
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 2
| tensorflow.python.feature_column.feature_column_lib.numeric_column | 5,641 |
import tensorflow as tf
clip_value = clipping_kwargs["value"]
clipped_grads_and_vars = [(tf.clip_by_norm(g, clip_value), v) for (g, v) in zip(grads, variables)]
| tensorflow.clip_by_norm | 5,642 |
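The seed clips each gradient to a fixed norm individually with tf.clip_by_norm; several other seeds in this set clip the global norm instead. A short sketch contrasting the two (TF 1.x; the toy loss and clip value of 5.0 are illustrative):
import tensorflow as tf

w = tf.get_variable('w', [10, 10])
loss = tf.reduce_sum(tf.square(w))
optimizer = tf.train.GradientDescentOptimizer(0.1)
grads, variables = zip(*optimizer.compute_gradients(loss))

# Per-tensor clipping: each gradient is rescaled independently.
clipped_per_tensor = [(tf.clip_by_norm(g, 5.0), v) for g, v in zip(grads, variables)]

# Global clipping: every gradient shares one rescale factor.
clipped_global, _ = tf.clip_by_global_norm(list(grads), 5.0)

train_op = optimizer.apply_gradients(clipped_per_tensor)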
from tensorflow.core.framework import op_def_pb2
inputs: A list of (name, data type) pairs of function arguments.
outputs: A list of (name, data type) pairs of function return values.
"""
self._sig = op_def_pb2.OpDef()
self._sig.name = func_name
| tensorflow.core.framework.op_def_pb2.OpDef | 5,643 |
import tensorflow as tf
num_batches = int(mnist.validation.num_examples/batch_size)
for j in range(num_batches):
# Classify unseen validation data instead of test data or train data
batch_x_l, batch_y_l = mnist.validation.next_batch(batch_size=batch_size)
encoder_acc = sess.run(accuracy, feed_dict={x_input_l: batch_x_l, y_input: batch_y_l})
acc += encoder_acc
acc /= num_batches
print("Encoder Classification Accuracy: {}".format(acc))
with open(log_path + '/log.txt', 'a') as log:
log.write("Encoder Classification Accuracy: {}".format(acc))
saver.save(sess, save_path=saved_model_path, global_step=step)
else:
# Get the latest results folder
all_results = os.listdir(results_path)
all_results.sort()
saver.restore(sess, save_path=tf.train.latest_checkpoint(results_path + '/' +
all_results[-1] + '/Saved_models/'))
generate_image_grid(sess, op=decoder_image)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Autoencoder Train Parameter")
parser.add_argument('--train', '-t', type=bool, default=True,
help='Set to True to train a new model, False to load weights and display image grid')
args = parser.parse_args()
train(train_model=args.train)
| tensorflow.train.latest_checkpoint | 5,644 |
import tensorflow as tf
with tf.variable_scope('B'):
BAB = self.generator(BA)
viz3('A_recon', A, AB, ABA)
viz3('B_recon', B, BA, BAB)
with tf.variable_scope('discrim'):
with tf.variable_scope('A'):
A_dis_real = self.discriminator(A)
A_dis_fake = self.discriminator(BA)
with tf.variable_scope('B'):
B_dis_real = self.discriminator(B)
B_dis_fake = self.discriminator(AB)
| tensorflow.variable_scope | 5,645 |
import tensorflow as tf
num_adv = int(hps.batch_size/10)
print('num_adv is %d'%(num_adv))
else:
num_adv = hps.batch_size
for i in six.moves.range(num_batch):
print(i)
(logits_part_nor, logits_part_adv, labels_part) = sess.run([logits_nor, logits_adv, tf.argmax(labels, 1)])
logits_all = np.concatenate((logits_all, logits_part_nor), axis=0)
labels_all = np.concatenate((labels_all, labels_part), axis=0)
is_adv_all = np.concatenate((is_adv_all, np.zeros(hps.batch_size)), axis=0)
logits_all = np.concatenate((logits_all, logits_part_adv[:num_adv]), axis=0)
labels_all = np.concatenate((labels_all, labels_part[:num_adv]), axis=0)
is_adv_all = np.concatenate((is_adv_all, np.ones(num_adv)), axis=0)
| tensorflow.argmax | 5,646 |
import tensorflow as tf
landm_pred = tf.reshape(y_pred[1], [num_batch * num_prior, 10])
class_pred = tf.reshape(y_pred[2], [num_batch * num_prior, num_class])
loc_true = tf.reshape(y_true[..., :4], [num_batch * num_prior, 4])
landm_true = tf.reshape(y_true[..., 4:14], [num_batch * num_prior, 10])
landm_valid = tf.reshape(y_true[..., 14], [num_batch * num_prior, 1])
class_true = tf.reshape(y_true[..., 15], [num_batch * num_prior, 1])
# define filter mask: class_true = 1 (pos), 0 (neg), -1 (ignore)
| tensorflow.reshape | 5,647 |
import tensorflow as tf
return uint8_resize_bicubic(image, shape)
def center_crop(image, size):
image_height = tf.shape(image)[0]
image_width = tf.shape(image)[1]
offset_height = (image_height - size) // 2
offset_width = (image_width - size) // 2
image = tf.slice(image, [offset_height, offset_width, 0], [size, size, -1])
| tensorflow.shape | 5,648 |
import tensorflow as tf
import numpy as np
import random
import TensorflowUtils as utils
import read_MITSceneParsingDataParis as scene_parsing
import datetime
import BatchDatsetReader as dataset
from six.moves import xrange
FLAGS = tf.flags.FLAGS
tf.flags.DEFINE_integer("batch_size", "50", "batch size for training")
tf.flags.DEFINE_string("logs_dir", "/scratch1/ram095/nips20/logs_mnist128/", "path to logs directory")
tf.flags.DEFINE_string("data_dir", "/scratch1/ram095/nips20/paris_street", "path to dataset")
tf.flags.DEFINE_float("learning_rate", "1e-4", "Learning rate for Adam Optimizer")
tf.flags.DEFINE_string("model_dir", "Model_zoo/", "Path to vgg model mat")
tf.flags.DEFINE_bool('debug', "False", "Debug mode: True/ False")
tf.flags.DEFINE_string('mode', "train", "Mode train/ test/ visualize")
MODEL_URL = 'http://www.vlfeat.org/matconvnet/models/beta16/imagenet-vgg-verydeep-19.mat'
MAX_ITERATION = int(1e5 + 1)
NUM_OF_CLASSESS = 3
IMAGE_SIZE = 128
def vgg_net(weights, image):
layers = (
'conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1',
| tensorflow.flags.DEFINE_bool | 5,649 |
import tensorflow as tf
self.input_tensor = load_base64_tensor(self.input)
else:
raise ValueError("invalid input type '{}'".format(input_type))
x = self.input_tensor
x = tf.pad(x, [[0, 0], [3, 3], [3, 3], [0, 0]], 'CONSTANT')
x = self.__conv2d("conv_1", x, filter_depth=64,
kernel_size=7, stride=2, padding='valid')
x = self.__batch_norm("bn_1", x)
| tensorflow.pad | 5,650 |
import tensorflow as tf
predict_examples = read_examples(FLAGS.input_file)
features, all_tokens = convert_examples_to_features(predict_examples,
FLAGS.max_seq_length, tokenizer)
tf.logging.info("***** Running prediction*****")
tf.logging.info(" Num examples = %d", len(predict_examples))
tf.logging.info(" Batch size = %d", FLAGS.predict_batch_size)
if FLAGS.use_tpu:
# Warning: According to tpu_estimator.py Prediction on TPU is an
| tensorflow.logging.info | 5,651 |
import tensorflow as tf
# Create biases with shape [1, num_labels, num_anchors].
biases = tf.contrib.framework.model_variable(
name='biases',
shape=[1, num_labels, num_anchors],
dtype=logits.dtype,
initializer=tf.zeros_initializer(),
collections=variables_collections,
trainable=trainable)
# Maybe create label_priors.
label_priors = maybe_create_label_priors(label_priors, labels, weights, variables_collections)
label_priors = tf.reshape(label_priors, [1, num_labels, 1])
# Expand logits, labels, and weights to shape [batch_size, num_labels, 1].
logits = tf.expand_dims(logits, 2)
labels = tf.expand_dims(labels, 2)
weights = tf.expand_dims(weights, 2)
# Calculate weighted loss and other outputs. The log(2.0) term corrects for
# logloss not being an upper bound on the indicator function.
loss = weights * losses_utils.weighted_surrogate_loss(
labels,
logits + biases,
surrogate_type=surrogate_type,
positive_weights=1.0 + lambdas * (1.0 - precision_values),
negative_weights=lambdas * precision_values)
maybe_log2 = tf.log(2.0) if surrogate_type == 'xent' else 1.0
maybe_log2 = tf.cast(maybe_log2, logits.dtype.base_dtype)
lambda_term = lambdas * (1.0 - precision_values) * label_priors * maybe_log2
per_anchor_loss = loss - lambda_term
per_label_loss = delta * tf.reduce_sum(per_anchor_loss, 2)
| tensorflow.expand_dims | 5,652 |
import tensorflow as tf
'Learning rate decay boundaries by global_step (comma-separated list).')
tf.app.flags.DEFINE_string(
'lr_decay_factors', '1, 0.5, 0.1',
'The values of learning_rate decay factor for each segment between boundaries (comma-separated list).')
# checkpoint related configuration
tf.app.flags.DEFINE_string(
'checkpoint_path', './model',
'The path to a checkpoint from which to fine-tune.')
tf.app.flags.DEFINE_string(
'checkpoint_model_scope', '',
| tensorflow.app.flags.DEFINE_string | 5,653 |
import tensorflow as tf
logits = self.tanh_constant * tf.tanh(logits)
index = tf.multinomial(logits, 1)
| tensorflow.multinomial | 5,654 |
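The seed samples an architecture choice from squashed logits. A self-contained sketch of that sampling step (TF 1.x; tanh_constant is reused here as a plain float and the shape is illustrative):
import tensorflow as tf

logits = tf.placeholder(tf.float32, [1, 5])       # [batch, num_choices]
tanh_constant = 1.5

scaled_logits = tanh_constant * tf.tanh(logits)   # squash logits, as in the seed
index = tf.multinomial(scaled_logits, 1)          # [batch, 1] sampled indices
index = tf.to_int32(tf.reshape(index, [-1]))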
import tensorflow as tf
self.assertEqual(3, len(res))
self.assertEqual((2, 3), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 2), res[0].shape)
def testEmbeddingAttentionSeq2Seq(self):
with self.test_session() as sess:
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
enc_inp = [tf.constant(1, tf.int32, shape=[2]) for i in range(2)]
dec_inp = [tf.constant(i, tf.int32, shape=[2]) for i in range(3)]
cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=True)
dec, mem = tf.nn.seq2seq.embedding_attention_seq2seq(
enc_inp, dec_inp, cell, num_encoder_symbols=2,
num_decoder_symbols=5, embedding_size=2)
sess.run([tf.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 5), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 2), res[0].c.shape)
self.assertEqual((2, 2), res[0].h.shape)
| tensorflow.nn.seq2seq.embedding_attention_seq2seq | 5,655 |
import tensorflow as tf
# This is for demo purposes and does NOT scale to large data sets. We do
# not use Dataset.from_generator() because that uses tf.py_func which is
# not TPU compatible. The right way to load data is with TFRecordReader.
d = tf.data.Dataset.from_tensor_slices({
"input_ids":
tf.constant(
all_input_ids, shape=[num_examples, seq_length],
dtype=tf.int32),
"input_mask":
tf.constant(
all_input_mask,
| tensorflow.constant | 5,656 |
import tensorflow as tf
def test_output_equal_in_replace_empty_string_with_random_number(self):
string_placeholder = tf.placeholder(tf.string, shape=[])
replaced_string = inputs._replace_empty_string_with_random_number(
string_placeholder)
test_string = 'hello world'
feed_dict = {string_placeholder: test_string}
with self.test_session() as sess:
out_string = sess.run(replaced_string, feed_dict=feed_dict)
self.assertEqual(test_string, out_string)
def test_output_is_integer_in_replace_empty_string_with_random_number(self):
string_placeholder = tf.placeholder(tf.string, shape=[])
replaced_string = inputs._replace_empty_string_with_random_number(
string_placeholder)
empty_string = ''
feed_dict = {string_placeholder: empty_string}
tf.set_random_seed(0)
with self.test_session() as sess:
out_string = sess.run(replaced_string, feed_dict=feed_dict)
# Test whether out_string is a string which represents an integer.
int(out_string) # throws an error if out_string is not castable to int.
| tensorflow.placeholder | 5,657 |
import tensorflow as tf
images,labels=tf.train.batch([image,label],batch_size=batch_size)
return tf.reshape(images,[batch_size,4096]),tf.reshape(labels,[batch_size])
class trainwork(object):
def __init__(self):
with tf.variable_scope('scop'):
self.w1=tf.get_variable('w1', [4096,1024],initializer=tf.contrib.layers.xavier_initializer_conv2d())
self.w2=tf.get_variable('w2', [1024,classnum],initializer=tf.contrib.layers.xavier_initializer_conv2d())
self.b1 = tf.get_variable('b1', [1024],initializer=tf.constant_initializer(0.0))
self.b2 = tf.get_variable('b2', [classnum],initializer=tf.constant_initializer(0.0))
| tensorflow.variable_scope | 5,658 |
import tensorflow as tf
class MockIterator(object):
def __init__(self, tensors):
self._tensors = [tf.identity(x) for x in tensors]
def next(self):
| tensorflow.identity | 5,659 |
import tensorflow as tf
for name in sorted(self.internals_memory):
next_internals[name] = tf.gather(params=self.internals_memory[name], indices=next_indices)
| tensorflow.gather | 5,660 |
import tensorflow as tf
-------
act: (tf.Variable, bool, float) -> tf.Variable
function to select an action given an observation.
See the top of the file for details.
"""
with tf.variable_scope(scope, reuse=reuse):
observations_ph = make_obs_ph("observation")
stochastic_ph = tf.placeholder(tf.bool, (), name="stochastic")
update_eps_ph = tf.placeholder(tf.float32, (), name="update_eps")
eps = tf.get_variable("eps", (), initializer=tf.constant_initializer(0))
q_values = q_func(observations_ph.get(), num_actions, scope="q_func")
deterministic_actions = tf.argmax(q_values, axis=1)
batch_size = tf.shape(observations_ph.get())[0]
| tensorflow.placeholder | 5,661 |
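The seed stops just before the epsilon-greedy selection itself. A hedged sketch of how such a head is typically finished; this is a generic completion, not the repository's actual code (TF 1.x; num_actions and shapes are made up):
import tensorflow as tf

q_values = tf.placeholder(tf.float32, [None, 4])            # [batch, num_actions]
stochastic_ph = tf.placeholder(tf.bool, (), name="stochastic")
eps = tf.get_variable("eps", (), initializer=tf.constant_initializer(0.1))

batch_size = tf.shape(q_values)[0]
deterministic_actions = tf.argmax(q_values, axis=1)
random_actions = tf.random_uniform([batch_size], minval=0, maxval=4, dtype=tf.int64)
chose_random = tf.random_uniform([batch_size], 0, 1, dtype=tf.float32) < eps
stochastic_actions = tf.where(chose_random, random_actions, deterministic_actions)

actions = tf.cond(stochastic_ph, lambda: stochastic_actions, lambda: deterministic_actions)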
from tensorflow.keras.layers import Dense, Conv2D, MaxPool2D, Flatten
# Block 3
conv3a = Conv2D(padding="same", filters=RNN_SIZE//2, kernel_size=[3, 3], strides=1, data_format='channels_last', kernel_initializer=w_init,activation=tf.nn.relu)(pool2)
conv3b = Conv2D(padding="same", filters=RNN_SIZE//2, kernel_size=[3, 3], strides=1, data_format='channels_last', kernel_initializer=w_init,activation=tf.nn.relu)(conv3a)
conv3c = Conv2D(padding="same", filters=RNN_SIZE//2, kernel_size=[3, 3], strides=1, data_format='channels_last', kernel_initializer=w_init,activation=tf.nn.relu)(conv3b)
pool3 = MaxPool2D(pool_size=[2,2])(conv3c)
# final convolutional layer
#removed GOAL_SIZE
| tensorflow.keras.layers.MaxPool2D | 5,662 |
import tensorflow as tf
# be careful to have at least one weight be nonzero
# should we be taking the mean elem-wise by batch? i think this is a big bug
avg_neg_log_lhood = tf.reduce_sum(neg_log_lhoods) / tf.reduce_sum(target_weights)
neg_log_lhoods_inspect = tf.reshape(neg_log_lhoods, [batch_size, rnn_nunroll])
# Train op
if mode == 'train':
lr = tf.Variable(0.0, trainable=False)
self._lr = lr
self._lr_summary = tf.summary.scalar('learning_rate', self._lr)
tvars = tf.trainable_variables()
grads = tf.gradients(avg_neg_log_lhood, tvars)
if grad_clip > 0.0:
grads, _ = tf.clip_by_global_norm(grads, grad_clip)
if opt == 'sgd':
optimizer = tf.train.GradientDescentOptimizer(lr)
else:
raise NotImplementedError()
train_op = optimizer.apply_gradients(zip(grads, tvars), global_step=tf.contrib.framework.get_or_create_global_step())
# Tensor exports
| tensorflow.gradients | 5,663 |
import tensorflow as tf
parser.add_argument('--max_train_step', type=int, default=50000, help='the maximum training step')
parser.add_argument('--model_path', type=str, default='', help='the path of checkpoint file')
args = parser.parse_args()
def model():
x = tf.placeholder(tf.float32, [None, 784], name='x')
gt = tf.placeholder(tf.float32, [None, 10], name='groundtruth')
with tf.variable_scope('layer1'):
w1 = tf.get_variable('weight1', [784, 1024], initializer=tf.random_normal_initializer())
b1 = tf.get_variable('bias1', [1024], initializer=tf.constant_initializer(0.0))
h1 = tf.nn.relu(tf.matmul(x, w1) + b1)
with tf.variable_scope('layer2'):
w2 = tf.get_variable('weight2', [1024, 1024], initializer=tf.random_normal_initializer())
b2 = tf.get_variable('bias2', [1024], initializer=tf.constant_initializer(0.0))
h2 = tf.nn.relu(tf.matmul(h1, w2) + b2)
with tf.variable_scope('layer3'):
w3 = tf.get_variable('weight3', [1024, 10], initializer=tf.random_normal_initializer())
b3 = tf.get_variable('bias3', [10], initializer=tf.constant_initializer(0.0))
y = tf.matmul(h2, w3) + b3
# losses
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=gt, logits=y))
# optimizer
optimizer = tf.train.GradientDescentOptimizer(args.lr)
# define one-step train ops
| tensorflow.random_normal_initializer | 5,664 |
import tensorflow as tf
if gpu_idx == 0:
update = tf.assign(num_error_rate, num_error_rate + 1.)
with tf.control_dependencies([update]):
tc = tf.maximum(.01, 1. / num_error_rate)
update = tf.assign(avg_error_rate, (1. - tc) * avg_error_rate + tc * self.error_rate)
with tf.control_dependencies([update]):
self.d_loss_class = tf.identity(self.d_loss_class)
self.d_loss_fake = tf.nn.sigmoid_cross_entropy_with_logits(
logits=self.end_points_D['D_on_G_logits'],
labels=tf.zeros_like(self.end_points_D['D_on_G_logits']))
self.d_loss_class = tf.reduce_mean(self.d_loss_class)
self.d_loss_real = tf.reduce_mean(self.d_loss_real)
self.d_loss_fake = tf.reduce_mean(self.d_loss_fake)
if is_fm_loss:
global_pool_head = self.end_points_D['global_pool']
real_data_features = tf.slice(global_pool_head, [0, 0], [batch_size_train, num_classes])
fake_data_features = tf.slice(global_pool_head, [batch_size_train, 0],
[batch_size_train, num_classes])
self.g_loss = self._feature_matching_loss(real_data_features, fake_data_features)
| tensorflow.reduce_mean | 5,665 |
from tensorflow.python.ops import data_flow_ops
for var in tf.trainable_variables():
tf.summary.histogram(var.op.name, var)
fetches = [train_op, total_loss] + enqueue_ops
return (enqueue_ops, fetches)
def add_forward_pass_and_gradients(
self, host_images, host_labels, nclass, phase_train, device_num,
input_data_type, data_type, input_nchan, use_synthetic_gpu_images,
gpu_copy_stage_ops, gpu_compute_stage_ops, gpu_grad_stage_ops):
"""Add ops for forward-pass and gradient computations."""
if not use_synthetic_gpu_images:
with tf.device(self.cpu_device):
images_shape = host_images.get_shape()
labels_shape = host_labels.get_shape()
gpu_copy_stage = data_flow_ops.StagingArea(
[tf.float32, tf.int32],
shapes=[images_shape, labels_shape])
gpu_copy_stage_op = gpu_copy_stage.put(
[host_images, host_labels])
gpu_copy_stage_ops.append(gpu_copy_stage_op)
host_images, host_labels = gpu_copy_stage.get()
with tf.device(self.raw_devices[device_num]):
if not use_synthetic_gpu_images:
gpu_compute_stage = data_flow_ops.StagingArea(
[tf.float32, tf.int32],
shapes=[images_shape, labels_shape]
)
| tensorflow.python.ops.data_flow_ops.StagingArea | 5,666 |
import tensorflow as tf
class WeightNormSymPadConv2d(object): #Resize and Convolution(upsacle by 2)
def __init__(self,name,input_dim,output_dim,
k_h=3,k_w=3,stddev=0.02) :
assert k_h%2==1 and k_w%2==1, 'kernel size should be odd numbers to ensure exact size'
with tf.variable_scope(name) :
self.conv2d = WeightNormConv2d('conv',input_dim,output_dim,k_h,k_w,1,1,data_format='NHWC',padding='VALID')
self.padding = [ [0,0],[k_h//2,k_h//2],[k_w//2,k_w//2],[0,0] ]
def __call__(self,input_var,name=None,**kwargs):
_,h,w,c = input_var.shape.as_list()
_t = tf.image.resize_nearest_neighbor(input_var, [h*2, w*2])
_t = tf.pad(_t,self.padding, mode='SYMMETRIC')
return self.conv2d(_t)
def get_variables(self):
return self.conv2d.get_variables()
class TransposedConv2d(object):
def __init__(self,name,input_dim,out_dim,
k_h=4,k_w=4,d_h=2,d_w=2,stddev=0.02,data_format='NCHW') :
with tf.variable_scope(name) :
self.w = tf.get_variable('w', [k_h, k_w, out_dim, input_dim],
initializer=tf.random_normal_initializer(stddev=stddev))
| tensorflow.pad | 5,667 |
import tensorflow as tf
# Convolution bank: concatenate on the last axis to stack channels from all convolutions
conv_outputs = tf.concat(
[conv1d(inputs, k, 128, tf.nn.relu, is_training, 'conv1d_%d' % k) for k in range(1, K + 1)],
axis=-1
)
# Maxpooling:
maxpool_output = tf.layers.max_pooling1d(
conv_outputs,
pool_size=2,
strides=1,
padding='same')
# Two projection layers:
| tensorflow.layers.max_pooling1d | 5,668 |
import tensorflow as tf
tf.flags.DEFINE_boolean(
'use_nccl', True,
'Whether to use nccl all-reduce primitives where possible')
# Distributed training flags.
tf.flags.DEFINE_string('job_name', '',
'One of "ps", "worker", "". Empty for local training')
tf.flags.DEFINE_string('ps_hosts', '', 'Comma-separated list of target hosts')
tf.flags.DEFINE_string('worker_hosts', '',
'Comma-separated list of target hosts')
tf.flags.DEFINE_integer('task_index', 0, 'Index of task within the job')
tf.flags.DEFINE_string('server_protocol', 'grpc', 'protocol for servers')
tf.flags.DEFINE_boolean('cross_replica_sync', True, '')
# Summary and Save & load checkpoints.
tf.flags.DEFINE_integer('summary_verbosity', 0,
"""Verbosity level for summary ops. Pass 0 to disable
both summaries and checkpoints.""")
tf.flags.DEFINE_integer('save_summaries_steps', 0,
"""How often to save summaries for trained models.
Pass 0 to disable summaries.""")
tf.flags.DEFINE_integer('save_model_secs', 0,
"""How often to save trained models. Pass 0 to disable
checkpoints""")
tf.flags.DEFINE_string('train_dir', None,
"""Path to session checkpoints.""")
tf.flags.DEFINE_string('eval_dir', '/tmp/tf_cnn_benchmarks/eval',
"""Directory where to write eval event logs.""")
tf.flags.DEFINE_string('pretrain_dir', None,
"""Path to pretrained session checkpoints.""")
| tensorflow.flags.DEFINE_integer | 5,669 |
import tensorflow as tf
features["inputs"] = tf.tile(features["inputs"], [1, beam_size, 1, 1, 1])
s = common_layers.shape_list(features["inputs"])
features["inputs"] = tf.reshape(features["inputs"],
[s[0] * s[1], s[2], s[3], s[4]])
target_modality = self._problem_hparams.target_modality
vocab_size = target_modality.top_dimensionality
# Setting decode length to input length + decode_length
decode_length = tf.constant(decode_length)
if "partial_targets" not in features:
decode_length += common_layers.shape_list(features["inputs"])[1]
ids, scores = beam_search.beam_search(
symbols_to_logits_fn,
initial_ids,
beam_size,
decode_length,
| tensorflow.constant | 5,670 |
import tensorflow as tf
Returns:
Variable tensor
"""
with tf.variable_scope(scope) as sc:
kernel_d, kernel_h, kernel_w = kernel_size
stride_d, stride_h, stride_w = stride
| tensorflow.variable_scope | 5,671 |
import tensorflow as tf
# mean squared error
def mean_square_error(self):
return tf.reduce_mean(tf.square(self.output_mask * (self.predictions - self.y)))
# regularizations
def regularization(self):
reg = 0
# L1 weight regularization
reg += self.L1_in * tf.reduce_mean(tf.abs(self.W_in) * self.input_Connectivity)
reg += self.L1_rec * tf.reduce_mean(tf.abs(self.W_rec) * self.rec_Connectivity)
if self.dale_ratio:
reg += self.L1_out * tf.reduce_mean(tf.matmul(tf.abs(self.W_out) * self.output_Connectivity, self.Dale_out))
else:
reg += self.L1_out * tf.reduce_mean(tf.abs(self.W_out) * self.output_Connectivity)
# L2 weight regularization
reg += self.L2_in * tf.reduce_mean(tf.square(tf.abs(self.W_in) * self.input_Connectivity))
| tensorflow.abs | 5,672 |
from tensorflow.contrib.learn.python.learn.estimators import test_data
indices=((0, 0), (0, 1), (60, 0)),
dense_shape=(len(iris.target), 2))
labels = array_ops.reshape(
constant_op.constant(
iris.target, dtype=dtypes.int32), (-1, 1))
return features, labels
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [
feature_column.real_valued_column(str(i)) for i in range(4)
]
linear_features = [
feature_column.bucketized_column(
cont_features[i],
test_data.get_quantile_based_buckets(iris.data[:, i], 10))
for i in range(4)
]
linear_features.append(
feature_column.sparse_column_with_hash_bucket(
'dummy_sparse_column', hash_bucket_size=100))
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
model_dir=tempfile.mkdtemp(),
linear_feature_columns=linear_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=(3, 3))
metrics = classifier.fit(input_fn=_input_fn, steps=_ITERS).evaluate(
| tensorflow.contrib.learn.python.learn.estimators.test_data.get_quantile_based_buckets | 5,673 |
import tensorflow as tf
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
output = sess.run(logits, {inputs: images.eval()})
| tensorflow.global_variables_initializer | 5,674 |
import tensorflow as tf
inside_mul = tf.multiply(bbox_inside_weights, tf.subtract(bbox_pred, bbox_targets))
smooth_l1_sign = tf.cast(tf.less(tf.abs(inside_mul), 1.0 / sigma2), tf.float32)
smooth_l1_option1 = tf.multiply(tf.multiply(inside_mul, inside_mul), 0.5 * sigma2)
smooth_l1_option2 = tf.subtract(tf.abs(inside_mul), 0.5 / sigma2)
smooth_l1_result = tf.add(tf.multiply(smooth_l1_option1, smooth_l1_sign),
tf.multiply(smooth_l1_option2, tf.abs(tf.subtract(smooth_l1_sign, 1.0))))
| tensorflow.multiply | 5,675 |
import tensorflow as tf
if encoder.batch_norm:
encoder_inputs_ = tf.layers.batch_normalization(encoder_inputs_, training=training,
name='conv_batch_norm_{}'.format(k))
if encoder.conv_activation is not None and encoder.conv_activation.lower() == 'relu':
encoder_inputs_ = tf.nn.relu(encoder_inputs_)
encoder_input_length_ = tf.to_int32(tf.ceil(encoder_input_length_ / strides[1]))
feature_size = encoder_inputs_.shape[2].value
channels = encoder_inputs_.shape[3].value
time_steps = tf.shape(encoder_inputs_)[1]
encoder_inputs_ = tf.reshape(encoder_inputs_, [batch_size, time_steps, feature_size * channels])
conv_outputs_ = encoder_inputs_
if encoder.conv_lstm_size:
cell = BasicConvLSTMCell([feature_size, channels], encoder.conv_lstm_size, 1)
encoder_inputs_, _ = tf.nn.bidirectional_dynamic_rnn(
cell, cell, encoder_inputs_,
dtype=tf.float32
)
encoder_inputs_ = tf.concat(encoder_inputs_, axis=2)
if encoder.convolutions:
| tensorflow.reshape | 5,676 |
import tensorflow as tf
@staticmethod
def _relu(name, x):
with tf.variable_scope(name):
return tf.nn.relu(x)
@staticmethod
def _fc(name, x, output_dim=128,
| tensorflow.nn.relu | 5,677 |
import tensorflow as tf
# Load the converted parameters
print('Loading the model')
net.load(model_data_path, sess)
uninitialized_vars = []
for var in tf.global_variables():
try:
sess.run(var)
except tf.errors.FailedPreconditionError:
uninitialized_vars.append(var)
| tensorflow.global_variables | 5,678 |
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import variables
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class OpsTest(test.TestCase):
"""Ops tests."""
def test_softmax_classifier(self):
with self.cached_session() as session:
features = array_ops.placeholder(dtypes.float32, [None, 3])
labels = array_ops.placeholder(dtypes.float32, [None, 2])
weights = constant_op.constant([[0.1, 0.1], [0.1, 0.1], [0.1, 0.1]])
biases = constant_op.constant([0.2, 0.3])
class_weight = constant_op.constant([0.1, 0.9])
prediction, loss = ops.softmax_classifier(features, labels, weights,
biases, class_weight)
self.assertEqual(prediction.get_shape()[1], 2)
self.assertEqual(loss.get_shape(), [])
value = session.run(loss, {features: [[0.2, 0.3, 0.2]], labels: [[0, 1]]})
self.assertAllClose(value, 0.55180627)
def test_embedding_lookup(self):
d_embed = 5
n_embed = 10
ids_shape = (2, 3, 4)
embeds = np.random.randn(n_embed, d_embed)
ids = np.random.randint(0, n_embed, ids_shape)
with self.cached_session():
| tensorflow.python.framework.constant_op.constant | 5,679 |
import tensorflow as tf
rank = _rank(tensor)
assert rank == 3, "Use embedding lookup layer"
binary_mask = _apply_dropout_mask(tf.shape(tensor)[:2], keep_prob, normalize=False)
binary_mask = tf.expand_dims(binary_mask, axis=-1) # proper broadcasting to zero out entire word vectors
| tensorflow.shape | 5,680 |
import tensorflow as tf
mobilenet_v1.mobilenet_v1(train_inputs, num_classes)
eval_inputs = tf.random_uniform((eval_batch_size, height, width, 3))
logits, _ = mobilenet_v1.mobilenet_v1(eval_inputs, num_classes,
reuse=True)
predictions = tf.argmax(logits, 1)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
output = sess.run(predictions)
self.assertEqual(output.shape, (eval_batch_size,))
def testLogitsNotSqueezed(self):
num_classes = 25
images = tf.random_uniform([1, 224, 224, 3])
| tensorflow.global_variables_initializer | 5,681 |
import tensorflow as tf
#--model_scope=blouse --checkpoint_path=./logs/all --data_format=channels_last --batch_size=1
def input_pipeline(is_training=True, model_scope=FLAGS.model_scope, num_epochs=FLAGS.epochs_per_eval):
if 'all' in model_scope:
lnorm_table = tf.contrib.lookup.HashTable(tf.contrib.lookup.KeyValueTensorInitializer(tf.constant(config.global_norm_key, dtype=tf.int64),
tf.constant(config.global_norm_lvalues, dtype=tf.int64)), 0)
rnorm_table = tf.contrib.lookup.HashTable(tf.contrib.lookup.KeyValueTensorInitializer(tf.constant(config.global_norm_key, dtype=tf.int64),
tf.constant(config.global_norm_rvalues, dtype=tf.int64)), 1)
else:
lnorm_table = tf.contrib.lookup.HashTable(tf.contrib.lookup.KeyValueTensorInitializer(tf.constant(config.local_norm_key, dtype=tf.int64),
tf.constant(config.local_norm_lvalues, dtype=tf.int64)), 0)
rnorm_table = tf.contrib.lookup.HashTable(tf.contrib.lookup.KeyValueTensorInitializer(tf.constant(config.local_norm_key, dtype=tf.int64),
tf.constant(config.local_norm_rvalues, dtype=tf.int64)), 1)
preprocessing_fn = lambda org_image, classid, shape, key_x, key_y, key_v: preprocessing.preprocess_image(org_image, classid, shape, FLAGS.train_image_size, FLAGS.train_image_size, key_x, key_y, key_v, (lnorm_table, rnorm_table), is_training=is_training, data_format=('NCHW' if FLAGS.data_format=='channels_first' else 'NHWC'), category=(model_scope if 'all' not in model_scope else '*'), bbox_border=FLAGS.bbox_border, heatmap_sigma=FLAGS.heatmap_sigma, heatmap_size=FLAGS.heatmap_size)
images, shape, classid, targets, key_v, isvalid, norm_value = dataset.slim_get_split(FLAGS.data_dir, preprocessing_fn, (FLAGS.xt_batch_size if 'seresnext50' in FLAGS.backbone else FLAGS.batch_size), FLAGS.num_readers, FLAGS.num_preprocessing_threads, num_epochs=num_epochs, is_training=is_training, file_pattern=FLAGS.dataset_name, category=(model_scope if 'all' not in model_scope else '*'), reader=None)
| tensorflow.constant | 5,682 |
import tensorflow as tf
if monitorSession:
# MonitoredSession
# this will restore all the variables from the latest checkpoint if it exists
self._fix_checkpoint_abs_to_rel(self._checkpoint_dir) # need to ensure checkpoint has relative path saved
chiefsess_creator = tf.train.ChiefSessionCreator(config=sess_config, checkpoint_dir=self._checkpoint_dir)
if self._restore_chkptfile is not None:
self._network.init_saver()
# this is restoring variables
| tensorflow.train.ChiefSessionCreator | 5,683 |
import tensorflow as tf
########################
D = (tf.reduce_sum((channel_1 - channel_2)**2, reduction_indices=1))**0.5
zeros = tf.fill(tf.shape(D), 0.0)
# loss = 0.5*(label*(D**2.) + (1-label) * (tf.reduce_max([zeros, margin - D], reduction_indices=0))**2)
| tensorflow.shape | 5,684 |
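`D` above is the per-pair Euclidean distance, and the commented-out line shows the contrastive loss built from it. A hypothetical reconstruction, assuming a scalar `margin` and using `tf.maximum` in place of the `zeros`/`reduce_max` trick:

```python
import tensorflow as tf

def contrastive_loss(channel_1, channel_2, label, margin=1.0):
    # Euclidean distance per pair of embeddings
    D = tf.sqrt(tf.reduce_sum(tf.square(channel_1 - channel_2), axis=1))
    return 0.5 * (label * tf.square(D)
                  + (1.0 - label) * tf.square(tf.maximum(0.0, margin - D)))
```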
import tensorflow as tf
self._moving_second_moment = tf.get_variable(
"moving_second_moment",
shape=self._mean_shape,
collections=[tf.GraphKeys.MOVING_AVERAGE_VARIABLES,
tf.GraphKeys.GLOBAL_VARIABLES],
initializer=tf.ones_initializer(),
trainable=False)
self._moving_variance = tf.subtract(self._moving_second_moment,
tf.square(self._moving_mean),
name="moving_variance")
def build_batch_stats():
"""Builds the batch statistics calculation ops."""
# Copy for better stability.
# We use the moving mean as an estimate of the mean in order to perform
# a more numerically stable calculation of the batch mean.
shift = tf.add(self._moving_mean, 0)
| tensorflow.square | 5,685 |
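The moving variance above comes from the identity Var[x] = E[x²] − (E[x])², applied to the two moving averages. In isolation:

```python
import tensorflow as tf

moving_mean = tf.constant([0.5, -1.0])
moving_second_moment = tf.constant([1.25, 2.0])        # running E[x^2]
moving_variance = tf.subtract(moving_second_moment,
                              tf.square(moving_mean),
                              name="moving_variance")
# [1.25 - 0.25, 2.0 - 1.0] = [1.0, 1.0]
```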
import tensorflow as tf
facts_size = facts.get_shape().as_list()[-1] # D value - hidden size of the RNN layer
querry_size = query.get_shape().as_list()[-1]
query = tf.layers.dense(query, facts_size, activation=None, name='f1' + stag)
query = prelu(query)
queries = tf.tile(query, [1, tf.shape(facts)[1]])
queries = tf.reshape(queries, tf.shape(facts))
din_all = tf.concat([queries, facts, queries-facts, queries*facts], axis=-1)
d_layer_1_all = tf.layers.dense(din_all, 80, activation=tf.nn.sigmoid, name='f1_att' + stag)
d_layer_2_all = tf.layers.dense(d_layer_1_all, 40, activation=tf.nn.sigmoid, name='f2_att' + stag)
d_layer_3_all = tf.layers.dense(d_layer_2_all, 1, activation=None, name='f3_att' + stag)
d_layer_3_all = tf.reshape(d_layer_3_all, [-1, 1, tf.shape(facts)[1]])
scores = d_layer_3_all
# Mask
# key_masks = tf.sequence_mask(facts_length, tf.shape(facts)[1]) # [B, T]
key_masks = tf.expand_dims(mask, 1) # [B, 1, T]
paddings = tf.ones_like(scores) * (-2 ** 32 + 1)
if not forCnn:
scores = tf.where(key_masks, scores, paddings) # [B, 1, T]
| tensorflow.layers.dense | 5,686 |
import tensorflow as tf
w = w if w is not None else self.w
b = b if b is not None else self.b
#k_t,k_h,k_w,_,_ = self.w.get_shape().as_list()
#_t = tf.pad(input_var, [[0,0],[0,0],[k_h//2,k_h//2],[k_w//2,k_w//2],[0,0]], "SYMMETRIC")
return tf.nn.bias_add(
tf.nn.convolution(input_var, w,
strides=self.strides,
data_format='NDHWC',
padding='SAME'),
b,name=name)
| tensorflow.nn.convolution | 5,687 |
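The wrapper above routes a 5-D (NDHWC) input through `tf.nn.convolution` and adds a bias. A minimal shape-checked sketch with assumed kernel sizes:

```python
import tensorflow as tf

x = tf.zeros([2, 8, 16, 16, 3])      # [batch, depth, height, width, in_channels]
w = tf.zeros([3, 3, 3, 3, 8])        # [k_d, k_h, k_w, in_channels, out_channels]
b = tf.zeros([8])
y = tf.nn.bias_add(
    tf.nn.convolution(x, w, strides=[1, 1, 1], padding='SAME', data_format='NDHWC'),
    b)
# y has shape [2, 8, 16, 16, 8]
```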
import tensorflow as tf
def contra_traj_lossV4(pred, tgt, horizon=12, resample=1, hard_ratio=1.0):
horizon_pred = horizon_sumV1(pred, horizon)
horizon_tgt = horizon_sumV1(tgt, horizon)
pred_flat = tf.reshape(horizon_pred, [-1])
tgt_flat = tf.reshape(horizon_tgt, [-1])
batch = tf.stack([pred_flat, tgt_flat], 1)
sample_func = sample_pair(batch)
def sample_compute(_):
pairs = sample_func()
loss = compute_contra_loss(*pairs, hard_ratio=hard_ratio)
pct = tf.math.count_nonzero(loss, dtype=tf.float32) / tf.size(loss, out_type=tf.float32)
p = tf.cond(tf.random_uniform((), dtype=tf.float32) < 1e-4,
lambda: tf.print('csrt acc ', [pct]),
lambda: tf.no_op())
with tf.control_dependencies([p]):
return tf.reduce_mean(loss)
loss = tf.map_fn(fn=lambda inp: sample_compute(inp), elems=tf.range(resample), dtype=tf.float32,
parallel_iterations=32)
final_loss = tf.reduce_mean(loss)
return final_loss
| tensorflow.math.count_nonzero | 5,688 |
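The `pct` diagnostic above reports what fraction of the sampled contrastive losses are non-zero. In isolation it looks like this:

```python
import tensorflow as tf

loss = tf.constant([0.0, 0.3, 0.0, 1.2])
pct = tf.math.count_nonzero(loss, dtype=tf.float32) / tf.size(loss, out_type=tf.float32)
# pct evaluates to 0.5: two of the four loss entries are non-zero
```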
import tensorflow as tf
else:
act_f = build_act(make_obs_ph, q_func, num_actions, scope=scope, reuse=reuse)
with tf.variable_scope(scope, reuse=reuse):
# set up placeholders
obs_t_input = make_obs_ph("obs_t")
act_t_ph = tf.placeholder(tf.int32, [None], name="action")
rew_t_ph = tf.placeholder(tf.float32, [None], name="reward")
obs_tp1_input = make_obs_ph("obs_tp1")
done_mask_ph = tf.placeholder(tf.float32, [None], name="done")
importance_weights_ph = tf.placeholder(tf.float32, [None], name="weight")
# q network evaluation
q_t = q_func(obs_t_input.get(), num_actions, scope="q_func", reuse=True) # reuse parameters from act
| tensorflow.placeholder | 5,689 |
import tensorflow as tf
logits, feat = resnet_model_fn(x, training=training_flag)
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=one_hot_labels, logits=logits))
Focal_loss = tf.reduce_mean(focal_loss(one_hot_labels, logits, alpha=0.5))
l2_loss = weight_decay * tf.add_n([tf.nn.l2_loss(v) for v in tf.trainable_variables()])
Center_loss, Centers = center_loss(feat, tf.cast(label, dtype=tf.int32), 0.95, class_num)
Total_loss = cost + l2_loss
optimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate, momentum=momentum, use_nesterov=True)
# Batch norm requires update_ops to be added as a train_op dependency.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
| tensorflow.cast | 5,690 |
import tensorflow as tf
else:
setattr(
self,
'mu_%s' % layer,
tf.constant(1.))
if self.gamma:
setattr(
| tensorflow.constant | 5,691 |
import tensorflow as tf
A = tf.tile(alpha_mean+epsilon*alpha_std, [1, tf.shape(X)[0], 1, 1])
# Compute z_{h}A_{h+1}
Z1 = tf.matmul(Z, A[:,:,:n_basis//2,:])/tf.sqrt(n_basis*.5)
Z2 = tf.matmul(Z, A[:,:,n_basis//2:,:])/tf.sqrt(n_basis*.5)
# Compute u_{h+1} and v_{h+1}
U, V = tf.cos(Z1)+tf.cos(Z2), tf.sin(Z1)+tf.sin(Z2)
Z = tf.concat([U, V], 3)/tf.sqrt(n_out*1.)
KL += tf.reduce_mean(alpha_std**2+alpha_mean**2-2*alpha_logstd-1)/2.
# Output layer
else:
F = tf.squeeze(tf.layers.dense(Z, n_out), [2])
    return F, KL
| tensorflow.sqrt | 5,692
import tensorflow as tf
for batch_size in self._train_batch_sizes():
(images, labels) = random_batch(batch_size, config)
model = revnet.RevNet(config=config)
optimizer = tf.train.GradientDescentOptimizer(0.1)
if defun:
model.call = tfe.function(model.call)
| tensorflow.train.GradientDescentOptimizer | 5,693 |
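Outside the benchmark harness above, `tf.train.GradientDescentOptimizer` is plain SGD; a toy sketch in TF 1.x style:

```python
import tensorflow as tf  # TF 1.x graph-mode API

x = tf.Variable(3.0)
loss = tf.square(x)
train_op = tf.train.GradientDescentOptimizer(learning_rate=0.1).minimize(loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(10):
        sess.run(train_op)           # x moves toward the minimum at 0
```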
import tensorflow as tf
atol=0)
# Test interval (-1, 0], <--> index 0.
self.assertAllClose(
np.log(sm_normal.cdf(0) - sm_normal.cdf(-1)),
qdist.log_prob(0.).eval(),
atol=0)
# Test interval (1, inf), <--> index 2.
self.assertAllClose(
np.log(1. - sm_normal.cdf(1)),
qdist.log_prob(2.).eval(),
atol=0)
def test_log_prob_and_grad_gives_finite_results(self):
with self.test_session():
for dtype in [np.float32, np.float64]:
mu = tf.Variable(0., name="mu", dtype=dtype)
sigma = tf.Variable(1., name="sigma", dtype=dtype)
qdist = distributions.QuantizedDistribution(
base_dist_cls=distributions.Normal,
mu=mu,
sigma=sigma)
x = np.arange(-100, 100, 2).astype(dtype)
tf.initialize_all_variables().run()
proba = qdist.log_prob(x)
grads = tf.gradients(proba, [mu, sigma])
self._assert_all_finite(proba.eval())
self._assert_all_finite(grads[0].eval())
| tensorflow.Variable | 5,694 |
import tensorflow as tf
)
outputs = H * T + inputs * (1.0 - T)
return outputs
def conv1d_banks(inputs, K=16, is_training=True, scope="conv1d_banks"):
with tf.variable_scope(scope):
outputs = tf.layers.conv1d(inputs, embed_size // 2, 1, padding="SAME")
for k in range(2, K + 1):
with tf.variable_scope("num_{}".format(k)):
output = tf.layers.conv1d(inputs, embed_size // 2, k, padding="SAME")
outputs = tf.concat((outputs, output), -1)
| tensorflow.variable_scope | 5,695 |
from tensorflow.python.framework import ops
padding is other than `'VALID'` or `'SAME'`.
"""
with ops.op_scope([value, filter, output_shape], name,
"conv2d_transpose") as name:
| tensorflow.python.framework.ops.op_scope | 5,696 |
import tensorflow as tf
Args:
q_logits: logits for 1st argument of KL divergence shape
[num_timesteps * batch_size, num_classes] if num_classes > 2, and
[num_timesteps * batch_size] if num_classes == 2.
p_logits: logits for 2nd argument of KL divergence with same shape q_logits.
weights: 1-D `float` tensor with shape [num_timesteps * batch_size].
Elements should be 1.0 only on end of sequences
num_classes: a `int`, number of training classes
Returns:
a `float` `scalar`, KL divergence.
"""
if num_classes == 2:
q = tf.nn.sigmoid(q_logits)
p = tf.nn.sigmoid(p_logits)
kl = (-tf.nn.sigmoid_cross_entropy_with_logits(logits=q_logits, labels=q) +
          tf.nn.sigmoid_cross_entropy_with_logits(logits=p_logits, labels=q))
else:
q = tf.nn.softmax(q_logits)
p = tf.nn.softmax(p_logits)
kl = tf.reduce_sum(q * (tf.log(q) - tf.log(p)), 1)
num_labels = tf.reduce_sum(weights)
num_labels = tf.where(tf.equal(num_labels, 0.), 1., num_labels)
kl.get_shape().assert_has_rank(2)
weights.get_shape().assert_has_rank(1)
| tensorflow.nn.sigmoid | 5,697 |
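The two-class branch above relies on the identity KL(q‖p) = H(q, p) − H(q), with both sigmoid cross-entropies evaluated against q as the label distribution. A tiny self-contained sketch:

```python
import tensorflow as tf

q_logits = tf.constant([[0.5], [-1.0]])
p_logits = tf.constant([[0.2], [0.3]])
q = tf.nn.sigmoid(q_logits)
# KL(q || p) = H(q, p) - H(q)
kl = (-tf.nn.sigmoid_cross_entropy_with_logits(logits=q_logits, labels=q)
      + tf.nn.sigmoid_cross_entropy_with_logits(logits=p_logits, labels=q))
```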
import tensorflow as tf
# Critic 學習率
# C_LR = 0.0002
C_LR = 0.002
class MODEL(object):
def __init__(self):
self.sess = tf.Session()
self.tfs = tf.placeholder(tf.float32, [None, 84, 84, 3], 'state')
c0 = tf.cast(self.tfs, tf.float32) / 255.
c1 = tf.nn.relu(self.conv(c0,
'c1',
nf=32,
rf=8,
stride=4,
init_scale=np.sqrt(2)))
c2 = tf.nn.relu(
self.conv(
| tensorflow.cast | 5,698 |
from tensorflow.python.ops import nn_ops
with tf.device("/gpu:0"):
conv = nn_ops.conv3d(d1, d2, strides, padding)
| tensorflow.python.ops.nn_ops.conv3d | 5,699 |
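`nn_ops.conv3d` is the low-level op behind the public `tf.nn.conv3d`; the `d1`, `d2`, `strides`, and `padding` in the entry come from its surrounding benchmark. A minimal sketch with assumed shapes:

```python
import tensorflow as tf

x = tf.zeros([1, 8, 8, 8, 4])         # NDHWC input: [batch, depth, height, width, channels]
w = tf.zeros([3, 3, 3, 4, 16])        # [k_d, k_h, k_w, in_channels, out_channels]
y = tf.nn.conv3d(x, w, strides=[1, 1, 1, 1, 1], padding='SAME')
# y has shape [1, 8, 8, 8, 16]
```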