Dataset columns:
  seed      string, lengths 25–2.89k  (a code snippet exercising the seed API)
  seed_api  string, lengths 14–102    (fully-qualified name of the API used in the seed)
  index     int64, range 0–14.8k      (row index)
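As a minimal sketch of how a dataset with this exact three-column layout could be loaded and inspected with the Hugging Face `datasets` library — only the schema above is taken from this dump; the dataset id in the snippet is a hypothetical placeholder:

# Minimal sketch, assuming a Hugging Face dataset with columns
# (seed, seed_api, index). "org/tf-api-seeds" is a hypothetical id;
# substitute the real dataset name.
from datasets import load_dataset

ds = load_dataset("org/tf-api-seeds", split="train")
row = ds[0]                 # each row is a plain dict
print(row["seed_api"])      # e.g. "tensorflow.contrib.rnn.LSTMBlockCell"
print(row["seed"][:200])    # first characters of the seed snippet
print(row["index"])         # integer row index

The rows below cover indices 5,300 through 5,399.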
seed:
import tensorflow as tf

        self.saver = tf.train.Saver(tf.global_variables())

    def _get_lstm_cell(self, config, is_training):
        if config.rnn_mode == BASIC:
            return tf.contrib.rnn.BasicLSTMCell(
                config.hidden_size, forget_bias=0., state_is_tuple=True,
                reuse=not is_training)
        if config.rnn_mode == BLOCK:
            return tf.contrib.rnn.LSTMBlockCell(
                config.hidden_size, forget_bias=0.)
        raise ValueError('rnn_mode {} not supported'.format(config.rnn_mode))

    def _build_rnn_graph(self, inputs, config, is_training):
        def make_cell():
            cell = self._get_lstm_cell(config, is_training)
seed_api: tensorflow.contrib.rnn.LSTMBlockCell
index: 5,300

seed:
import tensorflow as tf

        self.run_op_benchmark(
            sess,
            st_deserialized_op,
            min_iters=2000,
            name="benchmark_very_large_2d_float_st_serialization")


if __name__ == "__main__":
    tf.test.main()
seed_api: tensorflow.test.main
index: 5,301

seed:
import tensorflow as tf

    # horizon_pred, horizon_tgt = horizon_sumV2(pred, tgt, horizon)
    pred_flat1, pred_flat2 = tf.reshape(horizon_pred, [-1, 1]), tf.reshape(horizon_pred, [1, -1])
    tgt_flat1, tgt_flat2 = tf.reshape(horizon_tgt, [-1, 1]), tf.reshape(horizon_tgt, [1, -1])
    tgt_dif = tgt_flat1 - tgt_flat2
    pred_dif = pred_flat1 - pred_flat2
    geq = tf.cast(tgt_dif > 0, tf.bool)
    tgt_posi_dif = tf.where(geq, tgt_dif, -tgt_dif)
    pred_posi_dif = tf.where(geq, pred_dif, -pred_dif)
    loss = tf.maximum(0., tgt_posi_dif - pred_posi_dif)
    cstr_pct = tf.math.count_nonzero(loss, dtype=tf.float32) / tf.cast(tf.reduce_prod(tf.shape(loss)), tf.float32)
    unorm_w = tf.exp((tgt_flat1 + tgt_flat2) / temp)
    loss = unorm_w * loss / (tf.reduce_sum(unorm_w))
    a = tf.print(tf.reduce_sum(unorm_w))
    with tf.control_dependencies([a]):
        final_loss = tf.reduce_sum(loss)
    return final_loss, cstr_pct

def contra_traj_lossV8(pred, tgt, horizon=12):
    horizon_pred, horizon_tgt = horizon_sumV1(pred, horizon), horizon_sumV1(tgt, horizon)
    # horizon_pred, horizon_tgt = horizon_sumV2(pred, tgt, horizon)
    horizon_pred1, horizon_pred2 = tf.split(horizon_pred, 2, axis=0)
    horizon_tgt1, horizon_tgt2 = tf.split(horizon_tgt, 2, axis=0)
    pred_flat1, pred_flat2 = tf.reshape(horizon_pred1, [-1, 1]), tf.reshape(horizon_pred2, [1, -1])
    tgt_flat1, tgt_flat2 = tf.reshape(horizon_tgt1, [-1, 1]), tf.reshape(horizon_tgt2, [1, -1])
    tgt_dif = tgt_flat1 - tgt_flat2
    pred_dif = pred_flat1 - pred_flat2
    geq = tf.cast(tgt_dif > 0, tf.bool)
seed_api: tensorflow.reduce_sum
index: 5,302

seed:
import tensorflow as tf

            tf.TensorShape([None, None, None, None, None]),
            tf.TensorShape([]),
        ],
        back_prop=False,
        parallel_iterations=1)
    if inputs_old is not None:  # Restore to not confuse Estimator.
      features["inputs"] = inputs_old
    # Reassign targets back to the previous value.
    if targets_old is not None:
      features["targets"] = targets_old
    losses = {"training": loss}
    if "partial_targets" in features:
      partial_target_length = common_layers.shape_list(
          features["partial_targets"])[1]
      result = tf.slice(result, [0, partial_target_length, 0, 0],
                        [-1, -1, -1, -1])
    return {
        "outputs": result,
        "scores": None,
        "logits": logits,
        "losses": losses,
    }

  def sample(self, features):
    """Run the model and extract samples.

    Args:
      features: a map of string to `Tensor`.
seed_api: tensorflow.slice
index: 5,303

seed:
import tensorflow as tf

    if tf.DeviceSpec.from_string(device.name).device_type == "TPU":
      return (32,)
    return (16, 32)

  def _force_device_sync(self):
    """Shamelessly copied from `resnet50_test.py`."""
    tf.constant(1.).cpu()

  def _report(self, label, start, num_iters, device, batch_size, data_format):
    avg_time = (time.time() - start) / num_iters
    dev = tf.DeviceSpec.from_string(device).device_type.lower()
    name = "%s_%s_batch_%d_%s" % (label, dev, batch_size, data_format)
    extras = {"examples_per_sec": batch_size / avg_time}
    self.report_benchmark(
        iters=num_iters, wall_time=avg_time, name=name, extras=extras)

  def _benchmark_eager_apply(self, label, device_and_format, defun=False,
seed_api: tensorflow.DeviceSpec.from_string
index: 5,304

seed:
import tensorflow as tf

        c = c*(1-m)
        h = h*(1-m)
        z = tf.matmul(x, wx) + tf.matmul(h, wh) + b
        i, f, o, u = tf.split(axis=1, num_or_size_splits=4, value=z)
        i = tf.nn.sigmoid(i)
        f = tf.nn.sigmoid(f)
        o = tf.nn.sigmoid(o)
        u = tf.tanh(u)
        c = f*c + i*u
        h = o*tf.tanh(c)
        xs[idx] = h
    s = tf.concat(axis=1, values=[c, h])
    return xs, s

def _ln(x, g, b, e=1e-5, axes=[1]):
    u, s = tf.nn.moments(x, axes=axes, keep_dims=True)
    x = (x-u)/tf.sqrt(s+e)
    x = x*g+b
    return x

def lnlstm(xs, ms, s, scope, nh, init_scale=1.0):
    nbatch, nin = [v.value for v in xs[0].get_shape()]
    with tf.variable_scope(scope):
        wx = tf.get_variable("wx", [nin, nh*4], initializer=ortho_init(init_scale))
        gx = tf.get_variable("gx", [nh*4], initializer=tf.constant_initializer(1.0))
        bx = tf.get_variable("bx", [nh*4], initializer=tf.constant_initializer(0.0))
        wh = tf.get_variable("wh", [nh, nh*4], initializer=ortho_init(init_scale))
        gh = tf.get_variable("gh", [nh*4], initializer=tf.constant_initializer(1.0))
        bh = tf.get_variable("bh", [nh*4], initializer=tf.constant_initializer(0.0))
seed_api: tensorflow.nn.moments
index: 5,305

seed:
import tensorflow as tf

        segment_ids.append(0)

    assert len(input_ids) == max_seq_length
    assert len(input_mask) == max_seq_length
    assert len(segment_ids) == max_seq_length

    label_id = label_map[example.label]
    if ex_index < 5:
        tf.logging.info("*** Example ***")
        tf.logging.info("guid: %s" % (example.guid))
        tf.logging.info("tokens: %s" % " ".join(
            [tokenization.printable_text(x) for x in tokens]))
        tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
        tf.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
        tf.logging.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
        tf.logging.info("label: %s (id = %d)" % (example.label, label_id))
seed_api: tensorflow.logging.info
index: 5,306

seed:
import tensorflow as tf

            estimator_spec = tf.estimator.EstimatorSpec(mode=mode,
                                                        loss=loss,
                                                        train_op=train_op,
                                                        training_hooks=training_hooks)
            if output_type == "sess":
                try:
                    pred_label = tf.argmax(distillation_loss["st_logits"], axis=-1, output_type=tf.int32)
                    correct = tf.equal(
                        tf.cast(tf.ones_like(label_ids, dtype=tf.int32), tf.int32),
                        tf.cast(pred_label, tf.int32)
                    )
                    st_accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))

                    pred_label = tf.argmax(distillation_loss["te_logits"], axis=-1, output_type=tf.int32)
                    correct = tf.equal(
                        tf.cast(tf.zeros_like(label_ids, dtype=tf.int32), tf.int32),
                        tf.cast(pred_label, tf.int32)
                    )
                    te_accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
seed_api: tensorflow.cast
index: 5,307

seed:
import tensorflow as tf

from __future__ import print_function

import numpy as np
import tensorflow as tf


class AssignOpTest(tf.test.TestCase):

  # NOTE(mrry): We exclude these tests from the TSAN TAP target, because they
  # contain benign and deliberate data races when multiple threads update
  # the same parameters without a lock.
  def testParallelUpdateWithoutLocking(self):
    with self.test_session() as sess:
      ones_t = tf.fill([1024, 1024], 1.0)
      p = tf.Variable(tf.zeros([1024, 1024]))
      adds = [tf.assign_add(p, ones_t, use_locking=False)
              for _ in range(20)]
      tf.initialize_all_variables().run()

      def run_add(add_op):
        sess.run(add_op)

      threads = [self.checkedThread(target=run_add, args=(add_op,))
                 for add_op in adds]
      for t in threads:
        t.start()
      for t in threads:
        t.join()
seed_api: tensorflow.zeros
index: 5,308

seed:
import tensorflow as tf

import variable_mgr

tf.flags.DEFINE_string('model', 'trivial', 'name of the model to run')

# The code will first check if it's running under benchmarking mode
# or evaluation mode, depending on FLAGS.eval:
# Under the evaluation mode, this script will read a saved model,
# and compute the accuracy of the model against a validation dataset.
# Additional ops for accuracy and top_k predictors are only used under this
# mode.
# Under the benchmarking mode, user can specify whether or not to use
# the forward-only option, which will only compute the loss function.
# forward-only cannot be enabled with eval at the same time.
tf.flags.DEFINE_boolean('eval', False, 'whether use eval or benchmarking')
tf.flags.DEFINE_boolean('forward_only', False,
                        """whether use forward-only or training for benchmarking""")
tf.flags.DEFINE_integer('batch_size', 0, 'batch size per compute device')
tf.flags.DEFINE_integer('num_batches', 100,
                        'number of batches to run, excluding warmup')
tf.flags.DEFINE_integer('num_warmup_batches', None,
                        'number of batches to run before timing')
tf.flags.DEFINE_integer('autotune_threshold', None,
                        'The autotune threshold for the models')
tf.flags.DEFINE_integer('num_gpus', 1, 'the number of GPUs to run on')
tf.flags.DEFINE_integer('display_every', 10,
                        """Number of local steps after which progress is printed
seed_api: tensorflow.flags.DEFINE_boolean
index: 5,309

seed:
import tensorflow as tf

        logging.info(" [*] printing trainable variables")
    else:
        try:  # TF1.0+
            t_vars = tf.global_variables()
        except Exception:  # TF0.12
            t_vars = tf.all_variables()
        logging.info(" [*] printing global variables")
    for idx, v in enumerate(t_vars):
        logging.info(" var {:3}: {:15} {}".format(idx, str(v.get_shape()), v.name))
seed_api: tensorflow.all_variables
index: 5,310

seed:
import tensorflow as tf

        shape=[bert_config.vocab_size],
        initializer=tf.zeros_initializer())
    logits = tf.matmul(input_tensor, output_weights, transpose_b=True)
    logits = tf.nn.bias_add(logits, output_bias)
    log_probs = tf.nn.log_softmax(logits, axis=-1)

    label_ids = tf.reshape(label_ids, [-1])
    label_weights = tf.reshape(label_weights, [-1])
seed_api: tensorflow.nn.log_softmax
index: 5,311

seed:
import tensorflow as tf

        self.discriminator_loss = self.D_B_loss + self.D_A_loss

        self.loss_GABA_sum = tf.summary.scalar("g_loss_a2b", self.loss_GABA)
        self.loss_GBAB_sum = tf.summary.scalar("g_loss_b2a", self.loss_GBAB)
        self.g_total_loss_sum = tf.summary.scalar("g_loss", self.generator_loss)
        self.g_sum = tf.summary.merge([self.loss_GABA_sum, self.loss_GBAB_sum, self.g_total_loss_sum])
seed_api: tensorflow.summary.scalar
index: 5,312

seed:
import tensorflow as tf

    Return:
        A tuple of list: (nodes, adjcents)
            nodes: A list of N + 1 `tf.Tensor` of `int64`, N is the number of
                hops. Specify node set of each hop, including the root.
            adjcents: A list of N `tf.SparseTensor` of `int64`. Specify
                adjacent matrix between hops.
    """
    nodes = tf.reshape(nodes, [-1])
    nodes_list = [nodes]
    adj_list = []
    for hop_edge_types in edge_types:
        neighbor, weight, _ = get_full_neighbor(nodes, hop_edge_types)
        next_nodes, next_idx = tf.unique(neighbor.values, out_idx=tf.int64)
        next_indices = tf.stack([neighbor.indices[:, 0], next_idx], 1)
        next_values = weight.values
        next_shape = tf.stack([tf.size(nodes), tf.size(next_nodes)])
        next_shape = tf.cast(next_shape, tf.int64)
        next_adj = tf.SparseTensor(next_indices, next_values, next_shape)
        next_adj = tf.sparse_reorder(next_adj)
        nodes_list.append(next_nodes)
        adj_list.append(next_adj)
        nodes = next_nodes
    return nodes_list, adj_list
seed_api: tensorflow.unique
index: 5,313

seed:
import tensorflow as tf

            eval_metrics = (metric_fn,
                            [per_example_loss, label_ids, logits, is_real_example])
            output_spec = tf.contrib.tpu.TPUEstimatorSpec(
                mode=mode,
                loss=total_loss,
                eval_metrics=eval_metrics,
                scaffold_fn=scaffold_fn)
        else:
            output_spec = tf.contrib.tpu.TPUEstimatorSpec(
                mode=mode,
                predictions={"probabilities": probabilities},
                scaffold_fn=scaffold_fn)
        return output_spec

    return model_fn
seed_api: tensorflow.contrib.tpu.TPUEstimatorSpec
index: 5,314

seed:
import tensorflow as tf

            # The larger local steps is, the lower is the variance in our policy gradients estimate
            # on the one hand; but on the other hand, we get less frequent parameter updates, which
            # slows down learning. In this code, we found that making local steps be much
            # smaller than 20 makes the algorithm more difficult to tune and to get to work.
            self.runner = RunnerThread(env, pi, 20)

            grads = tf.gradients(self.loss, pi.var_list)

            tf.summary.scalar("model/policy_loss", pi_loss / bs)
            tf.summary.scalar("model/value_loss", vf_loss / bs)
            tf.summary.scalar("model/entropy", entropy / bs)
            tf.summary.image("model/state", pi.x)
            tf.summary.scalar("model/grad_global_norm", tf.global_norm(grads))
            tf.summary.scalar("model/var_global_norm", tf.global_norm(pi.var_list))
            self.summary_op = tf.summary.merge_all()

            grads, _ = tf.clip_by_global_norm(grads, 40.0)

            # copy weights from the parameter server to the local model
            self.sync = tf.group(*[v1.assign(v2) for v1, v2 in zip(pi.var_list, self.network.var_list)])

            grads_and_vars = list(zip(grads, self.network.var_list))
            self.inc_step = self.global_step.assign_add(tf.shape(pi.x)[0])

            # each worker has a different set of adam optimizer parameters
            opt = tf.train.AdamOptimizer(1e-4)
            self.train_op = tf.group(opt.apply_gradients(grads_and_vars), self.inc_step)
            self.summary_writer = None
seed_api: tensorflow.summary.merge_all
index: 5,315

seed:
import tensorflow as tf

            # with char embeddings init between -1 and 1
            #w_init = tf.random_normal_initializer(
            #    mean=0.0,
            #    stddev=np.sqrt(2.0 / (width * char_embed_dim))
            #)

            # Kim et al 2015, +/- 0.05
            w_init = tf.random_uniform_initializer(
                minval=-0.05, maxval=0.05)
        elif cnn_options['activation'] == 'tanh':
            # glorot init
            w_init = tf.random_normal_initializer(
                mean=0.0,
                stddev=np.sqrt(1.0 / (width * char_embed_dim))
seed_api: tensorflow.random_uniform_initializer
index: 5,316

seed:
import tensorflow as tf

    Args:
      prev_rnn_state: The state of the RNN from the previous timestep.
      prev_latent_encoded: Float Tensor of shape
        [batch_size, encoded_latent_size], the previous latent state z_{t-1}
        run through latent_encoder.
      inputs: A Tensor of shape [batch_size, data_size], the current inputs to
        the model. Most often this is x_{t-1}, the previous token in the
        observation sequence.
    Returns:
      rnn_out: The output of the RNN.
      rnn_state: The new state of the RNN.
    """
    inputs_encoded = self.data_encoder(tf.to_float(inputs))
    rnn_inputs = tf.concat([inputs_encoded, prev_latent_encoded], axis=1)
    rnn_out, rnn_state = self.rnn_cell(rnn_inputs, prev_rnn_state)
    return rnn_out, rnn_state

  def transition(self, rnn_out):
    """Computes the transition distribution p(z_t|h_t).

    Note that p(z_t | h_t) = p(z_t| z_{1:t-1}, x_{1:t-1})

    Args:
      rnn_out: The output of the rnn for the current timestep.
    Returns:
seed_api: tensorflow.to_float
index: 5,317

seed:
import tensorflow as tf

        file_based_convert_examples_to_features(
            train_examples, label_list, FLAGS.max_seq_length, tokenizer, train_file)
        tf.logging.info("***** Running training *****")
        tf.logging.info("  Num examples = %d", len(train_examples))
        tf.logging.info("  Batch size = %d", FLAGS.train_batch_size)
        tf.logging.info("  Num steps = %d", num_train_steps)
        train_input_fn = file_based_input_fn_builder(
            input_file=train_file,
            seq_length=FLAGS.max_seq_length,
            is_training=True,
seed_api: tensorflow.logging.info
index: 5,318

seed:
import tensorflow as tf

@registry.register_model
class DenseBitwiseCategoricalPolicy(PolicyBase):
  """Dense network with bitwise input and categorical output."""

  def body(self, features):
    observations = features["inputs"]
    flat_x = tf.layers.flatten(observations)
    with tf.variable_scope("dense_bitwise"):
      flat_x = discretization.int_to_bit_embed(flat_x, 8, 32)

      x = tf.layers.dense(flat_x, 256, activation=tf.nn.relu)
      x = tf.layers.dense(flat_x, 128, activation=tf.nn.relu)

      logits = tf.layers.dense(x, self.hparams.problem.num_actions)

      value = tf.layers.dense(x, 1)[..., 0]

    return {"target_policy": logits, "target_value": value}
seed_api: tensorflow.layers.dense
index: 5,319

seed:
import tensorflow as tf

    pos = beam_search.resize_like(pos, symbol)
    max_pos = beam_search.resize_like(max_pos, symbol)
    pos += tf.to_float(is_not_ins)
    if max_pos is not None:
        pos = tf.minimum(pos, tf.to_float(max_pos))
    return pos
seed_api: tensorflow.to_float
index: 5,320

seed:
import tensorflow as tf

        eps = tf.get_variable("eps", (), initializer=tf.constant_initializer(0))

        q_values = q_func(observations_ph.get(), num_actions, scope="q_func")
        deterministic_actions = tf.argmax(q_values, axis=1)

        batch_size = tf.shape(observations_ph.get())[0]
        random_actions = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=num_actions, dtype=tf.int64)
        chose_random = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=1, dtype=tf.float32) < eps
        stochastic_actions = tf.where(chose_random, random_actions, deterministic_actions)

        output_actions = tf.cond(stochastic_ph, lambda: stochastic_actions, lambda: deterministic_actions)
        update_eps_expr = eps.assign(tf.cond(update_eps_ph >= 0, lambda: update_eps_ph, lambda: eps))

        _act = U.function(inputs=[observations_ph, stochastic_ph, update_eps_ph],
                          outputs=output_actions,
                          givens={update_eps_ph: -1.0, stochastic_ph: True},
                          updates=[update_eps_expr])
seed_api: tensorflow.where
index: 5,321

seed:
import tensorflow as tf

        with tf.Graph().as_default():
            config = config_.get_hparams_cifar_38()
            config.add_hparam("n_classes", 10)
            config.add_hparam("dataset", "cifar-10")

            x = tf.random_normal(
                shape=(self.config.batch_size,) + self.config.input_shape)
            t = tf.random_uniform(
                shape=(self.config.batch_size,),
                minval=0,
                maxval=self.config.n_classes,
                dtype=tf.int32)
            global_step = tf.Variable(0., trainable=False)
            model = revnet.RevNet(config=config)
seed_api: tensorflow.random_uniform
index: 5,322

seed:
import tensorflow as tf

config.gpu_options.per_process_gpu_memory_fraction = 0.4
sess_limited = tf.Session(config=config)

# How to set placements on multiple devices.
# Here, assume we have three devices CPU:0, GPU:0, and GPU:1
if tf.test.is_built_with_cuda():
    with tf.device('/cpu:0'):
        a = tf.constant([1.0, 3.0, 5.0], shape=[1, 3])
        b = tf.constant([2.0, 4.0, 6.0], shape=[3, 1])
    with tf.device('/gpu:1'):
        c = tf.matmul(a, b)
        c = tf.reshape(c, [-1])
    with tf.device('/gpu:2'):
seed_api: tensorflow.constant
index: 5,323

seed:
import tensorflow as tf

                if pt[0] > box_limits_x[1]:
                    box_limits_x[1] = pt[0]
                if pt[1] < box_limits_z[0]:
                    box_limits_z[0] = pt[1]
                if pt[1] > box_limits_z[1]:
                    box_limits_z[1] = pt[1]
            mean_x = tf.reduce_mean(box_limits_x)
            mean_z = tf.reduce_mean(box_limits_z)
        else:
            mean_x = tf.reduce_mean(labeled_translations[:, 0])
            mean_z = tf.reduce_mean(labeled_translations[:, 2])
        samples_world = grid.generate(
            (mean_x - 0.5, 0.0, mean_z - 0.5),
            (mean_x + 0.5, 1.0, mean_z + 0.5),
            [self.resolution, self.resolution, self.resolution])
        # samples_world = grid.generate(
        #     (box_limits_x[0][0], box_limits_y[0], box_limits_z[0][0]),
        #     (box_limits_x[1][0], box_limits_y[1], box_limits_z[1][0]),
        #     [self.resolution, self.resolution, self.resolution])
        # samples_world = grid.generate(
        #     (-5.0, -5.0, -5.0),
        #     (5.0, 5.0, 5.0),
        #     [self.resolution, self.resolution, self.resolution])
        samples_world = tf.reshape(samples_world, [-1, 3])
seed_api: tensorflow.reduce_mean
index: 5,324

seed:
import tensorflow as tf

    # if our output includes timesteps we need to reshape
    return tf.reshape(res, tf.shape(output)[:-1])
seed_api: tensorflow.shape
index: 5,325

seed:
import tensorflow as tf

    model_name = tu.get_model_name(
        "savedmodel_nobatch" if max_batch == 0 else "savedmodel",
        input_dtype, output0_dtype, output1_dtype)
    model_version_dir = models_dir + "/" + model_name + "/" + str(model_version)

    try:
        os.makedirs(model_version_dir)
    except OSError as ex:
        pass  # ignore existing dir

    with tf.Session() as sess:
        input0_tensor = tf.get_default_graph().get_tensor_by_name(
            "TENSOR_INPUT0:0")
        input1_tensor = tf.get_default_graph().get_tensor_by_name(
            "TENSOR_INPUT1:0")
        output0_tensor = tf.get_default_graph().get_tensor_by_name(
            "TENSOR_OUTPUT0:0")
        output1_tensor = tf.get_default_graph().get_tensor_by_name(
            "TENSOR_OUTPUT1:0")
        tf.saved_model.simple_save(sess,
seed_api: tensorflow.Session
index: 5,326

seed:
import tensorflow as tf

    def _decode_masks(self, parsed_tensors):
        """Decode a set of PNG masks to the tf.float32 tensors."""
        def _decode_png_mask(png_bytes):
            mask = tf.squeeze(
                tf.io.decode_png(png_bytes, channels=1, dtype=tf.uint8), axis=-1)
            mask = tf.cast(mask, dtype=tf.float32)
            mask.set_shape([None, None])
            return mask

        height = parsed_tensors['image/height']
        width = parsed_tensors['image/width']
seed_api: tensorflow.cast
index: 5,327

seed:
import tensorflow as tf

    # Do random crop + horizontal flip for each train image
    image = tf.pad(image, [[4, 4], [4, 4], [0, 0]])
seed_api: tensorflow.pad
index: 5,328

seed:
import tensorflow as tf

            pass
        else:
            # add a skip connection
            lstm_cell = tf.nn.rnn_cell.ResidualWrapper(lstm_cell)

        # collect the input state, run the dynamic rnn, collect
seed_api: tensorflow.nn.rnn_cell.ResidualWrapper
index: 5,329

seed:
import tensorflow as tf

        # loss = None
        with tf.name_scope(name, "click_loglikelihood"):
            ob_prob = tf.nn.softmax(propensity)
            rel_prob = tf.nn.softmax(train_output)
            click_prob = ob_prob * rel_prob
            click_prob_norm = click_prob / tf.reduce_sum(click_prob, axis=1, keep_dims=True)
            label_dis = labels / tf.reduce_sum(labels, 1, keep_dims=True)
            entropy = tf.reduce_sum(tf.math.log(click_prob_norm) * label_dis, 1)
        return tf.reduce_mean(entropy)

    def click_weighted_pairwise_loss(self, output, labels, propensity_weights, name=None):
        """Computes pairwise entropy loss with propensity weighting.

        Args:
            output: (tf.Tensor) A tensor with shape [batch_size, list_size]. Each value is
seed_api: tensorflow.math.log
index: 5,330

seed:
from tensorflow.python.ops import math_ops

      `predictions`, or if either `metrics_collections` or `updates_collections`
      are not a list or tuple.
  """
  top_k_idx = math_ops.to_int64(top_k_idx)
  weights = _mask_weights(ignore_mask, weights)
  tp, tp_update = _streaming_sparse_true_positive_at_k(
seed_api: tensorflow.python.ops.math_ops.to_int64
index: 5,331

seed:
import tensorflow as tf

        outputs_bw, (hidden_bw, output_bw) = lstm_cell_bw(t, dtype=tf.float32, sequence_length=nwords)
        outputs = tf.concat([outputs_fw, outputs_bw], axis=-1)
        hidden = tf.concat([hidden_fw, hidden_bw], axis=-1)
        output = tf.concat([output_fw, output_bw], axis=-1)
    else:
        outputs = outputs_fw
seed_api: tensorflow.concat
index: 5,332

seed:
import tensorflow as tf

        writer.write(json.dumps(sentences, indent=2, ensure_ascii=False))


def main(_):
    tf.logging.set_verbosity(tf.logging.INFO)

    bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)
seed_api: tensorflow.logging.set_verbosity
index: 5,333

seed:
import tensorflow as tf

            classes,
            min_score_thresh=0.65,
            min_iou_thresh=0.5,
            is_class_agnostic=False)
        nms_masks_expected2 = tf.stack([mask0, mask1, mask4, mask2])
        nms_scores_expected2 = tf.constant([1.0, 0.9, 0.85, 0.8], dtype=tf.float32)
        nms_classes_expected2 = tf.constant([1, 2, 2, 3], dtype=tf.int32)

        self.assertAllEqual(nms_masks1.numpy(), nms_masks_expected1.numpy())
seed_api: tensorflow.stack
index: 5,334

seed:
import tensorflow as tf

            operation = tf.assign(perturbed_var,
                                  var + tf.random_normal(shape=tf.shape(var), mean=0., stddev=param_noise_scale))
        else:
            # Do not perturb, just assign.
            operation = tf.assign(perturbed_var, var)
        perturb_ops.append(operation)
    assert len(perturb_ops) == len(all_vars)
    return tf.group(*perturb_ops)
seed_api: tensorflow.assign
index: 5,335

seed:
import tensorflow as tf

            w = tf.get_variable('w', [self.H, self.D], initializer=self.weight_initializer)
            b = tf.get_variable('b', [self.D], initializer=self.const_initializer)
            w_att = tf.get_variable('w_att', [self.D, 1], initializer=self.weight_initializer)

            h_att = tf.nn.relu(features_proj + tf.expand_dims(tf.matmul(h, w), 1) + b)  # (N, L, D)
            out_att = tf.reshape(tf.matmul(tf.reshape(h_att, [-1, self.D]), w_att), [-1, self.L])  # (N, L)
            alpha = tf.nn.softmax(out_att)
            context = tf.reduce_sum(features * tf.expand_dims(alpha, 2), 1, name='context')  # (N, D)
            return context, alpha

    def _selector(self, context, h, reuse=False):
        with tf.variable_scope('selector', reuse=reuse):
            w = tf.get_variable('w', [self.H, 1], initializer=self.weight_initializer)
            b = tf.get_variable('b', [1], initializer=self.const_initializer)
            beta = tf.nn.sigmoid(tf.matmul(h, w) + b, 'beta')  # (N, 1)
            context = tf.multiply(beta, context, name='selected_context')
            return context, beta

    def _decode_lstm(self, x, h, context, dropout=False, reuse=False):
        with tf.variable_scope('logits', reuse=reuse):
            w_h = tf.get_variable('w_h', [self.H, self.M], initializer=self.weight_initializer)
            b_h = tf.get_variable('b_h', [self.M], initializer=self.const_initializer)
            w_out = tf.get_variable('w_out', [self.M, self.V], initializer=self.weight_initializer)
            b_out = tf.get_variable('b_out', [self.V], initializer=self.const_initializer)

            if dropout:
                h = tf.nn.dropout(h, 0.5)
            h_logits = tf.matmul(h, w_h) + b_h

            if self.ctx2out:
seed_api: tensorflow.multiply
index: 5,336

seed:
from tensorflow.contrib.learn.python.learn.estimators import tensor_signature

    def _check_inputs(self, features, targets):
        if self._features_info is not None:
            if not tensor_signature.tensors_compatible(features, self._features_info):
                raise ValueError('Features are incompatible with given information. '
                                 'Given features: %s, required signatures: %s.' %
                                 (str(features), str(self._features_info)))
        else:
            self._features_info = tensor_signature.create_signatures(features)
        if self._targets_info is not None:
            if not tensor_signature.tensors_compatible(targets, self._targets_info):
                raise ValueError('Targets are incompatible with given information. '
                                 'Given targets: %s, required signatures: %s.' %
                                 (str(targets), str(self._targets_info)))
        else:
seed_api: tensorflow.contrib.learn.python.learn.estimators.tensor_signature.create_signatures
index: 5,337

seed:
import tensorflow as tf

def instance_norm(x, name='instance_norm'):
    with tf.variable_scope(name):
        if reuse:
            tf.get_variable_scope().reuse_variables()
        else:
            assert tf.get_variable_scope().reuse is False
seed_api: tensorflow.get_variable_scope
index: 5,338

seed:
import tensorflow as tf

            NotImplementedError, "log_prob is not implemented"):
        terrible_distribution.log_prob(1.)
    with self.assertRaisesRegexp(
            NotImplementedError, "cdf is not implemented"):
        terrible_distribution.cdf(1.)
    with self.assertRaisesRegexp(
            NotImplementedError, "log_cdf is not implemented"):
        terrible_distribution.log_cdf(1.)


if __name__ == "__main__":
    tf.test.main()
seed_api: tensorflow.test.main
index: 5,339

seed:
import tensorflow as tf

constraint['sigmean'] = (5.20, 5.30)
constraint['sigwidth'] = (0.001, 1.)
constraint['argpar'] = (-100., -1.)
constraint['nsig'] = (0., 10000)
constraint['nbkg'] = (0., 10000)
constraint['mes'] = (5.20, 5.30)

# keep a variable dictionary for easy key-based access compatible with constraints
vdict = {}

pi = tf.constant(np.pi, dtype=tf.float64, name="pi")
sqrt2pi = tf.constant(np.sqrt(2 * np.pi), dtype=tf.float64, name="sqrt2pi")
two = tf.constant(2, dtype=tf.float64, name="two")
one = tf.constant(1, dtype=tf.float64, name="one")
zero = tf.constant(0, dtype=tf.float64, name="zero")


def gradsafe_sqrt(x, clip_low=1e-18, name=None):
    with tf.name_scope(name, "gradsafe_sqrt"):
        return tf.sqrt(tf.clip_by_value(x, clip_low, x))
seed_api: tensorflow.constant
index: 5,340

seed:
import tensorflow.contrib.layers as layers

    with tf.variable_scope(scope, reuse=reuse):
        out = img_in
        with tf.variable_scope("convnet"):
            # original architecture
            out = layers.convolution2d(out, num_outputs=32, kernel_size=8, stride=4, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=4, stride=2, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=3, stride=1, activation_fn=tf.nn.relu)
        out = layers.flatten(out)
        with tf.variable_scope("action_value"):
            out = layers.fully_connected(out, num_outputs=512, activation_fn=tf.nn.relu)
            out = layers.fully_connected(out, num_outputs=num_actions, activation_fn=None)

        return out
seed_api: tensorflow.contrib.layers.flatten
index: 5,341

seed:
from tensorflow.python.framework import ops

    if event_ndims is None:
      raise ValueError("event_ndims cannot be None")
    self._batch_ndims = batch_ndims
    self._event_ndims = event_ndims
    self._validate_args = validate_args
    with ops.name_scope(name) as ns:
      self._name = ns
      with ops.name_scope("init"):
        self._batch_ndims = self._assert_non_negative_int32_scalar(
            ops.convert_to_tensor(
                batch_ndims, name="batch_ndims"))
        self._batch_ndims_static, self._batch_ndims_is_0 = (
            self._introspect_ndims(self._batch_ndims))
        self._event_ndims = self._assert_non_negative_int32_scalar(
            ops.convert_to_tensor(
                event_ndims, name="event_ndims"))
        self._event_ndims_static, self._event_ndims_is_0 = (
            self._introspect_ndims(self._event_ndims))

  @property
  def name(self):
    """Name given to ops created by this class."""
    return self._name

  @property
  def batch_ndims(self):
    """Returns number of dimensions corresponding to non-identical draws."""
seed_api: tensorflow.python.framework.ops.convert_to_tensor
index: 5,342

seed:
import tensorflow as tf

            idx2 = cell_arch[bi][2]
            block_use = tf.one_hot(idx1, ni, dtype=tf.int32) + tf.one_hot(idx2, ni, dtype=tf.int32)
            block_uses.append(block_use)
        block_uses = tf.add_n(block_uses)
        unused_indices = tf.reshape(tf.cast(tf.where(tf.equal(block_uses, 0)), tf.int32), [-1])
        num_out_blocks = tf.size(unused_indices)

        # Select only unused blocks
        with tf.variable_scope('select'):
            stacked_blocks = tf.stack(cell_inputs + blocks)
            out_blocks = tf.gather(stacked_blocks, unused_indices, axis=0)
            out_blocks = tf.transpose(out_blocks, (1, 2, 3, 0, 4))

        # Combine to constant channels
        with tf.variable_scope('combine'):
            W = self._make_var('W', (ni, block_ch * block_ch))
            W = tf.gather(W, unused_indices, axis=0)
            W = tf.reshape(W, (1, 1, num_out_blocks * block_ch, block_ch))
seed_api: tensorflow.stack
index: 5,343

seed:
import tensorflow as tf

    config = copy.deepcopy(config)
    if not is_training:
      config.hidden_dropout_prob = 0.0
      config.attention_probs_dropout_prob = 0.0

    input_shape = get_shape_list(input_ids, expected_rank=2)
    batch_size = input_shape[0]
    seq_length = input_shape[1]

    if input_mask is None:
      input_mask = tf.ones(shape=[batch_size, seq_length], dtype=tf.int32)

    if token_type_ids is None:
      token_type_ids = tf.zeros(shape=[batch_size, seq_length], dtype=tf.int32)

    with tf.variable_scope(scope, default_name="bert"):
      with tf.variable_scope("embeddings"):
        # Perform embedding lookup on the word ids.
        (self.word_embedding_output,
         self.output_embedding_table) = embedding_lookup(
             input_ids=input_ids,
             vocab_size=config.vocab_size,
             embedding_size=config.embedding_size,
             initializer_range=config.initializer_range,
             word_embedding_name="word_embeddings",
             use_one_hot_embeddings=use_one_hot_embeddings)
seed_api: tensorflow.zeros
index: 5,344

seed:
import tensorflow as tf

          obs_shape[:1] + [num_target_frames] + [action_space.n]),
      "target_value": tf.zeros(
          obs_shape[:1] + target_value_shape_suffix)
  }
  model.distributional_value_size = max(distributional_size, 1)
  model.use_epochs = hparams.use_epochs
  with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE):
    t2t_model.create_dummy_vars()
    (targets, _) = model(features)
  target_values = targets["target_value"][:, 0]
  if distributional_size > 1:
    target_values = targets["target_value"][:, :]
seed_api: tensorflow.get_variable_scope
index: 5,345

seed:
import tensorflow as tf

        rgb = tf.floor(rgb / (2 ** (8 - n_bits)))
        rgb = rgb / (n_bins) - 0.5
        return rgb

    def post_process(self, rgb, add_dequantization_noise=True):
        n_bits = config.model.data.n_bits
        n_bins = 2 ** n_bits
        rgb_out = rgb

        # discretization noise
        if add_dequantization_noise:
            shape = tf.shape(rgb_out)
            rgb_out += tf.random_uniform(shape=shape) * (1 / n_bins)

        return rgb_out
seed_api: tensorflow.random_uniform
index: 5,346

seed:
import tensorflow as tf

def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate,
                     num_train_steps, num_warmup_steps, use_tpu,
                     use_one_hot_embeddings):
  """Returns `model_fn` closure for TPUEstimator."""

  def model_fn(features, labels, mode, params):  # pylint: disable=unused-argument
    """The `model_fn` for TPUEstimator."""

    tf.logging.info("*** Features ***")
    for name in sorted(features.keys()):
      tf.logging.info("  name = %s, shape = %s" % (name, features[name].shape))

    input_ids = features["input_ids"]
    input_mask = features["input_mask"]
    segment_ids = features["segment_ids"]
    label_ids = features["label_ids"]
    is_real_example = None
    if "is_real_example" in features:
      is_real_example = tf.cast(features["is_real_example"], dtype=tf.float32)
    else:
      is_real_example = tf.ones(label_ids.shape[0], dtype=tf.float32)
seed_api: tensorflow.logging.info
index: 5,347

seed:
import tensorflow as tf

  if output_dtype is None:
    raise TypeError('Tensor type %r is not supported' % x.dtype)

  with tf.compat.v1.name_scope('tukey_parameters'):
    x = tf.cast(x, output_dtype)

    (count_l1, l1, count_l2, l2, count_l3, l3, count_l4, l4) = (
        tf_utils.reduce_batch_count_l_moments(x, reduce_instance_dims))
seed_api: tensorflow.cast
index: 5,348

seed:
from tensorflow.python.ops import gen_nn_ops

    name: A name for the operation (optional).

  Returns:
    A 1-D `Tensor` of length `batch_size` of the same type as `logits` with the
    softmax cross entropy loss.
  """
  # The second output tensor contains the gradients. We use it in
  # _CrossEntropyGrad() in nn_grad but not here.
  cost, unused_backprop = gen_nn_ops._softmax_cross_entropy_with_logits(
      logits, labels, name=name)
  return cost


def sparse_softmax_cross_entropy_with_logits(logits, labels, name=None):
  """Computes sparse softmax cross entropy between `logits` and `labels`.
seed_api: tensorflow.python.ops.gen_nn_ops._softmax_cross_entropy_with_logits
index: 5,349

seed:
import tensorflow as tf

    facts = tf.concat(facts, 2)

    if time_major:
        # (T,B,D) => (B,T,D)
        facts = tf.array_ops.transpose(facts, [1, 0, 2])

    # Trainable parameters
    mask = tf.equal(mask, tf.ones_like(mask))
    facts_size = facts.get_shape().as_list()[-1]  # D value - hidden size of the RNN layer
    querry_size = query.get_shape().as_list()[-1]
    query = tf.layers.dense(query, facts_size, activation=None, name='f1_trans_shine' + stag)
    query = prelu(query)
    queries = tf.tile(query, [1, tf.shape(facts)[1]])
    queries = tf.reshape(queries, tf.shape(facts))
    din_all = tf.concat([queries, facts, queries - facts, queries * facts], axis=-1)
    d_layer_1_all = tf.layers.dense(din_all, facts_size, activation=tf.nn.sigmoid, name='f1_shine_att' + stag)
    d_layer_2_all = tf.layers.dense(d_layer_1_all, facts_size, activation=tf.nn.sigmoid, name='f2_shine_att' + stag)
    d_layer_2_all = tf.reshape(d_layer_2_all, tf.shape(facts))
    output = d_layer_2_all
    return output
seed_api: tensorflow.shape
index: 5,350

seed:
import tensorflow as tf

def validation_mapper(byte):
    image = tf.image.decode_jpeg(
        tf.reshape(byte, shape=[]), 3, **JPEG_OPT)
    image = resize_shortest_edge(image, tf.shape(image), 256)
    image = center_crop(image, 224)
    image = tf.reverse(image, axis=[2])  # to BGR
    return image


def training_mapper(byte):
    jpeg_shape = tf.image.extract_jpeg_shape(byte)  # hwc
    bbox_begin, bbox_size, distort_bbox = tf.image.sample_distorted_bounding_box(
        jpeg_shape,
        bounding_boxes=tf.zeros(shape=[0, 0, 4]),
        min_object_covered=0,
        aspect_ratio_range=[0.75, 1.33],
        area_range=[0.08, 1.0],
        max_attempts=10,
        use_image_if_no_bounding_boxes=True)
seed_api: tensorflow.image.extract_jpeg_shape
index: 5,351

seed:
import tensorflow as tf

  As described in https://arxiv.org/pdf/1608.06993v3.pdf (page 5).

  Args:
    image: a Tensor.

  Returns:
    Tensor of the same shape as image.
  """
  image = tf.image.resize_with_crop_or_pad(image, 40, 40)
  image = tf.image.random_crop(image, [32, 32, 3])
  image = tf.image.random_flip_left_right(image)
  return image


# Makes the function accessible in gin configs, even with all args denylisted.
@gin.configurable(module='trax.data', denylist=['dataset', 'training'])
def cifar10_augmentation_preprocess(dataset, training):
seed_api: tensorflow.image.resize_with_crop_or_pad
index: 5,352

seed:
import tensorflow as tf

# Optimisation hyperparameters
tf.app.flags.DEFINE_integer('batch-size', 256, 'Number of examples per mini-batch (default: %(default)d)')
tf.app.flags.DEFINE_float('learning-rate', 1e-4, 'Learning rate (default: %(default)d)')
tf.app.flags.DEFINE_integer('img-width', 32, 'Image width (default: %(default)d)')
tf.app.flags.DEFINE_integer('img-height', 32, 'Image height (default: %(default)d)')
tf.app.flags.DEFINE_integer('img-channels', 3, 'Image channels (default: %(default)d)')
tf.app.flags.DEFINE_integer('num-classes', 10, 'Number of classes (default: %(default)d)')
seed_api: tensorflow.app.flags.DEFINE_integer
index: 5,353

seed:
import tensorflow as tf

        logits = tf.nn.xw_plus_b(tweet_embedding, hidden, bias)
        self.probs = tf.nn.softmax(logits)

        self._xent = tf.nn.softmax_cross_entropy_with_logits(logits, self.y)
        self.cost = tf.reduce_mean(self.example_weights * self._xent)
seed_api: tensorflow.nn.softmax_cross_entropy_with_logits
index: 5,354

seed:
import tensorflow as tf

    net_1 = convLinear(inpOp, nIn, nOut, kH, kW, dH, dW, padType, name + '_1', phase_train, use_batch_norm, weight_decay)
    net_2 = convLinear(inpOp, nIn, nOut, kH, kW, dH, dW, padType, name + '_2', phase_train, use_batch_norm, weight_decay)
    out = tf.maximum(net_1, net_2)
    return out


def affine(inpOp, nIn, nOut, name, weight_decay=0.0):
    with tf.variable_scope(name):
        l2_regularizer = lambda t: l2_loss(t, weight=weight_decay)
        weights = tf.get_variable("weights", [nIn, nOut],
                                  initializer=tf.truncated_normal_initializer(stddev=1e-1),
                                  regularizer=l2_regularizer, dtype=inpOp.dtype)
        biases = tf.get_variable("biases", [nOut], initializer=tf.constant_initializer(), dtype=inpOp.dtype)
        affine1 = tf.nn.relu_layer(inpOp, weights, biases)
    return affine1


def l2_loss(tensor, weight=1.0, scope=None):
    """Define a L2Loss, useful for regularize, i.e. weight decay.

    Args:
        tensor: tensor to regularize.
        weight: an optional weight to modulate the loss.
        scope: Optional scope for op_scope.

    Returns:
        the L2 loss op.
    """
    with tf.name_scope(scope):
seed_api: tensorflow.nn.relu_layer
index: 5,355

seed:
import tensorflow as tf

    h_pool5 = h_conv5
    h_pool5 = tf.reshape(h_pool5, shape=[-1, POOL_X, POOL_Y])
    feature_mat = h_pool5
    print("----feature_mat-----")
    print(feature_mat)
seed_api: tensorflow.reshape
index: 5,356

seed:
import tensorflow as tf

def _evaluate_legendre_polynomial_loop_body(x, n, l, m, pmm, pmm1):
  n_float = tf.cast(n, dtype=x.dtype)
  m_float = tf.cast(m, dtype=x.dtype)
  pmn = (x * (2.0 * n_float - 1.0) * pmm1 -
         (n_float + m_float - 1) * pmm) / (n_float - m_float)
  pmm = tf.where(tf.less_equal(n, l), pmm1, pmm)
  pmm1 = tf.where(tf.less_equal(n, l), pmn, pmm1)
  n += 1
  return x, n, l, m, pmm, pmm1


def _evaluate_legendre_polynomial_loop(x, m, l, pmm, pmm1):
seed_api: tensorflow.less_equal
index: 5,357

seed:
from tensorflow.python.ops import variable_scope as vs

            [inputs, state],
            2 * self._num_units,
            True,
            bias_initializer=bias_ones,
            kernel_initializer=self._kernel_initializer)
    value = math_ops.sigmoid(self._gate_linear([inputs, state]))
    r, u = array_ops.split(value=value, num_or_size_splits=2, axis=1)

    r_state = r * state
    if self._candidate_linear is None:
      with vs.variable_scope("candidate"):
        self._candidate_linear = _Linear(
            [inputs, r_state],
            self._num_units,
            True,
            bias_initializer=self._bias_initializer,
            kernel_initializer=self._kernel_initializer)
    c = self._activation(self._candidate_linear([inputs, r_state]))
    new_h = (1. - att_score) * state + att_score * c
    return new_h, new_h
seed_api: tensorflow.python.ops.variable_scope.variable_scope
index: 5,358

import tensorflow as tf "output_weights", [num_labels, hidden_size], initializer=tf.truncated_normal_initializer(stddev=0.02)) output_bias = tf.get_variable( "output_bias", [num_labels], initializer=tf.zeros_initializer()) with tf.variable_scope("loss"): if is_training: # I.e., 0.1 dropout output_layer = tf.nn.dropout(output_layer, keep_prob=0.9) logits = tf.matmul(output_layer, output_weights, transpose_b=True) logits = tf.nn.bias_add(logits, output_bias) probabilities = tf.nn.softmax(logits, axis=-1) log_probs = tf.nn.log_softmax(logits, axis=-1) one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32) per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1) loss = tf.reduce_mean(per_example_loss) return (loss, per_example_loss, logits, probabilities)
tensorflow.nn.bias_add
5,359
seed:
import tensorflow as tf

class A2C(Base):
    def __init__(self, env, summary_dir='./', gpu=False):
        self.LR = 1e-4
        self.MINIBATCH = 32
        self.EPOCHS = 8
        self.EPSILON = 0.2
        self.EPS_LEN = 100000
        # GPU setup
        os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
        config = tf.ConfigProto(allow_soft_placement=True,
                                log_device_placement=False,
                                device_count={'GPU': gpu})
        config.gpu_options.allow_growth = True
        config.gpu_options.per_process_gpu_memory_fraction = 0.5
        # Placeholders
        self.sess = tf.Session(config=config)
        self.s_dim, self.a_dim = env.observation_space.shape, env.action_space.shape[0]
        self.a_bound = (env.action_space.high - env.action_space.low) / 2
        self.actions = tf.placeholder(tf.float32, [None, self.a_dim], 'action')
        self.state = tf.placeholder(tf.float32, [None, self.s_dim[0]], 'state')
        self.advantage = tf.placeholder(tf.float32, [None, 1], 'advantage')
seed_api: tensorflow.ConfigProto
index: 5,360

seed:
import tensorflow as tf

        )

        # Masking tensor.
        left_mask = _left_mask(
            batch_shaped(band_features["before_padding"]), window_size)
        right_mask = _right_mask(
            batch_shaped(band_features["after_padding"]), window_size
        )
        self.mask = batch_2win_shaped(
            tf.logical_and(
                tf.concat([left_mask, right_mask], axis=1),
                tile_to_2win(self.in_window)
            )
        )

    def dflux_dt(self, clip_magnitude: typing.Optional[float]) -> tf.Tensor:
        """Computes dflux/dt.

        :param clip_magnitude: Option for clipping the magnitude, if dt might be
            very small.
        :return: <float>[batch_size, 2 * window_size] dflux/dt tensor.
        """
        result = self.dflux / self.dtime
        if clip_magnitude is not None:
            result = tf.clip_by_value(
seed_api: tensorflow.concat
index: 5,361

seed:
import tensorflow as tf

    w_shape = [int(w) for w in weights.get_shape()]
    if len(w_shape) > 1 and int(w_shape[-2]) > 1:
        # Full convolutions
        if symmetric_weights:
            g = tf.get_default_graph()
            with g.gradient_override_map({'Conv3D': 'SymmetricConv3D'}):
                activities = tf.nn.conv3d(
                    data,
seed_api: tensorflow.get_default_graph
index: 5,362

seed:
import tensorflow as tf

        dtype=tf.float32)
    mask1 = tf.constant([[1, 1, 1, 1, 1, 1, 1, 1],
seed_api: tensorflow.constant
index: 5,363

seed:
import tensorflow as tf

            feature_emb_list.append(speaker_pair_emb)

            tiled_genre_emb = tf.tile(tf.expand_dims(tf.expand_dims(genre_emb, 0), 0), [k, c, 1])  # [k, c, emb]
            feature_emb_list.append(tiled_genre_emb)

        if self.config["use_features"]:
            antecedent_distance_buckets = self.bucket_distance(top_antecedent_offsets)  # [k, c]
            antecedent_distance_emb = tf.gather(tf.get_variable("antecedent_distance_emb", [10, self.config["feature_size"]]), antecedent_distance_buckets)  # [k, c]
            feature_emb_list.append(antecedent_distance_emb)

        feature_emb = tf.concat(feature_emb_list, 2)  # [k, c, emb]
        feature_emb = tf.nn.dropout(feature_emb, self.dropout)  # [k, c, emb]

        target_emb = tf.expand_dims(top_span_emb, 1)  # [k, 1, emb]
        similarity_emb = top_antecedent_emb * target_emb  # [k, c, emb]
        target_emb = tf.tile(target_emb, [1, c, 1])  # [k, c, emb]

        pair_emb = tf.concat([target_emb, top_antecedent_emb, similarity_emb, feature_emb], 2)  # [k, c, emb]

        with tf.variable_scope("slow_antecedent_scores"):
seed_api: tensorflow.concat
index: 5,364

import tensorflow as tf """ def mlp_actor_critic(x, a, hidden_sizes=(400,300), activation=tf.nn.relu, output_activation=tf.tanh, action_space=None, dropout_rate=0, nn_type='mlp_variational'): act_dim = a.shape.as_list()[-1] act_limit = action_space.high[0] if nn_type == 'mlp': with tf.variable_scope('pi'): pi = act_limit * mlp(x, list(hidden_sizes) + [act_dim], activation, output_activation) with tf.variable_scope('q1'): q1 = tf.squeeze(mlp(tf.concat([x, a], axis=-1), list(hidden_sizes) + [1], activation, None), axis=1) with tf.variable_scope('q2'): q2 = tf.squeeze(mlp(tf.concat([x, a], axis=-1), list(hidden_sizes) + [1], activation, None), axis=1) with tf.variable_scope('q1', reuse=True): q1_pi = tf.squeeze(mlp(tf.concat([x, pi], axis=-1), list(hidden_sizes) + [1], activation, None), axis=1) elif nn_type == 'mlp_dropout': with tf.variable_scope('pi'): pi = act_limit * mlp_dropout(x, list(hidden_sizes)+[act_dim], activation, output_activation) with tf.variable_scope('q'): q = tf.squeeze(mlp_dropout(tf.concat([x,a], axis=-1), list(hidden_sizes)+[1], activation, None, dropout_rate), axis=1) with tf.variable_scope('q', reuse=True): q_pi = tf.squeeze(mlp_dropout(tf.concat([x,pi], axis=-1), list(hidden_sizes)+[1], activation, None, dropout_rate), axis=1)
tensorflow.variable_scope
5,365
seed:
import tensorflow as tf

    batch_size = params["batch_size"]

    # For training, we want a lot of parallel reading and shuffling.
    # For eval, we want no shuffling and parallel reading doesn't matter.
    d = tf.data.TFRecordDataset(input_file)
    if is_training:
        d = d.repeat()
        d = d.shuffle(buffer_size=100)
seed_api: tensorflow.data.TFRecordDataset
index: 5,366

seed:
import tensorflow as tf

        with tf.variable_scope('q1'):
            q1_in_ph = tf.concat([x, a], axis=-1)
            q1_in_dim = q1_in_ph.shape.as_list()[1]
            q1_dropout_mask_generator = DropoutMaskGenerator(q1_in_dim, hidden_sizes, model_prob=1.0 - dropout_rate)
            q1_dropout_mask_phs = q1_dropout_mask_generator.generate_dropout_mask_placeholders()
            q1, q1_reg = mlp_variational(q1_in_ph, q1_dropout_mask_phs, list(hidden_sizes) + [1],
                                         activation, None, dropout_rate)
            q1 = tf.squeeze(q1, axis=2)
        with tf.variable_scope('q1', reuse=True):
            q1_pi, q1_pi_reg = mlp_variational(tf.concat([x, pi[0]], axis=-1), q1_dropout_mask_phs,
                                               list(hidden_sizes) + [1], activation, None, dropout_rate)
            q1_pi = tf.squeeze(q1_pi, axis=2)
        with tf.variable_scope('q2'):
            q2_in_ph = tf.concat([x, a], axis=-1)
            q2_in_dim = q2_in_ph.shape.as_list()[1]
            q2_dropout_mask_generator = DropoutMaskGenerator(q2_in_dim, hidden_sizes, model_prob=1.0 - dropout_rate)
            q2_dropout_mask_phs = q2_dropout_mask_generator.generate_dropout_mask_placeholders()
            q2, q2_reg = mlp_variational(q2_in_ph, q2_dropout_mask_phs, list(hidden_sizes) + [1],
                                         activation, None, dropout_rate)
            q2 = tf.squeeze(q2, axis=2)
    else:
        raise ValueError('Please choose a proper nn_type!')
    return pi, pi_reg, pi_dropout_mask_generator, pi_dropout_mask_phs,\
           q1, q1_reg, q1_dropout_mask_generator, q1_dropout_mask_phs, q1_pi, q1_pi_reg,\
           q2, q2_reg, q2_dropout_mask_generator, q2_dropout_mask_phs
seed_api: tensorflow.concat
index: 5,367

seed:
import tensorflow as tf

    raw_data = reader.ptb_raw_data(FLAGS.data_path)
    train_data, valid_data, test_data, _ = raw_data

    config = get_config()
    eval_config = get_config()
    eval_config.batch_size = 1
    eval_config.num_steps = 1

    train_graph = tf.Graph()
    eval_graph = tf.Graph()
    infer_graph = tf.Graph()

    with train_graph.as_default():
        initializer = tf.random_uniform_initializer(-config.init_scale, config.init_scale)

        with tf.name_scope('Train'):
            train_input = DataInput(config=config, data=train_data, name='TrainInput')
            with tf.variable_scope('Model', reuse=None, initializer=initializer):
                m = Model(is_training=True, config=config, input_=train_input, graph=train_graph)
seed_api: tensorflow.Graph
index: 5,368

seed:
import tensorflow as tf

    return loss


def contra_step_lossV3(pred, tgt, margin=1.0):
    # Step-wise contrastive loss
    pred1, pred2 = tf.split(pred, 2, axis=0)
    tgt1, tgt2 = tf.split(tgt, 2, axis=0)
    geq = tf.cast((tgt1 - tgt2) > 0, tf.bool)
    tgt_larg = tf.where(geq, tgt1, tgt2)
    tgt_small = tf.where(geq, tgt2, tgt1)
    pred_larg = tf.where(geq, pred1, pred2)
    pred_small = tf.where(geq, pred2, pred1)
    loss = tf.maximum(0.0, (tgt_larg - tgt_small) - (pred_larg - pred_small) + margin)
    loss = tf.reduce_mean(loss)
    return loss


def contra_step_lossV4(pred, tgt):
    # 50*50
    # Step-wise contrastive loss
seed_api: tensorflow.where
index: 5,369

seed:
import tensorflow as tf

            LayersConfig.set_keep[name] = tf.placeholder(tf.float32)
            self.outputs = tf.nn.dropout(self.inputs, LayersConfig.set_keep[name], seed=seed, name=name)  # 1.2
seed_api: tensorflow.nn.dropout
index: 5,370

seed:
import tensorflow as tf

tf.flags.DEFINE_string(
seed_api: tensorflow.flags.DEFINE_string
index: 5,371

seed:
import tensorflow as tf

        with tf.name_scope('preprocess'):
            A = tf.transpose(A / 128.0 - 1.0, [0, 3, 1, 2])
            B = tf.transpose(B / 128.0 - 1.0, [0, 3, 1, 2])

        def viz3(name, a, b, c):
            with tf.name_scope(name):
                im = tf.concat([a, b, c], axis=3)
                im = tf.transpose(im, [0, 2, 3, 1])
                im = (im + 1.0) * 128
                im = tf.clip_by_value(im, 0, 255)
                im = tf.cast(im, tf.uint8, name='viz')
            tf.summary.image(name, im, max_outputs=50)
seed_api: tensorflow.concat
index: 5,372

seed:
import tensorflow as tf

        if attention_mask is not None:
            attention_mask = tf.reshape(
seed_api: tensorflow.reshape
index: 5,373

seed:
import tensorflow as tf

        self.loss = self.loss1 + self.loss2
        self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(self.loss)


# In[3]:

tf.reset_default_graph()
sess = tf.InteractiveSession()

size_layers = 128
learning_rate = 1e-3
num_layers = 2
seed_api: tensorflow.reset_default_graph
index: 5,374

seed:
import tensorflow as tf

  for grad, var in grads_and_vars:
    if grad is not None:
      if isinstance(grad, tf.IndexedSlices):
        grad_values = grad.values
      else:
        grad_values = grad
      summaries.append(tf.histogram_summary(var.op.name + ':gradient',
                                            grad_values))
      summaries.append(tf.histogram_summary(var.op.name + ':gradient_norm',
                                            tf.global_norm([grad_values])))
    else:
      tf.logging.info('Var %s has no gradient', var.op.name)
  return summaries


class DeploymentConfig(object):
  """Configuration for deploying a model with `deploy()`.

  You can pass an instance of this class to `deploy()` to specify exactly
  how to deploy the model to build. If you do not pass one, an instance built
  from the default deployment_hparams will be used.
  """
seed_api: tensorflow.logging.info
index: 5,375

seed:
import tensorflow as tf

    def loss(self, logits, forward_only=None):
        cost = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=tf.cast(self.y, tf.float32))
        mean_cost = tf.reduce_mean(cost)
seed_api: tensorflow.cast
index: 5,376

seed:
import tensorflow as tf

  Args:
    private_samples: a tensor of shape [num_samples, num_features].
    shared_samples: a tensor of shape [num_samples, num_features].
    weight: the weight of the incoherence loss.
    name: the name of the tf summary.
  """
  with tf.name_scope(name):
    private_samples -= tf.reduce_mean(private_samples, 0)
    shared_samples -= tf.reduce_mean(shared_samples, 0)

    private_samples = tf.nn.l2_normalize(private_samples, 1)
    shared_samples = tf.nn.l2_normalize(shared_samples, 1)

    correlation_matrix = tf.matmul(private_samples, shared_samples, transpose_a=True)

    cost = tf.reduce_mean(tf.square(correlation_matrix)) * weight
    cost = tf.where(cost > 0, cost, 0, name='value')

    assert_op = tf.Assert(tf.is_finite(cost), [cost])
    with tf.control_dependencies([assert_op]):
      barrier = tf.no_op(name)
seed_api: tensorflow.nn.l2_normalize
index: 5,377

seed:
import tensorflow as tf

            attn_dist = add_first_word_prob_to_atten_dists(self.in_passage_words, self.phrase_starts,
                                                           vocab_dist, attn_dist)

        # match attn_dist[batch_size, passage_length] to sparse one-hot representation [batch_size, passage_length, extended_vsize]
        batch_nums = tf.range(0, limit=batch_size)  # shape (batch_size)
        batch_nums = tf.expand_dims(batch_nums, axis=1)  # shape (batch_size, 1)
        batch_nums = tf.tile(batch_nums, [1, passage_length])  # shape (batch_size, passage_length)
        step_nums = tf.range(0, limit=passage_length)  # [passage_length]
        step_nums = tf.expand_dims(step_nums, axis=0)  # shape (1, passage_length)
        step_nums = tf.tile(step_nums, [batch_size, 1])  # shape (batch_size, passage_length)
        indices = tf.stack((batch_nums, step_nums, passage_word_idx), axis=2)  # shape (batch_size, passage_length, 3)
        indices = tf.reshape(indices, [-1, 3])  # [batch_size * passage_length, 3]
        indices = tf.cast(indices, tf.int64)
seed_api: tensorflow.range
index: 5,378

seed:
import tensorflow as tf

        optimizer = tf.train.AdadeltaOptimizer(learning_rate)
    elif params['optimizer'] == 'adagrad':
        optimizer = tf.train.AdagradOptimizer(learning_rate)
    elif params['optimizer'] == 'rmsprop':
        optimizer = tf.train.RMSPropOptimizer(
            learning_rate, momentum=params['momentum'])
    elif params['optimizer'] == 'lars':
        optimizer = tf.contrib.opt.LARSOptimizer(
seed_api: tensorflow.train.RMSPropOptimizer
index: 5,379

seed:
import tensorflow as tf

        @property
        def summary_added(self):
            return self._summary_added

    return Layer


@layer
def identity_layer(tensor, **opts):
    out = tf.identity(tensor)
    return out


@layer
def embedding_layer(tensor, vocab_size=None, embedding_dim=None, embedding_matrix=None, **opts):
    if embedding_matrix is None:
        initializer = tf.contrib.layers.xavier_initializer(uniform=True)
        embedding_matrix = tf.get_variable("embedding_matrix",
                                           initializer=initializer(shape=(vocab_size, embedding_dim)))
seed_api: tensorflow.identity
index: 5,380

seed:
import tensorflow as tf

            aggregated_lm_emb = tf.reshape(flattened_aggregated_lm_emb,
                                           [num_sentences, max_sentence_length, lm_emb_size])
            aggregated_lm_emb *= self.lm_scaling
            context_emb_list.append(aggregated_lm_emb)

        context_emb = tf.concat(context_emb_list, 2)  # [num_sentences, max_sentence_length, emb]
        head_emb = tf.concat(head_emb_list, 2)  # [num_sentences, max_sentence_length, emb]
        context_emb = tf.nn.dropout(context_emb, self.lexical_dropout)  # [num_sentences, max_sentence_length, emb]
        head_emb = tf.nn.dropout(head_emb, self.lexical_dropout)  # [num_sentences, max_sentence_length, emb]
seed_api: tensorflow.concat
index: 5,381

import tensorflow as tf """ :description: Placeholders """ def _setup_placeholders(self): if self.demo: self.c = tf.placeholder(tf.int32, [None, self.config.max_p_len], "context") self.q = tf.placeholder(tf.int32, [None, self.config.max_q_len], "question") self.ch = tf.placeholder(tf.int32, [None, self.config.max_p_len, self.config.max_ch_len], "context_char") self.qh = tf.placeholder(tf.int32, [None, self.config.max_q_len, self.config.max_ch_len], "question_char") self.start_label = tf.placeholder(tf.int32, [None], "answer_label1") self.end_label = tf.placeholder(tf.int32, [None], "answer_label2") else: self.c = tf.placeholder(tf.int32, [self.config.batch_size * self.max_p_num, self.config.max_p_len],
tensorflow.placeholder
5,382
seed:
import tensorflow as tf

        bert_config=bert_config,
        num_labels=len(label_list),
        init_checkpoint=FLAGS.init_checkpoint,
        learning_rate=FLAGS.learning_rate,
        num_train_steps=num_train_steps,
        num_warmup_steps=num_warmup_steps,
        use_tpu=FLAGS.use_tpu,
        use_one_hot_embeddings=FLAGS.use_tpu)

    # If TPU is not available, this will fall back to normal Estimator on CPU
    # or GPU.
    estimator = tf.contrib.tpu.TPUEstimator(
        use_tpu=FLAGS.use_tpu,
        model_fn=model_fn,
        config=run_config,
        train_batch_size=FLAGS.train_batch_size,
        eval_batch_size=FLAGS.eval_batch_size,
        predict_batch_size=FLAGS.predict_batch_size)

    if FLAGS.do_train:
        train_file = os.path.join(FLAGS.output_dir, "train.tf_record")
        file_based_convert_examples_to_features(
seed_api: tensorflow.contrib.tpu.TPUEstimator
index: 5,383

seed:
import tensorflow as tf

    summaries = {
        'regularization_loss/KL/PriorMean/Mean':
            tf.math.reduce_mean(tf.constant(prior_mean)),
        'regularization_loss/KL/PriorVar/Mean':
            tf.math.reduce_mean(tf.constant(prior_stddev)**2),
        'regularization_loss/KL/Loss/Original':
            loss,
        'regularization_loss/KL/Loss/Weighted':
seed_api: tensorflow.constant
index: 5,384

seed:
import tensorflow as tf

    summed = tf.scan(lambda a, x: x[0] + x[1] * a,
                     sequences,
                     initializer=tf.convert_to_tensor(initial_value),
                     parallel_iterations=1,
seed_api: tensorflow.convert_to_tensor
index: 5,385

import tensorflow as tf """Name of dataset: imagenet or flowers. If not specified, it is automatically guessed based on --data_dir.""") tf.flags.DEFINE_string('resize_method', 'bilinear', """Method for resizing input images: crop,nearest,bilinear,bicubic or area. The 'crop' mode requires source images to be at least as large as the network input size, while the other modes support any sizes and apply random bbox distortions before resizing (even with --nodistortions).""") tf.flags.DEFINE_boolean('distortions', True, """Enable/disable distortions during image preprocessing. These include bbox and color distortions.""") tf.flags.DEFINE_string('local_parameter_device', 'gpu', """Device to use as parameter server: cpu or gpu. For distributed training, it can affect where caching of variables happens.""") tf.flags.DEFINE_string('device', 'gpu', """Device to use for computation: cpu or gpu""") #tf.flags.DEFINE_string('data_format', 'NCHW', tf.flags.DEFINE_string('data_format', 'NHWC', """Data layout to use: NHWC (TF native) or NCHW (cuDNN native).""") tf.flags.DEFINE_integer('num_intra_threads', 1, """Number of threads to use for intra-op parallelism. If set to 0, the system will pick an appropriate number.""") tf.flags.DEFINE_integer('num_inter_threads', 0,
tensorflow.flags.DEFINE_string
5,386
seed:
import tensorflow as tf

            self.assign_vars = []
            for var in tf.global_variables():
                v = self.var_ema.average(var)
seed_api: tensorflow.global_variables
index: 5,387

seed:
import tensorflow as tf

def nin(x, num_units, **kwargs):
    s = tf.shape(x)
    sh = x.get_shape().as_list()
    x = tf.reshape(x, [tf.reduce_prod(s[:-1]), sh[-1]])
    x = dense(x, num_units, **kwargs)
    return tf.reshape(x, [-1] + sh[1:-1] + [num_units])


def dense(x, num_units, scope="dense", training=True, ema=None, init=False,
          bias_initializer=tf.constant_initializer(0.)):
    with tf.variable_scope(scope):
        V = tf.get_variable('V', shape=[int(x.get_shape()[1]), num_units], dtype=tf.float32,
                            initializer=tf.random_normal_initializer(0, 0.05), trainable=True)
        g = tf.get_variable('g', shape=[num_units], dtype=tf.float32,
                            initializer=tf.constant_initializer(1.), trainable=True)
        b = tf.get_variable('b', shape=[num_units], dtype=tf.float32,
                            initializer=bias_initializer, trainable=True)

        def maybe_avg(v):
            if ema is not None and not init:
                v = tf.cond(training, lambda: v, lambda: ema.average(v))
seed_api: tensorflow.variable_scope
index: 5,388

seed:
import tensorflow as tf

            q1 = tf.squeeze(mlp(tf.concat([x, a], axis=-1), list(hidden_sizes) + [1], activation, None), axis=1)
        with tf.variable_scope('q2'):
            q2 = tf.squeeze(mlp(tf.concat([x, a], axis=-1), list(hidden_sizes) + [1], activation, None), axis=1)
        with tf.variable_scope('q1', reuse=True):
            q1_pi = tf.squeeze(mlp(tf.concat([x, pi], axis=-1), list(hidden_sizes) + [1], activation, None), axis=1)

    elif nn_type == 'mlp_dropout':
        with tf.variable_scope('pi'):
            pi = act_limit * mlp_dropout(x, list(hidden_sizes) + [act_dim], activation, output_activation)
seed_api: tensorflow.concat
index: 5,389

seed:
from tensorflow.python.framework import ops

  # gradients and the attrs, but if we do not know orig_input_shape
  # statically, then we are unlikely to know the shape of the
  # gradients either.
  return [tensor_shape.unknown_shape(ndims=4)]


@ops.RegisterShape("Conv2DBackpropFilter")
def _Conv2DBackpropFilterShape(op):
  """Shape function for the Conv2DBackpropFilter op."""
  filter_shape = tensor_util.constant_value(op.inputs[1])
  if filter_shape is not None:
    return [tensor_shape.TensorShape(filter_shape.tolist())]
seed_api: tensorflow.python.framework.ops.RegisterShape
index: 5,390

seed:
from tensorflow.python.ops import math_ops

  indices_at_minval = math_ops.equal(
      math_ops.abs(sensitivities - sensitivity), min_val)
  indices_at_minval = math_ops.to_int64(indices_at_minval)
  indices_at_minval = math_ops.cumsum(indices_at_minval)
  tf_index = math_ops.argmax(indices_at_minval, 0)
  tf_index = math_ops.cast(tf_index, dtypes.int32)
seed_api: tensorflow.python.ops.math_ops.cumsum
index: 5,391

seed:
import tensorflow as tf

                k_width, d_height=1, d_width=1, mode='SAME',
                input_layer=None, num_channels_in=None, batch_norm=None,
                activation='relu'):
    if input_layer is None:
      input_layer = self.top_layer
    if num_channels_in is None:
      num_channels_in = self.top_size
    name = 'conv' + str(self.counts['conv'])
    self.counts['conv'] += 1
    with tf.variable_scope(name):
      strides = [1, d_height, d_width, 1]
      if self.data_format == 'NCHW':
        strides = [strides[0], strides[3], strides[1], strides[2]]
      if mode != 'SAME_RESNET':
        conv = conv_layers.conv2d(
            input_layer,
            num_out_channels, [k_height, k_width],
            strides=[d_height, d_width],
            padding=mode,
            data_format=self.channel_pos,
            use_bias=False)
      else:  # Special padding mode for ResNet models
        if d_height == 1 and d_width == 1:
seed_api: tensorflow.variable_scope
index: 5,392

seed:
import tensorflow as tf

        deconv_shape2 = image_net["pool3"].get_shape()
        W_t2 = utils.weight_variable([4, 4, deconv_shape2[3].value, deconv_shape1[3].value], name="W_t2")
        b_t2 = utils.bias_variable([deconv_shape2[3].value], name="b_t2")
        conv_t2 = utils.conv2d_transpose_strided(fuse_1, W_t2, b_t2, output_shape=tf.shape(image_net["pool3"]))
        fuse_2 = tf.add(conv_t2, image_net["pool3"], name="fuse_2")

        shape = tf.shape(image)
        deconv_shape3 = tf.stack([shape[0], shape[1], shape[2], NUM_OF_CLASSESS])
        W_t3 = utils.weight_variable([16, 16, NUM_OF_CLASSESS, deconv_shape2[3].value], name="W_t3")
        b_t3 = utils.bias_variable([NUM_OF_CLASSESS], name="b_t3")
        conv_t3 = utils.conv2d_transpose_strided(fuse_2, W_t3, b_t3, output_shape=deconv_shape3, stride=8)

        annotation_pred = tf.argmax(conv_t3, dimension=3, name="prediction")

    return tf.expand_dims(annotation_pred, dim=3), conv_t3


def train(loss_val, var_list):
    optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate)
    grads = optimizer.compute_gradients(loss_val, var_list=var_list)
    if FLAGS.debug:
        # print(len(var_list))
        for grad, var in grads:
            utils.add_gradient_summary(grad, var)
    return optimizer.apply_gradients(grads)


def main(argv=None):
seed_api: tensorflow.expand_dims
index: 5,393

seed:
import tensorflow as tf

        marginalized_gold_scores = tf.reduce_logsumexp(gold_scores, [1])  # [k]
        log_norm = tf.reduce_logsumexp(antecedent_scores, [1])  # [k]
        return log_norm - marginalized_gold_scores  # [k]

    def bucket_distance(self, distances):
        """
        Places the given values (designed for distances) into 10 semi-logscale
        buckets: [0, 1, 2, 3, 4, 5-7, 8-15, 16-31, 32-63, 64+].
        """
        logspace_idx = tf.to_int32(tf.floor(tf.log(tf.to_float(distances)) / math.log(2))) + 3
        use_identity = tf.to_int32(distances <= 4)
        combined_idx = use_identity * distances + (1 - use_identity) * logspace_idx
        return tf.clip_by_value(combined_idx, 0, 9)

    def get_slow_antecedent_scores(self, top_span_emb, top_antecedents, top_antecedent_emb,
                                   top_antecedent_offsets, top_span_speaker_ids, genre_emb):
        k = util.shape(top_span_emb, 0)
        c = util.shape(top_antecedents, 1)
seed_api: tensorflow.to_float
index: 5,394

seed:
import tensorflow as tf

    pc_lr = pc_lr_decay_fn(learning_rate=1., global_step=global_step)
    pc_lr_true = tf.train.piecewise_constant(
seed_api: tensorflow.train.piecewise_constant
index: 5,395

seed:
import tensorflow as tf

        self.features = tf.placeholder(tf.float32, [None, self.L, self.D])
        self.captions = tf.placeholder(tf.int32, [None, self.T + 1])

    def _get_initial_lstm(self, features):
        with tf.variable_scope('initial_lstm'):
            features_mean = tf.reduce_mean(features, 1)

            w_h = tf.get_variable('w_h', [self.D, self.H], initializer=self.weight_initializer)
seed_api: tensorflow.variable_scope
index: 5,396

seed:
from tensorflow.contrib.eager.python.examples.linear_regression import linear_regression

  def testSyntheticDataset(self):
    true_w = tf.random_uniform([3, 1])
    true_b = [1.0]
    batch_size = 10
    num_batches = 2
    noise_level = 0.
    dataset = linear_regression.synthetic_dataset(true_w, true_b, noise_level,
                                                  batch_size, num_batches)

    it = tfe.Iterator(dataset)
    for _ in range(2):
      (xs, ys) = it.next()
      self.assertEqual((batch_size, 3), xs.shape)
seed_api: tensorflow.contrib.eager.python.examples.linear_regression.linear_regression.synthetic_dataset
index: 5,397

seed:
from tensorflow.python.framework import ops

@ops.RegisterShape("Relu6Grad")
@ops.RegisterShape("EluGrad")
@ops.RegisterShape("SoftplusGrad")
@ops.RegisterShape("SoftsignGrad")
def _BinaryElementwiseShape(op):
  """Returns same shape as both inputs to op.
seed_api: tensorflow.python.framework.ops.RegisterShape
index: 5,398

seed:
from tensorflow.python.ops import array_ops

    """Gated recurrent unit (GRU) with nunits cells."""
    if self._gate_linear is None:
      bias_ones = self._bias_initializer
      if self._bias_initializer is None:
        bias_ones = init_ops.constant_initializer(1.0, dtype=inputs.dtype)
      with vs.variable_scope("gates"):  # Reset gate and update gate.
        self._gate_linear = _Linear(
            [inputs, state],
            2 * self._num_units,
            True,
            bias_initializer=bias_ones,
            kernel_initializer=self._kernel_initializer)

    value = math_ops.sigmoid(self._gate_linear([inputs, state]))
    r, u = array_ops.split(value=value, num_or_size_splits=2, axis=1)

    r_state = r * state
    if self._candidate_linear is None:
      with vs.variable_scope("candidate"):
        self._candidate_linear = _Linear(
            [inputs, r_state],
            self._num_units,
            True,
            bias_initializer=self._bias_initializer,
            kernel_initializer=self._kernel_initializer)
    c = self._activation(self._candidate_linear([inputs, r_state]))
    new_h = (1. - att_score) * state + att_score * c
    return new_h, new_h
seed_api: tensorflow.python.ops.array_ops.split
index: 5,399