Columns: seed (string, lengths 25 to 2.89k), seed_api (string, lengths 14 to 102), index (int64, 0 to 14.8k)
import tensorflow as tf

ema_op = self.var_ema.apply(tf.trainable_variables())
with tf.control_dependencies([ema_op]):
tensorflow.control_dependencies
4,800
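A minimal usage sketch for the seed API above (tf.control_dependencies); the names are illustrative and TF1 graph mode is assumed:

import tensorflow as tf

v = tf.Variable(0.0)
update = tf.assign_add(v, 1.0)  # op that must run first
with tf.control_dependencies([update]):
    # ops created here only run after `update` has executed
    read = tf.identity(v)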
import tensorflow as tf

            tf.gradients(ys=self.T, xs=self.X),
            self.X if self.has_multiple_inputs else [self.X])]

    @classmethod
    def nonlinearity_grad_override(cls, op, grad):
        output = op.outputs[0]
        input = op.inputs[0]
        return grad * output / (input + eps *
                                tf.compat.v1.where(input >= 0, tf.ones_like(input), -1 * tf.ones_like(input)))


"""
Integrated Gradients
https://arxiv.org/pdf/1703.01365.pdf
"""
class IntegratedGradients(GradientBasedMethod):
tensorflow.ones_like
4,801
import tensorflow as tf

    avg_norm_grads_f = avg_norm(grads_f) * (self.n_steps * self.n_envs)
    norm_grads_q = tf.global_norm(grads_q)
    norm_grads_policy = tf.global_norm(grads_policy)
else:
    grads = tf.gradients(loss, self.params)
tensorflow.global_norm
4,802
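A minimal sketch of the seed API above (tf.global_norm), computing the joint l2 norm of a list of tensors; the values are illustrative:

import tensorflow as tf

grads = [tf.constant([3.0, 4.0]), tf.constant([0.0])]
norm = tf.global_norm(grads)  # sqrt(3^2 + 4^2 + 0^2) = 5.0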
import tensorflow as tf

name_to_features = {
    "input_ids": tf.FixedLenFeature([seq_length], tf.int64),
    "input_mask": tf.FixedLenFeature([seq_length], tf.int64),
    "segment_ids": tf.FixedLenFeature([seq_length], tf.int64),
    "label_ids": tf.FixedLenFeature([seq_length], tf.int64),
    "is_real_example": tf.FixedLenFeature([1], tf.int64),
}
tensorflow.FixedLenFeature
4,803
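A minimal sketch of the seed API above (tf.FixedLenFeature) as a parsing spec for tf.parse_single_example; `serialized_example` and the feature name are assumptions:

import tensorflow as tf

# each serialized tf.Example is assumed to carry a fixed-length int64 list
spec = {"input_ids": tf.FixedLenFeature([4], tf.int64)}
parsed = tf.parse_single_example(serialized_example, spec)  # {"input_ids": <int64 tensor of shape [4]>}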
import tensorflow as tf

@registry.register_model
class FeedForwardCategoricalPolicy(PolicyBase):
  """Feed-forward categorical."""

  def body(self, features):
    observations = features["inputs_raw"]
    observations = tf.cast(observations, tf.float32)
    flat_observations = tf.layers.flatten(observations)
    with tf.variable_scope("policy"):
      x = flat_observations
      for size in self.hparams.policy_layers:
        x = tf.layers.dense(x, size, activation=tf.nn.relu)
      logits = tf.layers.dense(x, self.hparams.problem.num_actions)
      logits = tf.expand_dims(logits, axis=1)
    with tf.variable_scope("value"):
      x = flat_observations
      for size in self.hparams.value_layers:
        x = tf.layers.dense(x, size, activation=tf.nn.relu)
      value = tf.layers.dense(x, 1)
    logits = clip_logits(logits, self.hparams)
    return {"target_policy": logits, "target_value": value}


@registry.register_model
class FeedForwardCnnSmallCategoricalPolicy(PolicyBase):
tensorflow.layers.dense
4,804
import tensorflow as tf

tf.app.flags.DEFINE_integer('ws_nb_rlouts', 200, 'WS: # of roll-outs for the RL agent')
tf.app.flags.DEFINE_integer('ws_nb_rlouts_min', 50, 'WS: minimal # of roll-outs for the RL agent to start training')
tf.app.flags.DEFINE_string('ws_reward_type', 'single-obj', 'WS: reward type (\'single-obj\' OR \'multi-obj\')')
tf.app.flags.DEFINE_float('ws_lrn_rate_rg', 3e-2, 'WS: learning rate for layerwise regression')
tf.app.flags.DEFINE_integer('ws_nb_iters_rg', 20, 'WS: # of iterations for layerwise regression')
tf.app.flags.DEFINE_float('ws_lrn_rate_ft', 3e-4, 'WS: learning rate for global fine-tuning')
tf.app.flags.DEFINE_integer('ws_nb_iters_ft', 400, 'WS: # of iterations for global fine-tuning')
tf.app.flags.DEFINE_integer('ws_nb_iters_feval', 25, 'WS: # of iterations for fast evaluation')
tf.app.flags.DEFINE_float('ws_prune_ratio_exp', 3.0, 'WS: pruning ratio\'s exponent term')
tf.app.flags.DEFINE_float('ws_iter_ratio_beg', 0.1, 'WS: iteration ratio (at starting time)')
tf.app.flags.DEFINE_float('ws_iter_ratio_end', 0.5, 'WS: iteration ratio (at ending time)')
tf.app.flags.DEFINE_float('ws_mask_update_step', 500, 'WS: step size for updating the pruning mask')

def calc_prune_ratio(vars_list):
  """Calculate the overall pruning ratio for the given list of variables.

  Args:
  * vars_list: list of variables

  Returns:
tensorflow.app.flags.DEFINE_float
4,805
import tensorflow as tf

    return 196.0 * 21.0 / 4096.0
  else:
    rec = tf.cast(kw * kh, tf.float32)
    n_max = 7 + tf.math.ceil(tf.math.log(rec) / tf.math.log(2.))
    ns = tf.range(0., n_max)
    ns_pow = tf.pow(2., ns)
    ks = tf.round(ns_pow / rec)
    diffs = tf.math.abs(ks / ns_pow - 1 / rec)
    n = tf.argmin(diffs)
    k = ks[n]
    scale = k / tf.pow(2., tf.cast(n, tf.float32))
    scale *= rec
    return scale


@register_keras_serializable(
tensorflow.argmin
4,806
import tensorflow as tf

    target_q_func_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                           scope=tf.get_variable_scope().name + "/target_q_func")

    # q scores for actions which we know were selected in the given state.
    q_t_selected = tf.reduce_sum(q_t * tf.one_hot(act_t_ph, num_actions), 1)

    # compute estimate of best possible value starting from state at t + 1
    if double_q:
        q_tp1_using_online_net = q_func(obs_tp1_input.get(), num_actions, scope="q_func", reuse=True)
        q_tp1_best_using_online_net = tf.argmax(q_tp1_using_online_net, 1)
        q_tp1_best = tf.reduce_sum(q_tp1 * tf.one_hot(q_tp1_best_using_online_net, num_actions), 1)
    else:
        q_tp1_best = tf.reduce_max(q_tp1, 1)
    q_tp1_best_masked = (1.0 - done_mask_ph) * q_tp1_best

    # compute RHS of bellman equation
    q_t_selected_target = rew_t_ph + gamma * q_tp1_best_masked

    # compute the error (potentially clipped)
    td_error = q_t_selected - tf.stop_gradient(q_t_selected_target)
    errors = U.huber_loss(td_error)
    weighted_error = tf.reduce_mean(importance_weights_ph * errors)
tensorflow.reduce_max
4,807
import tensorflow as tf

    if gradient_clip is not None:
      clipped_grads = [(tf.clip_by_value(grad, -gradient_clip, +gradient_clip), var)
                       for grad, var in avg_grads]
    else:
      clipped_grads = avg_grads

    if FLAGS.optimizer == 'momentum':
      opt = tf.train.MomentumOptimizer(learning_rate, FLAGS.momentum, use_nesterov=True)
    elif FLAGS.optimizer == 'sgd':
      opt = tf.train.GradientDescentOptimizer(learning_rate)
    elif FLAGS.optimizer == 'rmsprop':
      opt = tf.train.RMSPropOptimizer(learning_rate, FLAGS.rmsprop_decay,
                                      momentum=FLAGS.rmsprop_momentum,
                                      epsilon=FLAGS.rmsprop_epsilon)
    else:
      # use %-formatting so the flag value actually appears in the message
      raise ValueError('Optimizer "%s" was not recognized' % FLAGS.optimizer)

    self.variable_mgr.append_apply_gradients_ops(gradient_state, opt, clipped_grads, training_ops)

    train_op = tf.group(*(training_ops + update_ops + extra_nccl_ops))

    with tf.device(self.cpu_device):
tensorflow.train.GradientDescentOptimizer
4,808
import tensorflow as tf

    sigma2 = sigma * sigma
    inside_mul = tf.multiply(bbox_inside_weights, tf.subtract(bbox_pred, bbox_targets))

    smooth_l1_sign = tf.cast(tf.less(tf.abs(inside_mul), 1.0 / sigma2), tf.float32)
    smooth_l1_option1 = tf.multiply(tf.multiply(inside_mul, inside_mul), 0.5 * sigma2)
    smooth_l1_option2 = tf.subtract(tf.abs(inside_mul), 0.5 / sigma2)
    smooth_l1_result = tf.add(tf.multiply(smooth_l1_option1, smooth_l1_sign),
                              tf.multiply(smooth_l1_option2, tf.abs(tf.subtract(smooth_l1_sign, 1.0))))

    outside_mul = tf.multiply(bbox_outside_weights, smooth_l1_result)

    return outside_mul


def xdet_model_fn(features, labels, mode, params):
    """Our model_fn for ResNet to be used with our Estimator."""
tensorflow.subtract
4,809
from tensorflow.contrib import tpu as contrib_tpu

          mode=mode,
          loss=total_loss,
          eval_metrics=eval_metrics,
          scaffold_fn=scaffold_fn)
    else:
      output_spec = contrib_tpu.TPUEstimatorSpec(
          mode=mode,
          predictions={
              "probabilities": probabilities,
              "predictions": predictions
tensorflow.contrib.tpu.TPUEstimatorSpec
4,810
import tensorflow as tf

                stddev=1.0 / math.sqrt(float(128))),
            name="weights")
        biases = tf.Variable(tf.zeros([32]), name="biases")
        hidden2 = tf.nn.relu(tf.matmul(hidden1, weights) + biases)
    # Linear
    with tf.name_scope("softmax_linear"):
        weights = tf.Variable(
tensorflow.matmul
4,811
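A minimal sketch of the seed API above (tf.matmul); shapes follow standard matrix multiplication rules:

import tensorflow as tf

a = tf.constant([[1.0, 2.0]])    # shape [1, 2]
b = tf.constant([[3.0], [4.0]])  # shape [2, 1]
c = tf.matmul(a, b)              # shape [1, 1], value [[11.0]]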
import tensorflow as tf

        infer_step, [result, logits, loss],
        shape_invariants=[
            tf.TensorShape([None, None, None, None]),
            tf.TensorShape([None, None, None, None, None]),
tensorflow.TensorShape
4,812
import tensorflow as tf

    cropped_image = scipy.misc.imresize(image, [resize_height, resize_width])
    return np.array(cropped_image) / 127.5 - 1.

def inverse_transform(images):
    return (images + 1.) / 2.

def lrelu(x, leak=0.2, name="lrelu"):
    return tf.maximum(x, leak * x)

def conv2d(input_, output_dim, k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02, name="conv2d"):
    with tf.variable_scope(name):
        w = tf.get_variable('w', [k_h, k_w, input_.get_shape()[-1], output_dim],
                            initializer=tf.truncated_normal_initializer(stddev=stddev))
        # print("c", w.get_shape())
        conv = tf.nn.conv2d(input_, w, strides=[1, d_h, d_w, 1], padding='SAME')
        biases = tf.get_variable('biases', [output_dim], initializer=tf.constant_initializer(0.0))
        conv = tf.reshape(tf.nn.bias_add(conv, biases), conv.get_shape())
        return conv

class batch_norm(object):
    def __init__(self, epsilon=1e-5, momentum=0.9, name="batch_norm"):
        with tf.variable_scope(name):
            self.epsilon = epsilon
            self.momentum = momentum
            self.name = name

    def __call__(self, x):
tensorflow.nn.conv2d
4,813
import tensorflow as tf

                                max_seq_length, tokenizer)
        features.append(feature)
    return features

# added by wangxiao
# define the inputs of the signature
def serving_input_fn():
    label_ids = tf.placeholder(tf.int32, [None], name='label_ids')
    input_ids = tf.placeholder(tf.int32, [None, FLAGS.max_seq_length], name='input_ids')
    input_mask = tf.placeholder(tf.int32, [None, FLAGS.max_seq_length], name='input_mask')
    segment_ids = tf.placeholder(tf.int32, [None, FLAGS.max_seq_length], name='segment_ids')
    input_fn = tf.estimator.export.build_raw_serving_input_receiver_fn({
        'label_ids': label_ids,
        'input_ids': input_ids,
        'input_mask': input_mask,
        'segment_ids': segment_ids,
tensorflow.placeholder
4,814
import tensorflow as tf

        with tf.variable_scope(self.model_scope):
            # loss & extra evaluation metrics
            logits = self.forward_train(images)
            self.maskable_var_names = [var.name for var in self.maskable_vars]
            loss, metrics = self.calc_loss(labels, logits, self.trainable_vars)
            if FLAGS.enbl_dst:
                loss += self.helper_dst.calc_loss(logits, logits_dst)
            tf.summary.scalar('loss', loss)
            for key, value in metrics.items():
                tf.summary.scalar(key, value)

            # learning rate schedule
            self.global_step = tf.train.get_or_create_global_step()
            lrn_rate, self.nb_iters_train = self.setup_lrn_rate(self.global_step)
tensorflow.summary.scalar
4,815
import tensorflow as tf

    "How many steps to make in each estimator call.")
flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")
tf.flags.DEFINE_string(
    "tpu_name", None,
    "The Cloud TPU to use for training. This should be either the name "
    "used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
tensorflow.flags.DEFINE_string
4,816
import tensorflow as tf

    # Set up our moving statistics. When connecting in parallel, this is shared.
    self._moving_mean = tf.get_variable(
tensorflow.get_variable
4,817
import tensorflow as tf

        model.export_ops(name)
    metagraph = tf.train.export_meta_graph()
    if tf.__version__ < "1.1.0" and FLAGS.num_gpus > 1:
        raise ValueError("num_gpus > 1 is not supported for TensorFlow versions "
                         "below 1.1.0")
    soft_placement = False
    if FLAGS.num_gpus > 1:
        soft_placement = True
        util.auto_parallel(metagraph, m)

    with tf.Graph().as_default():
        tf.train.import_meta_graph(metagraph)
        for model in models.values():
            model.import_ops()
        sv = tf.train.Supervisor(logdir=FLAGS.save_path)
        config_proto = tf.ConfigProto(allow_soft_placement=soft_placement)
        with sv.managed_session(config=config_proto) as session:
            for i in range(config.max_max_epoch):
                lr_decay = config.lr_decay ** max(i + 1 - config.max_epoch, 0.0)
                m.assign_lr(session, config.learning_rate * lr_decay)
                print("Epoch: %d Learning rate: %.3f" % (i + 1, session.run(m.lr)))
tensorflow.train.import_meta_graph
4,818
import tensorflow as tf

    for i in range(2, 2 + len(expected_extra_dims)):
        mask = tf.expand_dims(mask, axis=i)
    mask = tf.tile(mask, [1, 1] + expected_extra_dims)
    return tf.where(mask, expanded_tensor, if_masked_tensor)


def initial_layer(
    window_feature: WindowFeatures, *, clip_magnitude=10.0, include_flux_and_time=False
) -> tf.Tensor:
    features = tf.expand_dims(window_feature.dflux_dt(clip_magnitude=clip_magnitude), 2)
    if include_flux_and_time:
        dflux = tf.expand_dims(window_feature.dflux, 2)
        dtime = tf.expand_dims(window_feature.dtime, 2)
        features = tf.concat([features, dflux, dtime], axis=2, name="initial_layer_concat")
    return features


class CutoffData:
    def __init__(self, config_json: dict):
        self.window_size: int = config_json["window_size"]
        self.band_time_diff: int = config_json["band_time_diff"]
        self.embedding_size: int = config_json["desired_num_cutoffs"]
tensorflow.expand_dims
4,819
import tensorflow as tf

    if len(env.observation_space.shape) == 1:
        # This means we are running on low-dimensional observations (e.g. RAM)
        input_shape = env.observation_space.shape
    else:
        img_h, img_w, img_c = env.observation_space.shape
        input_shape = (img_h, img_w, frame_history_len * img_c)
    num_actions = env.action_space.n

    # set up placeholders
    # placeholder for current observation (or state)
    obs_t_ph = tf.placeholder(tf.uint8, [None] + list(input_shape))
    # placeholder for current action
    act_t_ph = tf.placeholder(tf.int32, [None])
    # placeholder for current reward
    rew_t_ph = tf.placeholder(tf.float32, [None])
    # placeholder for next observation (or state)
    obs_tp1_ph = tf.placeholder(tf.uint8, [None] + list(input_shape))
    # placeholder for end of episode mask
    # this value is 1 if the next state corresponds to the end of an episode,
    # in which case there is no Q-value at the next state; at the end of an
    # episode, only the current state reward contributes to the target, not the
    # next state Q-value (i.e. target is just rew_t_ph, not rew_t_ph + gamma * q_tp1)
    done_mask_ph = tf.placeholder(tf.float32, [None])

    # casting to float on GPU ensures lower data transfer times.
    obs_t_float = tf.cast(obs_t_ph, tf.float32) / 255.0
    obs_tp1_float = tf.cast(obs_tp1_ph, tf.float32) / 255.0
tensorflow.placeholder
4,820
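A minimal sketch of the seed API above (tf.placeholder), fed at run time through feed_dict; TF1 graph mode is assumed:

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 3])  # batch dimension left dynamic
y = x * 2.0
with tf.Session() as sess:
    out = sess.run(y, feed_dict={x: [[1.0, 2.0, 3.0]]})  # [[2., 4., 6.]]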
import tensorflow as tf

        params: dict, user passed parameters.

    Returns:
        List of tensors from fromRGB 1x1 `Conv2D` layers.
    """
    with tf.variable_scope(name_or_scope=self.name, reuse=tf.AUTO_REUSE):
        # Get fromRGB layer properties.
        from_rgb = [
            params["discriminator_from_rgb_layers"][i][0][:]
            for i in range(len(params["discriminator_from_rgb_layers"]))
tensorflow.variable_scope
4,821
import tensorflow as tf

    layer["weights"] = tf.get_variable(name + "/weights", dtype=tf.float32, initializer=Winit)
    layer["bias"] = tf.get_variable(name + "/bias", dtype=tf.float32, initializer=binit)
tensorflow.get_variable
4,822
import tensorflow as tf

            last_forward.set_shape([None, cell_output_size])

            if encoder.final_state == 'concat_last':  # concats last states of all backward layers (full LSTM states)
                encoder_state_ = tf.concat(encoder_states_, axis=1)
            elif encoder.final_state == 'average':
                mask = tf.sequence_mask(encoder_input_length_, maxlen=tf.shape(encoder_outputs_)[1], dtype=tf.float32)
                mask = tf.expand_dims(mask, axis=2)
                encoder_state_ = tf.reduce_sum(mask * encoder_outputs_, axis=1) / tf.reduce_sum(mask, axis=1)
            elif encoder.final_state == 'average_inputs':
                mask = tf.sequence_mask(encoder_input_length_, maxlen=tf.shape(encoder_inputs_)[1], dtype=tf.float32)
                mask = tf.expand_dims(mask, axis=2)
                encoder_state_ = tf.reduce_sum(mask * encoder_inputs_, axis=1) / tf.reduce_sum(mask, axis=1)
            elif encoder.bidir and encoder.final_state == 'last_both':
                encoder_state_ = tf.concat([last_forward, last_backward], axis=1)
tensorflow.reduce_sum
4,823
import tensorflow as tf

    print(image.shape)
    print(mask.shape)
    input_image = np.concatenate([image, mask], axis=2)
    print(input_image.shape)

    sess_config = tf.ConfigProto()
    sess_config.gpu_options.allow_growth = True
    with tf.Session(config=sess_config) as sess:
        input_image = tf.constant(input_image, dtype=tf.float32)
        output = MODEL.build_server_graph(FLAGS, input_image)
        output = (output + 1.) * 127.5
        output = tf.reverse(output, [-1])
        output = tf.saturate_cast(output, tf.uint8)
        # load pretrained model
        vars_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
        assign_ops = []
        for var in vars_list:
            vname = var.name
            from_name = vname
            var_value = tf.contrib.framework.load_variable(MODEL_DIR, from_name)
            assign_ops.append(tf.assign(var, var_value))
        sess.run(assign_ops)
tensorflow.reverse
4,824
import tensorflow as tf

        saver.export_meta_graph(filename, as_text=True)
        with self.test_session(graph=tf.Graph()):
            # Imports the text format graph.
            tf.train.import_meta_graph(filename)
            # Writes wrong contents to the file.
            tf.train.write_graph(saver.as_saver_def(), os.path.dirname(filename),
                                 os.path.basename(filename))
        with self.test_session(graph=tf.Graph()):
            # Import should fail.
            with self.assertRaisesWithPredicateMatch(IOError, lambda e: "Cannot parse file"):
                tf.train.import_meta_graph(filename)
            # Deletes the file
            gfile.Remove(filename)
tensorflow.Graph
4,825
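A minimal sketch of the seed API above (tf.Graph), showing that ops are created in whichever graph is the current default:

import tensorflow as tf

g = tf.Graph()
with g.as_default():
    c = tf.constant(1.0)  # created in `g`, not in the global default graph
assert c.graph is g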
import tensorflow as tf

    grads_q = tf.gradients(loss_q * self.q_coef, self.params)
    grads = [gradient_add(g1, g2, param, verbose=self.verbose)
             for (g1, g2, param) in zip(grads_policy, grads_q, self.params)]

    avg_norm_grads_f = avg_norm(grads_f) * (self.n_steps * self.n_envs)
    norm_grads_q = tf.global_norm(grads_q)
    norm_grads_policy = tf.global_norm(grads_policy)
else:
    grads = tf.gradients(loss, self.params)
tensorflow.global_norm
4,826
import tensorflow as tf

            tf.add_to_collection(name, op)
        self._initial_state_name = self.with_prefix(self._name, 'initial')
        self._final_state_name = self.with_prefix(self._name, 'final')
        for state_tuple in self._initial_state:
            tf.add_to_collection(self._initial_state_name, state_tuple.c)
            tf.add_to_collection(self._initial_state_name, state_tuple.h)
        for state_tuple in self._final_state:
            tf.add_to_collection(self._final_state_name, state_tuple.c)
            tf.add_to_collection(self._final_state_name, state_tuple.h)

    def import_state_tuples(self, state_tuples, name, num_replicas):
        restored = []
        for i in range(len(state_tuples) * num_replicas):
            c = tf.get_collection_ref(name)[2 * i + 0]
            h = tf.get_collection_ref(name)[2 * i + 1]
            restored.append(tf.contrib.rnn.LSTMStateTuple(c, h))
        return tuple(restored)

    def import_ops(self):
        if self._is_training:
            self._train_op = tf.get_collection_ref('train_op')[0]
            self._lr = tf.get_collection_ref('lr')[0]
            self._new_lr = tf.get_collection_ref('new_lr')[0]
            self._lr_update = tf.get_collection_ref('lr_update')[0]
            rnn_params = tf.get_collection_ref('rnn_params')
            if self._cell and rnn_params:
tensorflow.get_collection_ref
4,827
from tensorflow.python.framework import tensor_shape

        return [tensor_shape.unknown_shape()]
    elif input_shape.ndims <= 1:
        return [tensor_shape.scalar()]

    dimension = tensor_util.ConstantValue(op.inputs[1])
    if dimension is None:
        return [tensor_shape.unknown_shape(ndims=input_shape.ndims - 1)]
    elif 0 <= dimension and dimension < input_shape.ndims:
        returned_shape = []
        for i, dim in enumerate(input_shape.dims):
            if i != dimension:
                returned_shape.append(dim)
tensorflow.python.framework.tensor_shape.unknown_shape
4,828
import tensorflow as tf

        input = op.inputs[0]
        return tf.zeros_like(input)


"""
Saliency maps
https://arxiv.org/abs/1312.6034
"""
class Saliency(GradientBasedMethod):

    def get_symbolic_attribution(self):
        return [tf.abs(g) for g in tf.gradients(ys=self.T, xs=self.X)]


"""
Gradient * Input
https://arxiv.org/pdf/1704.02685.pdf - https://arxiv.org/abs/1611.07270
"""
class GradientXInput(GradientBasedMethod):

    def get_symbolic_attribution(self):
        return [g * x for g, x in zip(
tensorflow.gradients
4,829
import tensorflow as tf

    # with tf.control_dependencies([p]):
    pred_flat = tf.reshape(pred, [-1])
    tgt_flat = tf.reshape(tgt, [-1])
    batch = tf.stack([pred_flat, tgt_flat], 1)
    num_sam = tools.shape(batch)[0]
    index = tf.range(num_sam)
    divider = tf.constant(resample, dtype=tf.float32)

    def sample_compute(cur_loss, i):
        batch1 = tf.gather(batch, tf.random.shuffle(index))
        batch2 = tf.gather(batch, tf.random.shuffle(index))
        pred1 = tf.slice(batch1, [0, 0], [num_sam, 1])
        pred2 = tf.slice(batch2, [0, 0], [num_sam, 1])
        tgt1 = tf.slice(batch1, [0, 1], [num_sam, 1])
        tgt2 = tf.slice(batch2, [0, 1], [num_sam, 1])
        loss = cur_loss + compute_contra_loss(pred1, pred2, tgt1, tgt2)
        print(loss)
        return (loss, i + 1)

    # def sample_compute(i):
    #     batch1 = tf.gather(batch, tf.random.shuffle(index))
    #     batch2 = tf.gather(batch, tf.random.shuffle(index))
    #     pred1 = tf.slice(batch1, [0, 0], [num_sam, 1])
    #     pred2 = tf.slice(batch2, [0, 0], [num_sam, 1])
tensorflow.slice
4,830
import tensorflow as tf

    # hard negative mining for classification
    predictions_for_bg = tf.nn.softmax(cls_pred)[:, 0]
    prob_for_negtives = tf.where(negtive_mask,
                                 0. - predictions_for_bg,
                                 # ignore all the positives
                                 0. - tf.ones_like(predictions_for_bg))
    topk_prob_for_bg, _ = tf.nn.top_k(prob_for_negtives, k=n_neg_to_select)
    selected_neg_mask = prob_for_negtives > topk_prob_for_bg[-1]

    # # randomly select negative examples for classification
tensorflow.ones_like
4,831
import tensorflow as tf

    loss_summary = tf.summary.scalar('Loss', cross_entropy)
    acc_summary = tf.summary.scalar('Accuracy', accuracy)

    # summaries for TensorBoard visualisation
    validation_summary = tf.summary.merge([img_summary, acc_summary])
    training_summary = tf.summary.merge([img_summary, loss_summary])
    test_summary = tf.summary.merge([img_summary, acc_summary])

    # saver for checkpoints
    saver = tf.train.Saver(tf.global_variables(), max_to_keep=1)
tensorflow.summary.merge
4,832
import tensorflow as tf

add = tf.add(in0, in1, "ADD")
tensorflow.add
4,833
import tensorflow as tf

    num_actions = self.hparams.problem.num_actions
    logits = tf.constant(
        1. / float(num_actions),
        shape=(obs_shape[:1] + [1, num_actions])
    )
    value = tf.zeros(obs_shape[:1] + [1])
    return {"target_policy": logits, "target_value": value}
tensorflow.zeros
4,834
import tensorflow as tf

    """
    logits, losses = self(features)  # pylint: disable=not-callable

    if self.hparams.sampling_method == "argmax":
        samples = tf.argmax(logits, axis=-1)
    else:
        assert self.hparams.sampling_method == "random"
tensorflow.argmax
4,835
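A minimal sketch of the seed API above (tf.argmax), returning the index of the largest entry along an axis:

import tensorflow as tf

logits = tf.constant([[0.1, 2.0, 0.3]])
best = tf.argmax(logits, axis=-1)  # [1], the column of the largest logit per row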
import tensorflow as tf

    with tf.device(self.devices[device_num]):
        # Rescale to [0, 1)
        images *= 1. / 256
        # Rescale to [-1, 1] instead of [0, 1)
        images = tf.subtract(images, 0.5)
        images = tf.multiply(images, 2.0)

        if self.data_format == 'NCHW':
            images = tf.transpose(images, [0, 3, 1, 2])
tensorflow.subtract
4,836
import tensorflow as tf

        adv_bc * tf.nn.relu(1.0 - (self.correction_term / (rho + eps))) * f_i_),
        axis=1)  # IMP: This is sum, as expectation wrt f

    loss_bc = -tf.reduce_mean(gain_bc)
    loss_policy = loss_f + loss_bc

    # Value/Q function loss, and explained variance
tensorflow.reduce_mean
4,837
import tensorflow as tf

    # scores = scores / (facts.get_shape().as_list()[-1] ** 0.5)

    # Activation
    if softmax_stag:
        scores = tf.nn.softmax(scores)  # [B, 1, T]

    # Weighted sum
    if mode == 'SUM':
        output = tf.matmul(scores, facts)  # [B, 1, H]
        # output = tf.reshape(output, [-1, tf.shape(facts)[-1]])
    else:
        scores = tf.reshape(scores, [-1, tf.shape(facts)[1]])
        output = facts * tf.expand_dims(scores, -1)
        output = tf.reshape(output, tf.shape(facts))
    if return_alphas:
        return output, scores
    return output

def self_attention(facts, ATTENTION_SIZE, mask, stag='null'):
    if len(facts.get_shape().as_list()) == 2:
        facts = tf.expand_dims(facts, 1)

    def cond(batch, output, i):
        return tf.less(i, tf.shape(batch)[1])
tensorflow.expand_dims
4,838
import tensorflow as tf

      perturbation loss
    """
    noise = [tf.random_normal(shape=tf.shape(emb)) for emb in embedded]
    masked = [_mask_by_length(n, length) for n in noise]
tensorflow.shape
4,839
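A minimal sketch of the seed API above (tf.shape), contrasting the run-time shape tensor with the static shape; TF1 graph mode is assumed:

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 128])
dyn = tf.shape(x)        # int32 tensor evaluated at run time, e.g. [32, 128]
static = x.get_shape()   # TensorShape([None, 128]), known at graph-build time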
import tensorflow as tf

        """Build the evaluation graph."""
        with tf.Graph().as_default():
            # create a TF session for the current graph
tensorflow.Graph
4,840
import tensorflow as tf

        return (probs, corrects)

    def _compute_loss(self, logits, aux_logits_list, classes, **knobs):
        reg_decay = knobs['reg_decay']
        aux_loss_mul = knobs['aux_loss_mul']  # Multiplier for auxiliary loss

        # Compute sparse softmax cross entropy loss from logits & labels
        log_probs = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=classes)
        loss = tf.reduce_mean(log_probs)
        self._mark_for_monitoring('loss', loss)

        # Add regularization loss
        reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
        reg_loss = reg_decay * tf.add_n(reg_losses)
        self._mark_for_monitoring('reg_loss', reg_loss)
tensorflow.reduce_mean
4,841
import tensorflow as tf

    elif activation == "elu":
        new_context_act = tf.nn.elu(new_context)
tensorflow.nn.elu
4,842
import tensorflow as tf

    # If the vocabulary is empty add a dummy value with count one so
    # the tensorflow index operations don't fail to initialize with empty
    # tensors downstream.
    dummy_value = (b'49d0cd50-04bb-48c0-bc6f-5b575dce351a'
                   if tf.dtypes.as_dtype(dtype) == tf.string else b'-1')
    return (1, dummy_value)
tensorflow.dtypes.as_dtype
4,843
import tensorflow as tf

    tf.int32)
a = tf.assign_add(p, tf.fill([1024, 1024], 0))
tensorflow.fill
4,844
from tensorflow.contrib import layers

        bias variable for each class. Rest of the model structure learns the
        residual after centered bias.
      target_dimension: TODO(zakaria): dimension of the target for multilabels.
      config: RunConfig object to configure the runtime settings.

    Raises:
      ValueError: If both linear_feature_columns and dnn_features_columns are
        empty at the same time.
    """
    target_column = layers.regression_target(
        weight_column_name=weight_column_name,
        target_dimension=target_dimension)
    super(DNNLinearCombinedRegressor, self).__init__(
        model_dir=model_dir,
        linear_feature_columns=linear_feature_columns,
        linear_optimizer=linear_optimizer,
        dnn_feature_columns=dnn_feature_columns,
        dnn_optimizer=dnn_optimizer,
tensorflow.contrib.layers.regression_target
4,845
import tensorflow as tf

    """actual host call function."""
    step = global_step[0]
    with tf.contrib.summary.create_file_writer(
        logdir=model_dir, filename_suffix=".host_call").as_default():
      with tf.contrib.summary.always_record_summaries():
        for i, name in enumerate(metric_names):
          if reduce_fn is None:
            scalar = args[i][0]
          else:
            scalar = reduce_fn(args[i])
          with tf.contrib.summary.record_summaries_every_n_global_steps(
              100, global_step=step):
            tf.contrib.summary.scalar(prefix + name, scalar, step=step)
        return tf.contrib.summary.all_summary_ops()

  global_step_tensor = tf.reshape(tf.train.get_or_create_global_step(), [1])
  other_tensors = [tf.reshape(monitor_dict[key], [1]) for key in metric_names]

  return host_call_fn, [global_step_tensor] + other_tensors


def two_stream_loss(FLAGS, features, labels, mems, is_training):
  """Pretraining loss with two-stream attention Transformer-XL."""
tensorflow.contrib.summary.scalar
4,846
import tensorflow as tf

y = tf.nn.softmax(tf.matmul(x, w) + b)
y_ = tf.placeholder(tf.float32, [None, shape[1]])
tensorflow.placeholder
4,847
import tensorflow as tf

        indices.bin_counts,
        indices.active_block_indices,
        x,
        dynamic_bsize=tf.constant(block_params.bsize_out, dtype=tf.int32),
        dynamic_bstride=tf.constant(block_params.bsize_out, dtype=tf.int32),
        dynamic_boffset=tf.constant([0, 0], dtype=tf.int32),
tensorflow.constant
4,848
import tensorflow as tf

                            [batch_size, num_classes])
        pre_pool = end_points['Conv2d_13_pointwise']
        self.assertListEqual(pre_pool.get_shape().as_list(), [batch_size, 4, 4, 1024])

    def testUnknownImageShape(self):
        tf.reset_default_graph()
        batch_size = 2
        height, width = 224, 224
        num_classes = 1000
        input_np = np.random.uniform(0, 1, (batch_size, height, width, 3))
        with self.test_session() as sess:
tensorflow.reset_default_graph
4,849
import tensorflow as tf

    # (T,B,D) => (B,T,D)
    facts = tf.array_ops.transpose(facts, [1, 0, 2])
    # Trainable parameters
    facts_size = facts.get_shape().as_list()[-1]  # D value - hidden size of the RNN layer
    querry_size = query.get_shape().as_list()[-1]
    query = tf.layers.dense(query, facts_size, activation=None, name='f1' + stag)
    query = prelu(query)
    queries = tf.tile(query, [1, tf.shape(facts)[1]])
    queries = tf.reshape(queries, tf.shape(facts))
    din_all = tf.concat([queries, facts, queries - facts, queries * facts], axis=-1)
tensorflow.layers.dense
4,850
import tensorflow as tf

        self._anchor_targets['rpn_bbox_outside_weights'] = rpn_bbox_outside_weights

        self._score_summaries.update(self._anchor_targets)

        return rpn_labels

    def _proposal_target_layer(self, rois, roi_scores, name):
        with tf.variable_scope(name):
            # the index here is relative to cfg.FLAGS.batch_size = 256
            # rois (0, x1, y1, x2, y2), coming from the RPN and then reduced to 256 proposals
            # bbox_target (ndarray): N x 4K blob of regression targets
            # bbox_inside_weights (ndarray): N x 4K blob of loss weights
            rois, roi_scores, labels, bbox_targets, bbox_inside_weights, bbox_outside_weights = tf.py_func(
                proposal_target_layer,
                [rois, roi_scores, self._gt_boxes, self._num_classes],
                [tf.float32, tf.float32, tf.float32, tf.float32, tf.float32, tf.float32])

            rois.set_shape([cfg.FLAGS.batch_size, 5])
            roi_scores.set_shape([cfg.FLAGS.batch_size])
            labels.set_shape([cfg.FLAGS.batch_size, 1])
            bbox_targets.set_shape([cfg.FLAGS.batch_size, self._num_classes * 4])
            bbox_inside_weights.set_shape([cfg.FLAGS.batch_size, self._num_classes * 4])
            bbox_outside_weights.set_shape([cfg.FLAGS.batch_size, self._num_classes * 4])
tensorflow.py_func
4,851
from tensorflow.python.training import training

        ["centered_bias_%d" % cb for cb in range(self._target_column.num_label_columns)],
        array_ops.reshape(centered_bias, [-1]))
    return centered_bias

  def _centered_bias_step(self, targets, features):
    centered_bias = ops.get_collection(self._centered_bias_weight_collection)
    batch_size = array_ops.shape(targets)[0]
    logits = array_ops.reshape(
        array_ops.tile(centered_bias[0], [batch_size]),
        [batch_size, self._target_column.num_label_columns])
    loss = self._target_column.loss(logits, targets, features)
    # Learn central bias by an optimizer. 0.1 is a conservative lr for a single
    # variable.
    return training.AdagradOptimizer(0.1).minimize(loss, var_list=centered_bias)

  def _logits(self, features, is_training=False):
    linear_feature_columns = self._get_linear_feature_columns()
    dnn_feature_columns = self._get_dnn_feature_columns()
    if not (linear_feature_columns or dnn_feature_columns):
      raise ValueError("Either linear_feature_columns or dnn_feature_columns "
                       "should be defined.")
    if linear_feature_columns and dnn_feature_columns:
      logits = (self._linear_logits(features, is_training) +
                self._dnn_logits(features, is_training))
    elif dnn_feature_columns:
      logits = self._dnn_logits(features, is_training)
tensorflow.python.training.training.AdagradOptimizer
4,852
import tensorflow as tf

init_op = tf.initialize_all_variables()
tensorflow.initialize_all_variables
4,853
import tensorflow as tf

    widths = scales_grid * ratio_sqrts * base_size[0]

    x_centers = tf.cast(tf.range(features_width), tf.float32)
    x_centers = x_centers * stride[1]
    y_centers = tf.cast(tf.range(features_height), tf.float32)
    y_centers = y_centers * stride[0]
    # x_centers = x_centers + offset[1]
    # y_centers = y_centers + offset[0]

    x_centers, y_centers = tf.meshgrid(x_centers, y_centers)
    widths, x_centers = tf.meshgrid(widths, x_centers)
    heights, y_centers = tf.meshgrid(heights, y_centers)

    anchor_centers = tf.stack([x_centers, y_centers], axis=2)
    anchor_centers = tf.reshape(anchor_centers, [-1, 2])

    anchor_sizes = tf.stack([widths, heights], axis=2)
tensorflow.meshgrid
4,854
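A minimal sketch of the seed API above (tf.meshgrid), expanding 1-D coordinate vectors into full coordinate grids as in the anchor code:

import tensorflow as tf

xs = tf.constant([0., 1., 2.])
ys = tf.constant([0., 1.])
gx, gy = tf.meshgrid(xs, ys)  # both [2, 3]: every (x, y) pairing of the inputs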
import tensorflow as tf

        lm_embeddings = tf.concat(
            [tf.expand_dims(t, axis=1) for t in layers_without_bos_eos],
            axis=1
        )

        # get the mask op without bos/eos.
        # tf doesn't support reversing boolean tensors, so cast
        # to int then back
        mask_wo_bos_eos = tf.cast(lm_graph.mask[:, 1:], 'int32')
        mask_wo_bos_eos = tf.reverse_sequence(
            mask_wo_bos_eos,
            lm_graph.sequence_lengths - 1,
            seq_axis=1,
            batch_axis=0,
        )
        mask_wo_bos_eos = mask_wo_bos_eos[:, 1:]
tensorflow.cast
4,855
import tensorflow as tf

        head_etd = tf.expand_dims(head, 3)  # bs,bn,bl,1,vec

        logits = scaled_tanh(dependent_etd + head_etd + f_bias, 5.0)  # bs,bn,bl,bl,vec

        logits_masked = exp_mask_for_high_rank(logits, attn_mask)
        attn_score = tf.nn.softmax(logits_masked, 3)  # bs,bn,bl,bl,vec
        attn_score = mask_for_high_rank(attn_score, attn_mask)  # bs,bn,bl,bl,vec

        self_attn_result = tf.reduce_sum(attn_score * rep_map_tile, 3)  # bs,bn,bl,vec
tensorflow.nn.softmax
4,856
import tensorflow as tf

        self._dropout_keep_prob = dropout_keep_prob
        self._out_vocab_size = out_vocab_size

        self.x = tf.placeholder(tf.int32, [batch_size, max_sequence_len], name='x')
        self.y = tf.placeholder(tf.float32,
tensorflow.placeholder
4,857
import tensorflow as tf

        Args:
            x: The input tensor.
            prediction: The prediction class tensor.
            output_class: The output tensor.
            sess: The graph session.
        """
        # input label placeholder
        y = tf.placeholder("float", [None, self.n_classes])
        # Loss function
        loss = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y))
        # Optimization
        opt = tf.train.AdamOptimizer(
            learning_rate=self.learning_rate).minimize(loss)
tensorflow.placeholder
4,858
import tensorflow as tf

        opt = tf.train.AdagradOptimizer(1.0)

        self.opt = opt
        self.sess = None
        self.train_step = None
        self.post_step = None
        self.graph = tf.Graph()
        with self.graph.as_default():
            self.head_input = tf.placeholder(tf.int32, shape=[None])
            self.rel_input = tf.placeholder(tf.int32, shape=[None])
            self.tail_input = tf.placeholder(tf.int32, shape=[None])
tensorflow.Graph
4,859
import tensorflow as tf

            sigma = tf.clip_by_value(sigma, 0.0, 1.0)
            norm_dist = tf.distributions.Normal(loc=mu * self.a_bound, scale=sigma)
        params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=name)
        return norm_dist, params, state_init_a, state_final_a
tensorflow.get_collection
4,860
import tensorflow as tf

        outputs = tf.layers.conv1d(inputs, embed_size // 2, 1, padding="SAME")
        for k in range(2, K + 1):
            with tf.variable_scope("num_{}".format(k)):
                output = tf.layers.conv1d(inputs, embed_size // 2, k, padding="SAME")
                outputs = tf.concat((outputs, output), -1)
        outputs = tf.nn.relu(tf.layers.batch_normalization(outputs, training=is_training))
    return outputs
tensorflow.concat
4,861
from tensorflow.python.platform import tf_logging as logging

    def end(self, session=None):
        super(CheckpointSaver, self).end(session)
        self._save(self._last_begin_step, session)

    def _save(self, step, session):
        """Saves the latest checkpoint."""
        if step == self._last_saved_step:
            return
        logging.info("Saving checkpoints for %d into %s.", step, self._save_path)
        self._last_saved_time = time.time()
        self._last_saved_step = step
        if self._saver is None:
            self._scaffold.saver.save(session, self._save_path, global_step=step)
        else:
            self._saver.save(session, self._save_path, global_step=step)
        self._summary_writer.add_session_log(
            SessionLog(
tensorflow.python.platform.tf_logging.info
4,862
import tensorflow as tf

def load_image_train(datapoint):
    """Load images for training."""
    input_image = tf.image.resize(datapoint['image'], (512, 512))
    input_mask = tf.image.resize(datapoint['segmentation_mask'], (128, 128))
    if tf.random.uniform(()) > 0.5:
        input_image = tf.image.flip_left_right(input_image)
tensorflow.image.resize
4,863
import tensorflow as tf

        Args:
          input_batch: Input batch Tensor.
          reduction_indices: Indices of `input_batch` to reduce over.
          use_batch_stats: Boolean to indicate if batch statistics should be
            calculated, otherwise moving averages are returned.

        Returns:
          Tuple of (mean, variance, second_moment).
        """
        # Set up our moving statistics. When connecting in parallel, this is shared.
        self._moving_mean = tf.get_variable(
            "moving_mean",
            shape=self._mean_shape,
            collections=[tf.GraphKeys.MOVING_AVERAGE_VARIABLES,
                         tf.GraphKeys.VARIABLES],
            initializer=tf.zeros_initializer,
            trainable=False)

        self._moving_second_moment = tf.get_variable(
            "moving_second_moment",
tensorflow.get_variable
4,864
import tensorflow as tf

    """
    original_shape = tf.shape(image)

    rank_assertion = tf.Assert(
        tf.equal(tf.rank(image), 3),
        ['Rank of image must be equal to 3.'])
    cropped_shape = control_flow_ops.with_dependencies(
        [rank_assertion],
tensorflow.rank
4,865
import tensorflow as tf

        with tf.name_scope(clone.scope):
            clone_loss = _gather_clone_loss(clone, len(clones), regularization_losses)
            if clone_loss is not None:
                clones_losses.append(clone_loss)
            # Only use regularization_losses for the first clone
            regularization_losses = None
    if clones_losses:
        total_loss = tf.add_n(clones_losses, name='total_loss')

    # Add the summaries from the first clone. These contain the summaries
    # created by model_fn and either optimize_clones() or _gather_clone_loss().
    summaries |= set(tf.get_collection(tf.GraphKeys.SUMMARIES, first_clone.scope))

    if total_loss is not None:
tensorflow.add_n
4,866
import tensorflow as tf

        current_inputs = text_emb  # [num_sentences, max_sentence_length, emb]

        for layer in range(self.config["contextualization_layers"]):
            with tf.variable_scope("layer_{}".format(layer)):
                with tf.variable_scope("fw_cell"):
                    cell_fw = util.CustomLSTMCell(self.config["contextualization_size"], num_sentences, self.lstm_dropout)
                with tf.variable_scope("bw_cell"):
                    cell_bw = util.CustomLSTMCell(self.config["contextualization_size"], num_sentences, self.lstm_dropout)
                state_fw = tf.contrib.rnn.LSTMStateTuple(tf.tile(cell_fw.initial_state.c, [num_sentences, 1]),
                                                         tf.tile(cell_fw.initial_state.h, [num_sentences, 1]))
                state_bw = tf.contrib.rnn.LSTMStateTuple(tf.tile(cell_bw.initial_state.c, [num_sentences, 1]),
                                                         tf.tile(cell_bw.initial_state.h, [num_sentences, 1]))

                (fw_outputs, bw_outputs), _ = tf.nn.bidirectional_dynamic_rnn(
                    cell_fw=cell_fw,
                    cell_bw=cell_bw,
                    inputs=current_inputs,
                    sequence_length=text_len,
tensorflow.tile
4,867
import tensorflow as tf

def get_batch(image, label, batch_size, crop_size):
    # print(image.shape)
    # print(label.shape)
    images, labels = tf.train.shuffle_batch([image, label], batch_size=batch_size,
                                            num_threads=10, capacity=10000, min_after_dequeue=200)
    return tf.reshape(images, [batch_size, 4096]), tf.reshape(labels, [batch_size])

def get_test_batch(image, label, batch_size):
    images, labels = tf.train.batch([image, label], batch_size=batch_size)
    return tf.reshape(images, [batch_size, 4096]), tf.reshape(labels, [batch_size])

def get_valid_batch(image, label, batch_size):
    images, labels = tf.train.batch([image, label], batch_size=batch_size)
    return tf.reshape(images, [batch_size, 4096]), tf.reshape(labels, [batch_size])

class trainwork(object):
    def __init__(self):
        with tf.variable_scope('scop'):
tensorflow.reshape
4,868
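A minimal sketch of the seed API above (tf.reshape); -1 lets one dimension be inferred from the element count:

import tensorflow as tf

x = tf.range(12)
m = tf.reshape(x, [3, 4])    # same 12 elements, laid out as 3 rows of 4
flat = tf.reshape(m, [-1])   # -1 infers the remaining dimension (12)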
import tensorflow as tf

        "chars": tf.shape(features["chars"]),
        "source": tf.shape(features["source"]),
tensorflow.shape
4,869
import tensorflow as tf

def false_positive_rate_at_true_positive_rate_loss(labels,
                                                   logits,
                                                   target_rate,
                                                   weights=1.0,
                                                   dual_rate_factor=0.1,
                                                   label_priors=None,
                                                   surrogate_type='xent',
                                                   lambdas_initializer=tf.constant_initializer(1.0),
                                                   reuse=None,
                                                   variables_collections=None,
                                                   trainable=True,
                                                   scope=None):
    """Computes false positive rate at true positive rate loss.

    Note that `true positive rate` is a synonym for Recall, and that minimizing
tensorflow.constant_initializer
4,870
import tensorflow as tf

        pos = tf.nn.sigmoid(tf.matmul(tf.nn.tanh(tf.matmul(state, wp)), vp))
        pos = tf.floor(encoder_input_length * pos)
        pos = tf.reshape(pos, [-1, 1])
        pos = tf.minimum(pos, encoder_input_length - 1)

        idx = tf.tile(tf.to_float(tf.range(attn_length)), tf.stack([batch_size]))
        idx = tf.reshape(idx, [-1, attn_length])

        low = pos - encoder.attn_window_size
        high = pos + encoder.attn_window_size

        mlow = tf.to_float(idx < low)
        mhigh = tf.to_float(idx > high)
tensorflow.reshape
4,871
import tensorflow as tf

    with tf.variable_scope(name):
        with tf.variable_scope('branch1_1x1'):
            if o1s > 0:
                conv1 = conv(inp, inSize, o1s, 1, 1, 1, 1, 'SAME', 'conv1x1',
                             phase_train=phase_train, use_batch_norm=use_batch_norm,
                             weight_decay=weight_decay)
                net.append(conv1)

        with tf.variable_scope('branch2_3x3'):
            if o2s1 > 0:
                conv3a = conv(inp, inSize, o2s1, 1, 1, 1, 1, 'SAME', 'conv1x1',
                              phase_train=phase_train, use_batch_norm=use_batch_norm,
                              weight_decay=weight_decay)
                conv3 = conv(conv3a, o2s1, o2s2, 3, 3, ks, ks, 'SAME', 'conv3x3',
                             phase_train=phase_train, use_batch_norm=use_batch_norm,
                             weight_decay=weight_decay)
                net.append(conv3)
tensorflow.variable_scope
4,872
import tensorflow as tf

    """Batch normalization with corresponding log determinant Jacobian."""
    if reuse is None:
        reuse = not train
    # create variables
    with tf.variable_scope(name) as scope:
        if reuse:
            scope.reuse_variables()
        var = variable_on_cpu(
            "var", [dim], tf.constant_initializer(1.), trainable=False)
        mean = variable_on_cpu(
            "mean", [dim], tf.constant_initializer(0.), trainable=False)
        step = variable_on_cpu("step", [], tf.constant_initializer(0.), trainable=False)

    # choose the appropriate moments
    if train:
        used_mean, used_var = tf.nn.moments(input_, axes, name="batch_norm")
        cur_mean, cur_var = used_mean, used_var
        if bn_lag > 0.:
            used_var = stable_var(input_=input_, mean=used_mean, axes=axes)
            cur_var = used_var
            used_mean -= (1 - bn_lag) * (used_mean - tf.stop_gradient(mean))
tensorflow.constant_initializer
4,873
from tensorflow.contrib.framework import tensor_util

            name, 'precision', [predictions, labels]):
        predictions, labels = tensor_util.remove_squeezable_dimensions(
            predictions, labels)
tensorflow.contrib.framework.tensor_util.remove_squeezable_dimensions
4,874
import tensorflow as tf

            if type(a[0]) == tf.Tensor:
                return tf.stack(a, 0)
            else:
                return tf.constant(a, dtype)
        else:
            print(type(a))
            return tf.constant(a, dtype)

    return sbnet_module.reduce_mask(
        mask,
        block_params.bcount,
        dynamic_bsize=to_tensor(block_params.bsize, tf.int32),
tensorflow.constant
4,875
import tensorflow as tf

        span_width = 1 + span_ends - span_starts  # [k]

        if self.config["use_features"]:
            span_width_index = span_width - 1  # [k]
            span_width_emb = tf.gather(
                tf.get_variable("span_width_embeddings",
                                [self.config["max_span_width"], self.config["feature_size"]]),
                span_width_index)  # [k, emb]
            span_width_emb = tf.nn.dropout(span_width_emb, self.dropout)
            span_emb_list.append(span_width_emb)

        if self.config["model_heads"]:
            span_indices = tf.expand_dims(tf.range(self.config["max_span_width"]), 0) + tf.expand_dims(span_starts, 1)  # [k, max_span_width]
            span_indices = tf.minimum(util.shape(context_outputs, 0) - 1, span_indices)  # [k, max_span_width]
            span_text_emb = tf.gather(head_emb, span_indices)  # [k, max_span_width, emb]
            with tf.variable_scope("head_scores"):
                self.head_scores = util.projection(context_outputs, 1)  # [num_words, 1]
            span_head_scores = tf.gather(self.head_scores, span_indices)  # [k, max_span_width, 1]
            span_mask = tf.expand_dims(
                tf.sequence_mask(span_width, self.config["max_span_width"], dtype=tf.float32), 2)  # [k, max_span_width, 1]
            span_head_scores += tf.log(span_mask)  # [k, max_span_width, 1]
            span_attention = tf.nn.softmax(span_head_scores, 1)  # [k, max_span_width, 1]
            span_head_emb = tf.reduce_sum(span_attention * span_text_emb, 1)  # [k, emb]
            span_emb_list.append(span_head_emb)

        span_emb = tf.concat(span_emb_list, 1)  # [k, emb]
tensorflow.gather
4,876
from tensorflow.python.framework import ops

    var = state_ops.assign(var, array_ops.zeros_like(inputs[0]))
    update_ops = []
    for input_tensor in inputs:
        op = state_ops.assign_add(var, input_tensor, use_locking=True)
        update_ops.append(op)
    with ops.control_dependencies(update_ops):
        return gen_state_ops._destroy_temporary_variable(var, var_name=var_name, name=name)
tensorflow.python.framework.ops.control_dependencies
4,877
from tensorflow.python.framework import ops

    Returns:
      A Tensor of the same shape of `x`.

    Raises:
      ValueError: If `keep_prob` is not in `(0, 1]`.
    """
    with ops.op_scope([x], name, "dropout") as name:
        x = ops.convert_to_tensor(x, name="x")
        if isinstance(keep_prob, float) and not 0 < keep_prob <= 1:
            raise ValueError("keep_prob must be a scalar tensor or a float in the "
                             "range (0, 1], got %g" % keep_prob)
        keep_prob = ops.convert_to_tensor(
            keep_prob, dtype=x.dtype, name="keep_prob")
        keep_prob.get_shape().assert_is_compatible_with(tensor_shape.scalar())
tensorflow.python.framework.ops.convert_to_tensor
4,878
from tensorflow.python.framework import ops

                              padding=padding,
                              data_format=data_format,
                              name=name)

ops.RegisterShape("Relu")(common_shapes.unchanged_shape)
ops.RegisterShape("Relu6")(common_shapes.unchanged_shape)
ops.RegisterShape("Elu")(common_shapes.unchanged_shape)
ops.RegisterShape("Softplus")(common_shapes.unchanged_shape)
ops.RegisterShape("Softsign")(common_shapes.unchanged_shape)

@ops.RegisterShape("ReluGrad")
@ops.RegisterShape("Relu6Grad")
@ops.RegisterShape("EluGrad")
@ops.RegisterShape("SoftplusGrad")
@ops.RegisterShape("SoftsignGrad")
def _BinaryElementwiseShape(op):
    """Returns same shape as both inputs to op.

    Args:
      op: Input operation.

    Returns:
      Shape of both inputs to `op`.
    """
    return [op.inputs[0].get_shape().merge_with(op.inputs[1].get_shape())]
tensorflow.python.framework.ops.RegisterShape
4,879
import tensorflow as tf

        grads_and_vars_not_none = [(g, v) for (g, v) in grads_and_vars if g is not None]
        grads = [g for (g, v) in grads_and_vars_not_none]
        variables = [v for (g, v) in grads_and_vars_not_none]

        #print_ops = [tf.print("loss=", self.loss)] + [tf.print("norms_g", tf.norm(g)) for g in grads] + [tf.print("g", g) for g in grads] + [tf.print("p", self.prob)] + [tf.print("invFisher", self.invFisher)] + [tf.print("invA", self.invA)] + [tf.print("invB", self.invB)]
        #print_ops = [tf.print("partA =", self.partA, summarize=-1), tf.print("partB =", self.partB, summarize=-1), tf.print("prob_sliced =", self.prob_sliced, summarize=-1), tf.print("natural_gradient_theta =", self.natural_gradient_loss_theta, summarize=-1), tf.print("euclidean gradient =", self.euclidean_gradient, summarize=-1), tf.print("TA =", self.TA, summarize=-1), tf.print("Tup =", self.Tup, summarize=-1), tf.print("Tdown =", self.Tdown, summarize=-1), tf.print("TB =", self.TB, summarize=-1)] + [tf.print("norms_g", tf.norm(g), summarize=-1) for g in grads]
        #with tf.control_dependencies(print_ops):
        clip_value = clipping_kwargs["value"]
        clipped_grads, global_norm = tf.clip_by_global_norm(grads, clip_value)
        clipped_grads_and_vars = [(clipped_grads[i], variables[i]) for i in range(len(grads))]

    elif clipping_method == "clip_by_norm":
        grads_and_vars_not_none = [(g, v) for (g, v) in grads_and_vars if g is not None]
        grads = [g for (g, v) in grads_and_vars_not_none]
        variables = [v for (g, v) in grads_and_vars_not_none]
tensorflow.clip_by_global_norm
4,880
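A minimal sketch of the seed API above (tf.clip_by_global_norm), which rescales all gradients jointly when their combined norm exceeds the threshold:

import tensorflow as tf

grads = [tf.constant([3.0, 4.0])]
# joint norm is 5.0; with clip_norm=1.0 every tensor is scaled by 1/5
clipped, global_norm = tf.clip_by_global_norm(grads, clip_norm=1.0)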
import tensorflow as tf

        weighted_average = []

        for offset in range(-encoder.attn_window_size, encoder.attn_window_size + 1):
            pos_ = pos + offset
            pos_ = tf.minimum(pos_, encoder_input_length - 1)
            pos_ = tf.maximum(pos_, 0)  # TODO: when pos is < 0, use <S> or </S>
            weights_ = tf.to_float(tf.one_hot(tf.to_int32(tf.squeeze(pos_, axis=1)), depth=attn_length))
            weighted_average_ = tf.reduce_sum(tf.expand_dims(weights_, axis=2) * hidden_states, axis=1)
            weighted_average.append(weighted_average_)

        weighted_average = tf.concat(weighted_average, axis=1)
        weighted_average = dense(weighted_average, encoder.attn_size)
tensorflow.squeeze
4,881
import tensorflow as tf

                        lambda: (ema.average(batch_mean), ema.average(batch_var)))
    normed = tf.nn.batch_normalization(inputs, mean, var, beta, gamma, 1e-3)
tensorflow.nn.batch_normalization
4,882
from tensorflow.python.client import graph_util

@ops.RegisterStatistics("Conv2D", "flops")
def _calc_conv_flops(graph, node):
    """Calculates the compute resources needed for Conv2D."""
    input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
    input_shape.assert_is_fully_defined()
    filter_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[1])
    filter_shape.assert_is_fully_defined()
    output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
    output_shape.assert_is_fully_defined()
    filter_height = int(filter_shape[0])
tensorflow.python.client.graph_util.tensor_shape_from_node_def_name
4,883
from tensorflow.python.framework import ops

        name: Optional name for the operation.

    Returns:
      A `Tensor` with the same type as `value`. The average pooled output tensor.
    """
    with ops.op_scope([value], name, "AvgPool") as name:
        value = ops.convert_to_tensor(value, name="input")
        return gen_nn_ops._avg_pool(value,
                                    ksize=ksize,
                                    strides=strides,
                                    padding=padding,
                                    data_format=data_format,
                                    name=name)
tensorflow.python.framework.ops.convert_to_tensor
4,884
import tensorflow as tf

    #### Unpack input
    mem_name = "mems"
    mems = mems.get(mem_name, None)

    inp_k = tf.transpose(features["input_k"], [1, 0])
    inp_q = tf.transpose(features["input_q"], [1, 0])

    seg_id = tf.transpose(features["seg_id"], [1, 0])

    inp_mask = None
    perm_mask = tf.transpose(features["perm_mask"], [1, 2, 0])

    if FLAGS.num_predict is not None:
        # [num_predict x tgt_len x bsz]
        target_mapping = tf.transpose(features["target_mapping"], [1, 2, 0])
    else:
        target_mapping = None

    # target for LM loss
    tgt = tf.transpose(features["target"], [1, 0])

    # target mask for LM loss
    tgt_mask = tf.transpose(features["target_mask"], [1, 0])

    # construct xlnet config and save to model_dir
    xlnet_config = xlnet.XLNetConfig(FLAGS=FLAGS)
    xlnet_config.to_json(os.path.join(FLAGS.model_dir, "config.json"))
tensorflow.transpose
4,885
import tensorflow as tf

    def _embed(self):
        with tf.variable_scope('word_char_embedding'):
            if self.config.fix_pretrained_vector:
                self.pretrained_word_mat = tf.get_variable(
                    "word_emb_mat",
                    [self.vocab.word_size() - 2, self.vocab.word_embed_dim],
                    dtype=tf.float32,
                    initializer=tf.constant_initializer(self.vocab.word_embeddings[2:], dtype=tf.float32),
                    trainable=False)
                self.word_pad_unk_mat = tf.get_variable(
                    "word_unk_pad",
                    [2, self.pretrained_word_mat.get_shape()[1]],
                    dtype=tf.float32,
                    initializer=tf.constant_initializer(self.vocab.word_embeddings[:2], dtype=tf.float32),
                    trainable=True)

                self.word_mat = tf.concat([self.word_pad_unk_mat, self.pretrained_word_mat], axis=0)

                self.pretrained_char_mat = tf.get_variable(
                    "char_emb_mat",
                    [self.vocab.char_size() - 2, self.vocab.char_embed_dim],
                    dtype=tf.float32,
                    initializer=tf.constant_initializer(self.vocab.char_embeddings[2:], dtype=tf.float32),
                    trainable=False)
tensorflow.constant_initializer
4,886
import tensorflow as tf

    y_centers = tf.cast(tf.range(features_height), tf.float32)
    y_centers = y_centers * stride[0]
    # x_centers = x_centers + offset[1]
    # y_centers = y_centers + offset[0]

    x_centers, y_centers = tf.meshgrid(x_centers, y_centers)
    widths, x_centers = tf.meshgrid(widths, x_centers)
    heights, y_centers = tf.meshgrid(heights, y_centers)

    anchor_centers = tf.stack([x_centers, y_centers], axis=2)
    anchor_centers = tf.reshape(anchor_centers, [-1, 2])

    anchor_sizes = tf.stack([widths, heights], axis=2)
    anchor_sizes = tf.reshape(anchor_sizes, [-1, 2])

    anchors = tf.concat([anchor_centers - .5 * anchor_sizes,
                         anchor_centers + .5 * anchor_sizes], 1)
    # anchors = box_utils.convert_yxyx_to_xyxy_format(anchors)
    return anchors
tensorflow.reshape
4,887
import tensorflow as tf

            parsed_tensors[k] = tf.sparse.to_dense(
                parsed_tensors[k], default_value='')
        else:
            parsed_tensors[k] = tf.sparse.to_dense(
                parsed_tensors[k], default_value=0)
tensorflow.sparse.to_dense
4,888
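A minimal sketch of the seed API above (tf.sparse.to_dense), filling unspecified positions with the default value:

import tensorflow as tf

sp = tf.SparseTensor(indices=[[0, 0], [1, 2]], values=[7, 9], dense_shape=[2, 3])
dense = tf.sparse.to_dense(sp, default_value=0)  # [[7, 0, 0], [0, 0, 9]]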
import tensorflow as tf

    coord.join(threads)

def predict_time(loop=100):
    feed_dict = {
        testnum: 1
    }
    with tf.Session(config=config) as sess:
        sess.run(init)
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)
        tf.train.Saver().restore(sess, path)
        total = 0.0
        for i in range(loop):
            a = datetime.now()
            accuracy_np = sess.run([accuracy], feed_dict=feed_dict)
            b = datetime.now()
tensorflow.train.Coordinator
4,889
import tensorflow as tf

    outputs = outputs.stack()
    weights = weights.stack()  # batch_size, encoders, output time, input time
    states = states.stack()
    attns = attns.stack()
    samples = samples.stack()

    # put batch_size as first dimension
    outputs = tf.transpose(outputs, perm=(1, 0, 2))
    weights = tf.transpose(weights, perm=(1, 0, 2))
    states = tf.transpose(states, perm=(1, 0, 2))
    attns = tf.transpose(attns, perm=(1, 0, 2))
    samples = tf.transpose(samples)

    return outputs, weights, states, attns, samples, get_logits, initial_data
tensorflow.transpose
4,890
import tensorflow as tf

        sentence_indices = tf.tile(tf.expand_dims(tf.range(num_sentences), 1),
                                   [1, max_sentence_length])  # [num_sentences, max_sentence_length]
        flattened_sentence_indices = self.flatten_emb_by_sentence(sentence_indices, text_len_mask)  # [num_words]
        flattened_head_emb = self.flatten_emb_by_sentence(head_emb, text_len_mask)  # [num_words]

        candidate_starts = tf.tile(tf.expand_dims(tf.range(num_words), 1),
                                   [1, self.max_span_width])  # [num_words, max_span_width]
        candidate_ends = candidate_starts + tf.expand_dims(tf.range(self.max_span_width), 0)  # [num_words, max_span_width]
        candidate_start_sentence_indices = tf.gather(flattened_sentence_indices, candidate_starts)  # [num_words, max_span_width]
        candidate_end_sentence_indices = tf.gather(flattened_sentence_indices,
                                                   tf.minimum(candidate_ends, num_words - 1))  # [num_words, max_span_width]
        candidate_mask = tf.logical_and(
            candidate_ends < num_words,
            tf.equal(candidate_start_sentence_indices, candidate_end_sentence_indices))  # [num_words, max_span_width]
        flattened_candidate_mask = tf.reshape(candidate_mask, [-1])  # [num_words * max_span_width]
tensorflow.range
4,891
import tensorflow as tf

    print("----feature_mat-----")
    print(feature_mat)
    # exit()

    # Temporarily crush the feature_mat's dimensions
    feature_mat = tf.reshape(feature_mat, [-1, config.n_inputs])  # 9
    # New feature_mat's shape: [time_steps*batch_size, n_inputs]  # 128 * batch_size, 9

    # Linear activation, reshaping inputs to the LSTM's number of hidden:
    hidden = tf.nn.relu(tf.matmul(
        feature_mat, config.W['hidden']
    ) + config.biases['hidden'])
    # New feature_mat (hidden) shape: [time_steps*batch_size, n_hidden]  [128*batch_size, 32]

    print("--n_steps--")
    print(config.n_steps)
    print("--hidden--")
    print(hidden)
tensorflow.matmul
4,892
import tensorflow as tf

    def _selector(self, context, h, reuse=False):
        with tf.variable_scope('selector', reuse=reuse):
            w = tf.get_variable('w', [self.H, 1], initializer=self.weight_initializer)
            b = tf.get_variable('b', [1], initializer=self.const_initializer)
            beta = tf.nn.sigmoid(tf.matmul(h, w) + b, 'beta')  # (N, 1)
            context = tf.multiply(beta, context, name='selected_context')
            return context, beta

    def _decode_lstm(self, x, h, context, dropout=False, reuse=False):
        with tf.variable_scope('logits', reuse=reuse):
            w_h = tf.get_variable('w_h', [self.H, self.M], initializer=self.weight_initializer)
            b_h = tf.get_variable('b_h', [self.M], initializer=self.const_initializer)
            w_out = tf.get_variable('w_out', [self.M, self.V], initializer=self.weight_initializer)
            b_out = tf.get_variable('b_out', [self.V], initializer=self.const_initializer)

            if dropout:
                h = tf.nn.dropout(h, 0.5)
            h_logits = tf.matmul(h, w_h) + b_h
tensorflow.variable_scope
4,893
from tensorflow.python.ops import partitioned_variables

        weight_collections=[parent_scope],
        scope=scope)

    hidden_layer_partitioner = (
        partitioned_variables.min_max_variable_partitioner(
            max_partitions=num_ps_replicas))
    for layer_id, num_hidden_units in enumerate(hidden_units):
      with variable_scope.variable_scope(
tensorflow.python.ops.partitioned_variables.min_max_variable_partitioner
4,894
import tensorflow as tf

            # the returned rois have an extra column of zeros prepended as the first column
            rois, rpn_scores = tf.py_func(proposal_layer,
                                          [rpn_cls_prob, rpn_bbox_pred, self._im_info, self._mode,
                                           self._feat_stride, self._anchors, self._num_anchors],
                                          [tf.float32, tf.float32])
            rois.set_shape([None, 5])
            rpn_scores.set_shape([None, 1])

        return rois, rpn_scores

    def _crop_pool_layer(self, bottom, rois, name):
        with tf.variable_scope(name):
            # tf.squeeze() returns a tensor with all size-1 dimensions of the input removed
            batch_ids = tf.squeeze(tf.slice(rois, [0, 0], [-1, 1], name="batch_id"), [1])
            # Get the normalized coordinates of bboxes
            bottom_shape = tf.shape(bottom)
            height = (tf.to_float(bottom_shape[1]) - 1.) * np.float32(self._feat_stride[0])
            width = (tf.to_float(bottom_shape[2]) - 1.) * np.float32(self._feat_stride[0])
            # dividing the rois by h and w gives their positions on the feature map
            x1 = tf.slice(rois, [0, 1], [-1, 1], name="x1") / width
            y1 = tf.slice(rois, [0, 2], [-1, 1], name="y1") / height
            x2 = tf.slice(rois, [0, 3], [-1, 1], name="x2") / width
            y2 = tf.slice(rois, [0, 4], [-1, 1], name="y2") / height
            # Won't be backpropagated to rois anyway, but to save time
            bboxes = tf.stop_gradient(tf.concat([y1, x1, y2, x2], axis=1))
            # 'roi_pooling_size', 7
            pre_pool_size = cfg.FLAGS.roi_pooling_size * 2
            # crop the parts of the feature map the rois map to, then resize them to 14x14
            crops = tf.image.crop_and_resize(bottom, bboxes, tf.to_int32(batch_ids),
                                             [pre_pool_size, pre_pool_size], name="crops")
tensorflow.shape
4,895
import tensorflow as tf

        initializer = tf.global_variables_initializer()

        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        self.sess = tf.Session(graph=graph, config=config)
        self.sess.run(initializer)

        loop = trange(self.num_epochs, disable=not self.verbose)
        for _ in loop:
tensorflow.Session
4,896
import tensorflow as tf

    b_t1 = utils.bias_variable([deconv_shape1[3].value], name="b_t1")
    conv_t1 = utils.conv2d_transpose_strided(concat1, W_t1, b_t1, output_shape=tf.shape(image_net["pool4"]))
    fuse_1 = tf.add(conv_t1, image_net["pool4"], name="fuse_1")

    deconv_shape2 = image_net["pool3"].get_shape()
    W_t2 = utils.weight_variable([4, 4, deconv_shape2[3].value, deconv_shape1[3].value], name="W_t2")
    b_t2 = utils.bias_variable([deconv_shape2[3].value], name="b_t2")
    conv_t2 = utils.conv2d_transpose_strided(fuse_1, W_t2, b_t2, output_shape=tf.shape(image_net["pool3"]))
    fuse_2 = tf.add(conv_t2, image_net["pool3"], name="fuse_2")

    shape = tf.shape(image)
    deconv_shape3 = tf.stack([shape[0], shape[1], shape[2], 3])
    W_t3 = utils.weight_variable([16, 16, 3, deconv_shape2[3].value], name="W_t3")
    b_t3 = utils.bias_variable([3], name="b_t3")
    conv_t3 = tf.nn.relu(utils.conv2d_transpose_strided(fuse_2, W_t3, b_t3, output_shape=deconv_shape3, stride=8))

    annotation_pred = tf.argmax(conv_t3, dimension=3, name="prediction")

    return tf.expand_dims(annotation_pred, dim=3), conv_t3
tensorflow.shape
4,897
import tensorflow as tf

        val = val[permutation]
        shape = np.array([3, 4]).astype(np.int64)
        return tf.SparseTensorValue(ind, val, shape)

    def _SparseTensorValue_1x1x1(self):
tensorflow.SparseTensorValue
4,898
import tensorflow as tf

        self._build_summaries()

        self.epoch_stats = get_stats_template()
        self.stats = Bunch(
            epoch_accuracy=[],
            epoch_reconstructions=[],
            permutation=None
        )

        # if FLAGS.dev:
        #     plt.ion()
        #     plt.show()

    def _build_summaries(self):
        # losses
        with tf.name_scope('losses'):
            loss_names = ['loss_autoencoder', 'loss_predictive', 'loss_distance', 'loss_denoising']
            for i, loss in enumerate(self.losses):
                self._add_loss_summary(loss_names[i], loss)
            self._add_loss_summary('loss_total', self.loss_total)
        self.summs_train = tf.summary.merge_all('train')

        # reconstructions
        with tf.name_scope('decodings'):
            self.image_summaries = {
                'orig': self._add_decoding_summary('0_original_input', self.input),
                'reco': self._add_decoding_summary('1_reconstruction', self.eval_decode),
                'pred': self._add_decoding_summary('2_prediction', self.eval_decode),
                'midd': self._add_decoding_summary('3_averaged', self.eval_decode),
                'nois': self._add_decoding_summary('4_noisy', self.eval_decode)
tensorflow.name_scope
4,899