text stringlengths 81 112k |
|---|
Decode from dataset on new checkpoint.
def continuous_decode(self):
  """Run dataset decoding every time a new checkpoint appears."""
  timeout_mins = self._decode_hparams.decode_timeout_mins
  for _ in next_checkpoint(self._hparams.model_dir, timeout_mins):
    self.decode()
Decode from dataset on new checkpoint.
def continuous_decode_on_train_data(self):
  """Decode from the TRAIN split every time a new checkpoint appears."""
  timeout_mins = self._decode_hparams.decode_timeout_mins
  for _ in next_checkpoint(self._hparams.model_dir, timeout_mins):
    self.decode(dataset_split=tf.estimator.ModeKeys.TRAIN)
Decode from dataset on new checkpoint.
def continuous_decode_on_eval_data(self):
  """Decode from the EVAL split every time a new checkpoint appears.

  In mlperf mode, only checkpoints that have not been decoded yet are
  consumed, and decoding stops as soon as the success criterion is met.
  """
  timeout_mins = self._decode_hparams.decode_timeout_mins
  if self._hparams.mlperf_mode:
    ckpt_generator = next_undecoded_checkpoint(self._hparams.model_dir,
                                               timeout_mins)
  else:
    ckpt_generator = next_checkpoint(self._hparams.model_dir, timeout_mins)

  for ckpt in ckpt_generator:
    current_step = decoding.get_step_from_ckpt_path(ckpt)
    tf.logging.info("Decoding step %d" % current_step)
    if current_step == 0:
      # Skip checkpoint 0: nothing useful to evaluate yet.
      continue
    # None means "decode the latest checkpoint" (the non-mlperf default).
    checkpoint_path = None
    if self._hparams.mlperf_mode:
      self._decode_hparams.mlperf_decode_step = current_step
      checkpoint_path = ckpt
      mlperf_log.transformer_print(key=mlperf_log.EVAL_START)
    self.decode(
        dataset_split=tf.estimator.ModeKeys.EVAL,
        checkpoint_path=checkpoint_path)
    if (self._hparams.mlperf_mode and
        self._decode_hparams.mlperf_success):
      mlperf_log.transformer_print(
          key=mlperf_log.RUN_STOP, value={"success": "true"})
      break

  if (self._hparams.mlperf_mode and
      not self._decode_hparams.mlperf_success):
    # Generator exhausted (timeout) without reaching the target quality.
    mlperf_log.transformer_print(
        key=mlperf_log.RUN_STOP, value={"success": "false"})
Decode from file on new checkpoint.
def continuous_decode_from_file(self):
  """Decode from a file every time a new checkpoint appears."""
  timeout_mins = self._decode_hparams.decode_timeout_mins
  for _ in next_checkpoint(self._hparams.model_dir, timeout_mins):
    self.decode(decode_from_file=True)
Flatten dict of dicts into a single dict with appropriate prefixes.
Handles only 2 levels of nesting in the original dict.
Args:
original_dict: Dict which may contain one or more dicts.
Returns:
flat_dict: Dict without any nesting. Any dicts in the original dict have
their keys as prefixes in the new dict.
Raises:
ValueError if the original dict has more than two levels of nesting.
def _flatten_dict(original_dict):
"""Flatten dict of dicts into a single dict with appropriate prefixes.
Handles only 2 levels of nesting in the original dict.
Args:
original_dict: Dict which may contain one or more dicts.
Returns:
flat_dict: Dict without any nesting. Any dicts in the original dict have
their keys as prefixes in the new dict.
Raises:
ValueError if the original dict has more than two levels of nesting.
"""
flat_dict = {}
for key, value in original_dict.items():
if isinstance(value, dict):
for name, tensor in value.items():
if isinstance(tensor, dict):
raise ValueError("flatten_dict only handles 2 levels of nesting.")
flat_key = "__" + key + "_" + name
flat_dict[flat_key] = tensor
else:
flat_dict[key] = value
return flat_dict |
Returns a dict of dicts if any prefixes match keys in the flat dict.
The function handles the case where the prefix may not be a dict.
Args:
flat_dict: A dict without any nesting.
prefixes: A list of strings which may have been dicts in the
original structure.
def _unflatten_dict(flat_dict, prefixes):
"""Returns a dict of dicts if any prefixes match keys in the flat dict.
The function handles the case where the prefix may not be a dict.
Args:
flat_dict: A dict without any nesting.
prefixes: A list of strings which may have been dicts in the
original structure.
"""
original_dict = {}
for key, value in flat_dict.items():
prefix_found = False
for prefix in prefixes:
full_prefix = "__" + prefix + "_"
if key.startswith(full_prefix):
# Add a dict to the original dict with key=prefix
if prefix not in original_dict:
original_dict[prefix] = {}
original_dict[prefix][key[len(full_prefix):]] = value
prefix_found = True
break
if not prefix_found:
# No key matched a prefix in the for loop.
original_dict[key] = value
return original_dict |
Dummy vars for restore to work when not using TPU codepath.
def create_dummy_vars():
  """Create placeholder variables so restore works off the TPU codepath."""
  existing = set(v.name for v in tf.global_variables())
  if "losses_avg/problem_0/total_loss:0" in existing:
    # The dummy variables already exist; nothing to do.
    return
  with tf.variable_scope("losses_avg"):
    with tf.variable_scope("problem_0"):
      for name in ("total", "extra", "training"):
        tf.get_variable(
            "%s_loss" % name, initializer=100.0, trainable=False)
  with tf.variable_scope("train_stats"):
    tf.get_variable("problem_0_steps", initializer=0, trainable=False)
Create the metrics_fn that TPUEstimatorSpec expects.
def create_tpu_eval_metrics_fn(problem, model_hparams):
  """Create the metrics_fn that TPUEstimatorSpec expects.

  Args:
    problem: the Problem whose eval metric fns are wrapped.
    model_hparams: model hyperparameters; used to resolve target modalities.

  Returns:
    A function taking flattened `logits`/`labels`/`features` keyword
    arguments (see `_unflatten_dict`) and returning a dict of
    TPU-compatible metric ops.
  """

  def _features_kwargs(metric_fn, features):
    """Return {"features": features} only if metric_fn accepts it."""
    # inspect.getargspec was removed in Python 3.11; use getfullargspec
    # when available and fall back for Python 2 compatibility.
    if hasattr(inspect, "getfullargspec"):
      spec = inspect.getfullargspec(metric_fn)
      args, keywords = spec.args, spec.varkw
    else:
      args, _, keywords, _ = inspect.getargspec(metric_fn)  # pylint: disable=deprecated-method
    if ("features" in args) or keywords:
      return {"features": features}
    return {}

  metric_fns = []
  eval_metrics = problem.eval_metric_fns(model_hparams)

  tm = _create_target_modality(problem.get_hparams(model_hparams).modality)
  if isinstance(tm, dict):
    for k, v in six.iteritems(tm):
      weights_fn = modalities.get_weights_fn(v)

      def make_metric_fn(metric_fn):
        # Bind weights_fn as a default arg to capture the current loop value.
        def wrapped_metric_fn(logits, labels, features, weights_fn=weights_fn):
          kwargs = _features_kwargs(metric_fn, features)
          num, den = metric_fn(logits, labels, weights_fn=weights_fn, **kwargs)
          return tf.metrics.mean(num, den)

        return wrapped_metric_fn

      for metric, metric_fn in six.iteritems(eval_metrics):
        if metric in TPU_METRIC_BLACKLIST:
          log_warn("Skipping eval metric %s in TPU_METRIC_BLACKLIST", metric)
          continue
        name = "%s/metrics-%s/%s" % (k, problem.name, metric)
        metric_fns.append((name, make_metric_fn(metric_fn)))
  else:
    weights_fn = modalities.get_weights_fn(tm)

    def make_metric_fn(metric_fn):
      def wrapped_metric_fn(logits, labels, features):
        kwargs = _features_kwargs(metric_fn, features)
        num, den = metric_fn(logits, labels, weights_fn=weights_fn, **kwargs)
        return tf.metrics.mean(num, den)

      return wrapped_metric_fn

    for metric, metric_fn in six.iteritems(eval_metrics):
      if metric in TPU_METRIC_BLACKLIST:
        log_warn("Skipping eval metric %s in TPU_METRIC_BLACKLIST", metric)
        continue
      name = "metrics-%s/%s" % (problem.name, metric)
      metric_fns.append((name, make_metric_fn(metric_fn)))

  def all_metrics_fn(**kwargs):
    """Construct metrics dictionary."""
    original_kwargs = _unflatten_dict(kwargs, prefixes=["logits", "features"])
    del kwargs
    logits = original_kwargs["logits"]
    labels = original_kwargs["labels"]
    features = original_kwargs["features"]
    del original_kwargs

    metrics_dict = {}
    for name, fn in metric_fns:
      if isinstance(logits, dict) and isinstance(labels, dict):
        # Evaluate each target key against its matching labels.
        for k, v in six.iteritems(logits):
          metrics_dict["%s/%s" % (k, name)] = fn(v, labels[k], features)
      elif isinstance(logits, dict):
        tf.logging.warning("Logits is a dict, but labels is not; only "
                           "evaluating logits['targets'] against labels.")
        metrics_dict["%s/%s" % ("targets", name)] = fn(logits["targets"],
                                                       labels, features)
      else:
        metrics_dict[name] = fn(logits, labels, features)
    return metrics_dict

  return all_metrics_fn
Remove summaries from the default graph.
def remove_summaries():
  """Empty the SUMMARIES collection of the default graph."""
  graph = tf.get_default_graph()
  collection_key = tf.GraphKeys.SUMMARIES
  log_debug("Remove summaries %s" % str(graph.get_collection(collection_key)))
  # Clear in place via the mutable collection reference.
  graph.get_collection_ref(collection_key)[:] = []
  assert not graph.get_collection(collection_key)
Construct a host_call writing scalar summaries.
Args:
model_dir: String containing path to train
Returns:
(fn, args) Pair to be called by TPUEstimator as the host_call.
def create_host_call(model_dir):
  """Construct a host_call writing scalar summaries.

  Collects the graph's SUMMARIES collection, keeps the supported summary
  ops, and builds a (fn, args) pair that TPUEstimator will invoke on the
  CPU host to write those summaries via tf.contrib.summary.

  Args:
    model_dir: String containing path to train

  Returns:
    (fn, args) Pair to be called by TPUEstimator as the host_call, or
    None when no supported summaries were found.
  """
  graph = tf.get_default_graph()
  summaries = graph.get_collection(tf.GraphKeys.SUMMARIES)
  # Global step as a shape-[1] int32 tensor, as required by the outfeed.
  gs_t = tf.reshape(tf.to_int32(tf.train.get_global_step()), [1])
  summary_kwargs = collections.OrderedDict()
  for t in summaries:
    # TODO(aidangomez): enable ImageSummary support when we have a faster method
    # see @shibow's comment in cl/202344570
    if t.op.type not in ["ScalarSummary"]:
      tf.logging.warn("Ignoring unsupported tf.Summary type %s" % t.op.type)
      continue
    name = t.op.name
    # Input 1 of a *Summary op is the summarized value tensor.
    tensor = t.op.inputs[1]
    if t.op.type == "ScalarSummary":
      assert tensor.shape.is_compatible_with([])
      if tensor.dtype == tf.int64:
        # int64 is not supported on the outfeed; narrow to int32.
        tensor = tf.to_int32(tensor)
      summary_kwargs["ScalarSummary" + name] = tf.reshape(tensor, [1])
    elif t.op.type == "ImageSummary":
      # NOTE(review): this branch is unreachable — the filter above skips
      # every op type except "ScalarSummary". Kept for when ImageSummary
      # support is re-enabled (see TODO above).
      # TODO(aidangomez): as we move to support more types, update
      # common_layers.tpu_safe_image_summary
      if tensor.dtype != tf.float32:
        tf.logging.warn(
            "Currently T2T on TPU only supports ImageSummary of "
            "tf.float32-type Tensors. Skipping Tensor "
            "%s with dtype %s..." % (tensor.name, tensor.dtype))
        continue
      # tensor = tf.to_float(tensor)
      summary_kwargs["ImageSummary" + name] = tensor
  # When no supported summaries are found, don't create host_call. Otherwise,
  # TPU outfeed queue would enqueue global_step while host_call doesn't dequeue
  # it, eventually causing hang.
  if not summary_kwargs:
    return None
  summary_kwargs["global_step"] = gs_t
  log_info("summary_kwargs %s" % str(summary_kwargs))

  def host_call_fn(**kwargs):
    """Training host call. Creates summaries for training metrics.

    Args:
      **kwargs: Dict of {str: Tensor} , with `Tensor` of shape `[batch]`. Must
        contain key "global_step" with value of current global_step Tensor.

    Returns:
      List of summary ops to run on the CPU host.
    """
    gs = tf.to_int64(kwargs.pop("global_step")[0])
    with tf.contrib.summary.create_file_writer(model_dir).as_default():
      with tf.contrib.summary.always_record_summaries():
        # We need to use tf.contrib.summary in order to feed the `step`.
        for name, value in sorted(six.iteritems(kwargs)):
          if name.startswith("ScalarSummary"):
            name = name[len("ScalarSummary"):]
            tf.contrib.summary.scalar(
                name, tf.reduce_mean(tf.to_float(value)), step=gs)
          elif name.startswith("ImageSummary"):
            name = name[len("ImageSummary"):]
            tf.contrib.summary.image(name, value, step=gs)
        return tf.contrib.summary.all_summary_ops()

  return (host_call_fn, summary_kwargs)
Average losses across datashards.
Args:
sharded_losses: list<dict<str loss_name, Tensor loss>>. The loss
can be a single Tensor or a 2-tuple (numerator and denominator).
Returns:
losses: dict<str loss_name, Tensor avg_loss>
def average_sharded_losses(sharded_losses):
  """Average losses across datashards.

  Args:
    sharded_losses: list<dict<str loss_name, Tensor loss>>. Each loss is
      either a single Tensor or a (numerator, denominator) 2-tuple.

  Returns:
    losses: dict<str loss_name, Tensor avg_loss>
  """
  averaged = {}
  for loss_name in sorted(sharded_losses[0]):
    per_shard = [shard[loss_name] for shard in sharded_losses]
    if isinstance(per_shard[0], tuple):
      # Weighted average: total numerator over total denominator,
      # with the denominator clipped to at least 1 to avoid div-by-zero.
      numerators, denominators = zip(*per_shard)
      total_den = tf.maximum(
          tf.cast(1.0, denominators[0].dtype), tf.add_n(denominators))
      averaged[loss_name] = tf.add_n(numerators) / total_den
    else:
      averaged[loss_name] = tf.reduce_mean(per_shard)
  return averaged
Generate summaries for features.
def summarize_features(features, num_shards=1):
  """Add scalar summaries describing each multi-dimensional input feature."""
  if not common_layers.should_generate_summaries():
    return
  with tf.name_scope("input_stats"):
    for name, feature in sorted(six.iteritems(features)):
      # Only summarize non-string Tensors with more than one dimension.
      if (not isinstance(feature, tf.Tensor) or
          not feature.get_shape().ndims > 1 or feature.dtype == tf.string):
        continue
      tf.summary.scalar("%s_batch" % name, tf.shape(feature)[0] // num_shards)
      tf.summary.scalar("%s_length" % name, tf.shape(feature)[1])
      nonpadding = tf.to_float(tf.not_equal(feature, 0))
      tf.summary.scalar("%s_nonpadding_tokens" % name,
                        tf.reduce_sum(nonpadding))
      tf.summary.scalar("%s_nonpadding_fraction" % name,
                        tf.reduce_mean(nonpadding))
Compose two custom getters.
Example use:
tf.get_variable_scope().set_custom_getter(
compose_custom_getters(tf.get_variable_scope().custom_getter, new_getter))
This composes getters in the same way as creating a new variable scope with
the new_getter, but it does not actually create a new variable scope.
Args:
getter_a: a custom getter - generally from the existing variable scope.
getter_b: a custom getter
Returns:
a custom getter
def _compose_custom_getters(getter_a, getter_b):
"""Compose two custom getters.
Example use:
tf.get_variable_scope().set_custom_getter(
compose_custom_getters(tf.get_variable_scope().custom_getter, new_getter))
This composes getters in the same way as creating a new variable scope with
the new_getter, but it does not actually create a new variable scope.
Args:
getter_a: a custom getter - generally from the existing variable scope.
getter_b: a custom getter
Returns:
a custom getter
"""
if not getter_a:
return getter_b
if not getter_b:
return getter_a
def getter_fn(getter, *args, **kwargs):
return getter_b(functools.partial(getter_a, getter), *args, **kwargs)
return getter_fn |
Set a custom getter in the current variable scope.
Do not overwrite the existing custom getter - rather compose with it.
Args:
custom_getter: a custom getter.
def set_custom_getter_compose(custom_getter):
  """Set a custom getter in the current variable scope.

  Does not overwrite any existing custom getter - composes with it instead.

  Args:
    custom_getter: a custom getter.
  """
  scope = tf.get_variable_scope()
  scope.set_custom_getter(
      _compose_custom_getters(scope.custom_getter, custom_getter))
Initialize variables from given directory.
def initialize_from_ckpt(ckpt_dir, hparams):
  """Initialize trainable variables from the checkpoint in `ckpt_dir`.

  Skipped entirely when the model dir already has its own checkpoint, so
  an in-progress training run is never clobbered.
  """
  model_dir = hparams.get("model_dir", None)
  if model_dir and tf.train.latest_checkpoint(model_dir) is not None:
    return
  tf.logging.info("Checkpoint dir: %s", ckpt_dir)
  reader = tf.contrib.framework.load_checkpoint(ckpt_dir)
  variable_map = {}
  for var in tf.contrib.framework.get_trainable_variables():
    # Strip the ":0" output suffix to match checkpoint tensor names.
    name = var.name.split(":")[0]
    if reader.has_tensor(name):
      tf.logging.info("Loading variable from checkpoint: %s", name)
      variable_map[name] = var
    else:
      tf.logging.info("Cannot find variable in checkpoint, skipping: %s",
                      name)
  tf.train.init_from_checkpoint(ckpt_dir, variable_map)
Whether the target modality is real-valued.
def _target_modality_is_real(self):
  """Whether the target modality is real-valued."""
  vocab_size = self._problem_hparams.vocab_size["targets"]
  if vocab_size is not None and hasattr(self._hparams, "vocab_divisor"):
    # Round the vocab size up to a multiple of vocab_divisor.
    vocab_size += (-vocab_size) % self._hparams.vocab_divisor
  modality = self._problem_hparams.modality["targets"]
  name_fn = self._hparams.name.get("targets", modalities.get_name(modality))
  # Real-valued modalities have names starting with "real".
  return name_fn(self._hparams, vocab_size).startswith("real")
Estimator model_fn sharded along batch dimension.
Args:
sharded_features: {str: [Tensor]}. Features sharded along batch dimension.
Each list is the same length (== number of shards).
Returns:
sharded_logits: [Tensor]. Logits for each shard of examples.
losses: {str: 0-D Tensor}. Loss averaged across shards.
def model_fn_sharded(self, sharded_features):
  """Estimator model_fn sharded along batch dimension.

  Args:
    sharded_features: {str: [Tensor]}. Features sharded along batch dimension.
      Each list is the same length (== number of shards).

  Returns:
    sharded_logits: [Tensor]. Logits for each shard of examples.
    losses: {str: 0-D Tensor}. Loss averaged across shards.
  """
  dp = self._data_parallelism
  # [{str: Tensor}]. Transpose of 'sharded_features'.
  datashard_to_features = self._to_features_per_datashard(sharded_features)
  if self.use_body_sharded():
    if self.hparams.scheduled_sampling_prob > 0.0:
      raise NotImplementedError(
          "Scheduled sampling for non-sharded body only.")

    # MoE models override body_sharded
    transformed_features = dp(self.bottom, datashard_to_features)
    body_out = self.body_sharded(
        self._to_single_features_dict(transformed_features))
    body_out, losses = self._normalize_body_output(body_out)
    if "training" in losses:
      # Body already produced a training loss; skip top and loss layers.
      log_info("Skipping T2TModel top and loss because training loss "
               "returned from body")
      sharded_logits = body_out
    else:
      if isinstance(body_out, dict):
        # One logits/loss pair per target key, each computed per shard.
        sharded_logits = collections.OrderedDict()
        sharded_losses = collections.OrderedDict()
        for k, v in sorted(six.iteritems(body_out)):
          sharded_logits[k] = dp(self.top, v, datashard_to_features)
          sharded_losses[k] = dp(self.loss, sharded_logits[k],
                                 datashard_to_features)
        # NOTE(review): each list element below is a *generator* of
        # {"training": loss} dicts, while average_sharded_losses indexes
        # and sorts its argument's first element — verify this branch is
        # actually exercised before relying on it.
        training_loss_dict = average_sharded_losses([({
            "training": l
        } for l in loss) for loss in sharded_losses.values()])
        losses.update(training_loss_dict)
      else:
        sharded_logits = dp(self.top, body_out, datashard_to_features)
        sharded_losses = dp(self.loss, sharded_logits, datashard_to_features)
        if isinstance(sharded_losses, tuple):
          # Re-pair per-shard (numerator, denominator) tuples.
          nums, dens = sharded_losses
          sharded_losses = zip(nums, dens)
        training_loss_dict = average_sharded_losses([{
            "training": loss
        } for loss in sharded_losses])
        losses.update(training_loss_dict)
  else:
    sharded_logits, sharded_losses = dp(self.model_fn, datashard_to_features)
    sharded_logits, sharded_losses = dp(
        self.maybe_scheduled_sampling,
        datashard_to_features, sharded_logits, sharded_losses)
    if isinstance(sharded_logits[0], dict):
      # Convert a per-shard list of dicts into a dict of per-shard lists.
      temp_dict = {k: [] for k, _ in six.iteritems(sharded_logits[0])}
      for k, _ in six.iteritems(sharded_logits[0]):
        for l in sharded_logits:
          temp_dict[k].append(l[k])
      sharded_logits = temp_dict
    losses = average_sharded_losses(sharded_losses)
  return sharded_logits, losses
Transforms features to feed into body.
Args:
features: dict of str to Tensor. Typically it is the preprocessed data
batch after Problem's preprocess_example().
Returns:
transformed_features: dict of same key-value pairs as features. The value
Tensors are newly transformed.
def bottom(self, features):
  """Transforms features to feed into body.

  Each feature that has a modality in the problem hparams is run through
  its bottom transformation inside a dedicated variable scope; features
  without a modality are passed through unchanged.

  Args:
    features: dict of str to Tensor. Typically it is the preprocessed data
      batch after Problem's preprocess_example().

  Returns:
    transformed_features: dict of same key-value pairs as features. The value
      Tensors are newly transformed.
  """
  if not self._problem_hparams:
    log_warn("Without a Problem, T2TModel.bottom is a passthrough.")
    return features

  transformed_features = collections.OrderedDict()
  all_previous_modalities = []
  target_modality = _create_target_modality(self._problem_hparams.modality)

  # Transform features via its corresponding modality.
  for feature_name, modality in sorted(
      six.iteritems(self._problem_hparams.modality)):
    if feature_name not in features:
      tf.logging.warning("Missing feature %s - ignoring." % feature_name)
      continue
    vocab_size = self._problem_hparams.vocab_size[feature_name]
    if vocab_size is not None and hasattr(self._hparams, "vocab_divisor"):
      # Round vocab size up to a multiple of vocab_divisor.
      vocab_size += (-vocab_size) % self._hparams.vocab_divisor
    modality_name = self._hparams.name.get(
        feature_name,
        modalities.get_name(modality))(self._hparams, vocab_size)
    # Use if-else clauses to preserve behavior of previous changes: namely,
    # the variable scope name for the targets feature if there is only one
    # target modality; and to reuse variable scopes for only input modalities.
    if feature_name in target_modality:
      if len(target_modality) > 1:
        variable_scope_name = "%s/%s" % (modality_name, feature_name)
      else:
        variable_scope_name = modality_name
      bottom = self._hparams.bottom.get(
          feature_name,
          modalities.get_targets_bottom(modality))
      # TODO(aidangomez): share variables?
      with tf.variable_scope(variable_scope_name) as vs:
        self._add_variable_scope(variable_scope_name, vs)
        log_info("Transforming feature '%s' with %s.targets_bottom",
                 feature_name,
                 modality_name)
        transformed_features[feature_name] = bottom(features[feature_name],
                                                    self._hparams,
                                                    vocab_size)
    else:
      bottom = self._hparams.bottom.get(feature_name,
                                        modalities.get_bottom(modality))
      # Reuse the scope when the same input modality appears again.
      do_reuse = modality_name in all_previous_modalities
      with tf.variable_scope(modality_name, reuse=do_reuse) as vs:
        self._add_variable_scope(modality_name, vs)
        log_info("Transforming feature '%s' with %s.bottom",
                 feature_name,
                 modality_name)
        transformed_features[feature_name] = bottom(features[feature_name],
                                                    self._hparams,
                                                    vocab_size)
      all_previous_modalities.append(modality_name)

  for key in features:
    if key not in transformed_features:
      # For features without a modality, we pass them along as is
      transformed_features[key] = features[key]
    else:
      # Other features get passed along with the "raw" suffix
      transformed_features[key + "_raw"] = features[key]

  return transformed_features
Computes logits given body output and features.
Args:
body_output: dict of str to Tensor, comprising one key-value pair for each
target. Each value denotes the target's pre-logit activations.
Alternatively, it may be a single Tensor denoting the pre-logits for
that target.
features: dict of str to Tensor. Typically it is the preprocessed data
batch after Problem's preprocess_example().
Returns:
logits: dict of str to Tensor, denoting each logits for each target; or
a single Tensor denoting the logits for that target.
When targets are generated at training time:
logits == {
"self_generated_targets": <generated targets tensor>
"logits": <original logits Tensor or dict>
}
def top(self, body_output, features):
  """Computes logits given body output and features.

  Args:
    body_output: dict of str to Tensor, comprising one key-value pair for each
      target. Each value denotes the target's pre-logit activations.
      Alternatively, it may be a single Tensor denoting the pre-logits for
      that target.
    features: dict of str to Tensor. Typically it is the preprocessed data
      batch after Problem's preprocess_example().

  Returns:
    logits: dict of str to Tensor, denoting each logits for each target; or
      a single Tensor denoting the logits for that target.
      When targets are generated at training time:
        logits == {
          "self_generated_targets": <generated targets tensor>
          "logits": <original logits Tensor or dict>
        }
  """
  if not isinstance(body_output, dict):
    # Single target: compute logits for "targets" directly.
    return self._top_single(body_output, "targets", features)
  logits = {}
  for target_key, output in six.iteritems(body_output):
    # TODO(aidangomez): share variables here?
    with tf.variable_scope(target_key) as top_vs:
      self._add_variable_scope("top_%s" % target_key, top_vs)
      logits[target_key] = self._top_single(output, target_key, features)
  return logits
Return a training op minimizing loss.
def optimize(self, loss, num_async_replicas=1, use_tpu=False):
  """Return a training op minimizing loss."""
  lr = learning_rate.learning_rate_schedule(self.hparams)
  if num_async_replicas > 1:
    # Scale the learning rate down by sqrt(#replicas) for async training.
    log_info("Dividing learning rate by num_async_replicas: %d",
             num_async_replicas)
    lr /= math.sqrt(float(num_async_replicas))
  return optimize.optimize(loss, lr, self.hparams, use_tpu=use_tpu)
Set hparams with the given mode.
def set_mode(self, mode):
  """Set hparams with the given mode."""
  log_info("Setting T2TModel mode to '%s'", mode)
  hparams = hparams_lib.copy_hparams(self._original_hparams)
  hparams.add_hparam("mode", mode)
  if mode != tf.estimator.ModeKeys.TRAIN:
    # When not in training mode, zero out all forms of dropout and
    # label smoothing.
    to_zero = [k for k in hparams.values()
               if k.endswith("dropout") or k == "label_smoothing"]
    for key in to_zero:
      log_info("Setting hparams.%s to 0.0", key)
      setattr(hparams, key, 0.0)
  self._hparams = hparams
Autoregressive eval.
Quadratic time in decode_length.
Args:
features: an map of string to `Tensor`
decode_length: an integer. How many additional timesteps to decode.
Returns:
logits: `Tensor`
losses: a dictionary: {loss-name (string): floating point `Scalar`}.
Contains a single key "training".
def eval_autoregressive(self, features=None, decode_length=50):
  """Autoregressive eval.

  Quadratic time in decode_length.

  Args:
    features: an map of string to `Tensor`
    decode_length: an integer. How many additional timesteps to decode.

  Returns:
    logits: `Tensor`
    losses: a dictionary: {loss-name (string): floating point `Scalar`}.
      Contains a single key "training".
  """
  decoded = self._slow_greedy_infer(features, decode_length=decode_length)
  return decoded["logits"], decoded["losses"]
A inference method.
Quadratic time in decode_length.
Args:
features: an map of string to `Tensor`
decode_length: an integer. How many additional timesteps to decode.
beam_size: number of beams.
top_beams: an integer. How many of the beams to return.
alpha: Float that controls the length penalty. larger the alpha, stronger
the preference for longer translations.
use_tpu: bool, whether to build the inference graph for TPU.
Returns:
A dict of decoding results {
"outputs": integer `Tensor` of decoded ids of shape
[batch_size, <= decode_length] if beam_size == 1 or
[batch_size, top_beams, <= decode_length]
"scores": decoding log probs from the beam search,
None if using greedy decoding (beam_size=1)
}
if slow greedy decoding is used then the dict will also contain {
"logits": `Tensor` of shape [batch_size, time, 1, 1, vocab_size].
"losses": a dictionary: {loss-name (string): floating point `Scalar`
}
def infer(self,
          features=None,
          decode_length=50,
          beam_size=1,
          top_beams=1,
          alpha=0.0,
          use_tpu=False):
  """A inference method.

  Quadratic time in decode_length.

  Args:
    features: an map of string to `Tensor`
    decode_length: an integer. How many additional timesteps to decode.
    beam_size: number of beams.
    top_beams: an integer. How many of the beams to return.
    alpha: Float that controls the length penalty. larger the alpha, stronger
      the preference for longer translations.
    use_tpu: bool, whether to build the inference graph for TPU.

  Returns:
    A dict of decoding results {
        "outputs": integer `Tensor` of decoded ids of shape
            [batch_size, <= decode_length] if beam_size == 1 or
            [batch_size, top_beams, <= decode_length]
        "scores": decoding log probs from the beam search,
            None if using greedy decoding (beam_size=1)
    }
    if slow greedy decoding is used then the dict will also contain {
        "logits": `Tensor` of shape [batch_size, time, 1, 1, vocab_size].
        "losses": a dictionary: {loss-name (string): floating point `Scalar`
    }
  """
  set_custom_getter_compose(self._custom_getter)
  with self._eager_var_store.as_default():
    # TODO(rsepassi): Make decoding work with real-valued model outputs
    # (i.e. if the target modality is RealModality).
    self.prepare_features_for_infer(features)
    if not self.has_input:
      if beam_size > 1:
        log_warn("Beam searching for a model with no inputs.")
      if self.hparams.sampling_method != "random":
        log_warn("Non-random sampling for a model with no inputs.")
    self._fill_problem_hparams_features(features)

    if self._problem_hparams:
      target_modality = self._problem_hparams.modality["targets"]
      if target_modality == modalities.ModalityType.CLASS_LABEL:
        # No use to run beam-search for a single class.
        beam_size = 1
    if beam_size == 1:
      log_info("Greedy Decoding")
      return self._greedy_infer(features, decode_length, use_tpu)
    log_info("Beam Decoding with beam size %d" % beam_size)
    return self._beam_decode(features, decode_length, beam_size,
                             top_beams, alpha, use_tpu)
Beam search decoding.
Models should ideally implement a more efficient version of this function.
Args:
features: an map of string to `Tensor`
decode_length: an integer. How many additional timesteps to decode.
beam_size: number of beams.
top_beams: an integer. How many of the beams to return.
alpha: Float that controls the length penalty. larger the alpha, stronger
the preference for longer translations.
use_tpu: A bool, whether to do beam decode on TPU.
Returns:
samples: an integer `Tensor`. Top samples from the beam search
def _beam_decode(self,
                 features,
                 decode_length,
                 beam_size,
                 top_beams,
                 alpha,
                 use_tpu=False):
  """Beam search decoding.

  Models should ideally implement a more efficient version of this function.

  Args:
    features: an map of string to `Tensor`
    decode_length: an integer. How many additional timesteps to decode.
    beam_size: number of beams.
    top_beams: an integer. How many of the beams to return.
    alpha: Float that controls the length penalty. larger the alpha, stronger
      the preference for longer translations.
    use_tpu: A bool, whether to do beam decode on TPU.

  Returns:
    samples: an integer `Tensor`. Top samples from the beam search
  """
  # Base implementation: delegate to the generic (slow) beam search.
  return self._beam_decode_slow(features, decode_length, beam_size,
                                top_beams, alpha, use_tpu)
Slow version of Beam search decoding.
Quadratic time in decode_length.
Args:
features: an map of string to `Tensor`
decode_length: an integer. How many additional timesteps to decode.
beam_size: number of beams.
top_beams: an integer. How many of the beams to return.
alpha: Float that controls the length penalty. larger the alpha, stronger
the preference for longer translations.
use_tpu: A bool, whether to do slow beam decode on TPU.
Returns:
samples: an integer `Tensor`. Top samples from the beam search.
Raises:
NotImplementedError: If use_tpu is set to true.
def _beam_decode_slow(self, features, decode_length, beam_size, top_beams,
                      alpha, use_tpu=False):
  """Slow version of Beam search decoding.

  Quadratic time in decode_length.

  Args:
    features: an map of string to `Tensor`
    decode_length: an integer. How many additional timesteps to decode.
    beam_size: number of beams.
    top_beams: an integer. How many of the beams to return.
    alpha: Float that controls the length penalty. larger the alpha, stronger
      the preference for longer translations.
    use_tpu: A bool, whether to do slow beam decode on TPU.

  Returns:
    samples: an integer `Tensor`. Top samples from the beam search.

  Raises:
    NotImplementedError: If use_tpu is set to true.
  """
  batch_size = common_layers.shape_list(features["inputs"])[0]

  def symbols_to_logits_fn(ids, i=None):
    """Go from ids to logits."""
    # Expand ids to the [batch, length, 1, 1] layout the model expects.
    ids = tf.expand_dims(tf.expand_dims(ids, axis=2), axis=3)
    ids = tf.pad(ids[:, 1:], [[0, 0], [0, 1], [0, 0], [0, 0]])
    if "partial_targets" in features:
      # Prepend the partial targets (tiled across beams) to the decoded ids.
      pt = features["partial_targets"]
      pt_length = common_layers.shape_list(pt)[1]
      pt = tf.tile(pt, [1, beam_size])
      pt = tf.reshape(pt, [batch_size * beam_size, pt_length, 1, 1])
      ids = tf.concat([pt, ids], axis=1)

    features["targets"] = ids
    if i is not None:
      features["decode_loop_step"] = i
    self._coverage = None
    logits, _ = self(features)  # pylint: disable=not-callable
    # now self._coverage is a coverage tensor for the first datashard.
    # it has shape [batch_size] and contains floats between 0 and
    # source_length.
    if self._problem_hparams:
      modality = self._problem_hparams.modality["targets"]
      top = self._hparams.top.get("targets", modalities.get_top(modality))
      if getattr(top, "pointwise", False):
        # Pointwise tops emit one logit per position; no slicing needed.
        return tf.squeeze(logits, axis=[1, 2, 3])
    # -1 due to the pad above.
    current_output_position = common_layers.shape_list(ids)[1] - 1
    logits = logits[:, current_output_position, :, :]
    return tf.squeeze(logits, axis=[1, 2])

  def _clone_examples_for_beam(old_feature, n):
    """Clone each example n times."""
    old_shape = common_layers.shape_list(old_feature)
    assert len(old_shape) >= 1

    # Expand the inputs in to the beam size.
    feature = tf.expand_dims(old_feature, 1)
    feature = tf.tile(feature, [1, n] + [1] * (len(old_shape) - 1))
    new_shape = common_layers.shape_list(feature)
    # Fold the beam dimension into the batch dimension.
    feature = tf.reshape(feature,
                         [new_shape[0] * new_shape[1]] + new_shape[2:])
    return feature

  initial_ids = tf.zeros([batch_size], dtype=tf.int32)

  # Clone select features multiple times to account for beam size.
  old_features = {}
  for feature_name in ["inputs", "knowledge"]:
    if feature_name not in features:
      continue
    old_features[feature_name] = features[feature_name]
    features[feature_name] = _clone_examples_for_beam(
        features[feature_name], beam_size)

  vocab_size = self._problem_hparams.vocab_size["targets"]
  if vocab_size is not None and hasattr(self._hparams, "vocab_divisor"):
    # Round vocab size up to a multiple of vocab_divisor.
    vocab_size += (-vocab_size) % self._hparams.vocab_divisor

  # Setting decode length to input length + decode_length
  if "partial_targets" not in features:
    inputs = features["inputs"]
    decode_length = (common_layers.shape_list(inputs)[1] +
                     features.get("decode_length", decode_length))
  ids, scores, _ = beam_search.beam_search(
      symbols_to_logits_fn,
      initial_ids,
      beam_size,
      decode_length,
      vocab_size,
      alpha,
      stop_early=(top_beams == 1),
      use_tpu=use_tpu)

  # Set features back to the unexpanded form to not to confuse the
  # Estimator!
  features.update(old_features)

  # Return `top_beams` decodings (also remove initial id from the beam search)
  # TODO(lukaszkaiser): make it work multi-problem.
  if top_beams == 1:
    samples = ids[:, 0, 1:]
  else:
    samples = ids[:, :top_beams, 1:]

  return {"outputs": samples, "scores": scores}
A greedy inference method.
Models should ideally implement a more efficient version of this function.
Args:
features: an map of string to `Tensor`
decode_length: an integer. How many additional timesteps to decode.
use_tpu: A bool, whether to build the inference graph for TPU.
Returns:
A dict of decoding results {
"outputs": integer `Tensor` of decoded ids of shape
[batch_size, <= decode_length] if beam_size == 1 or
[batch_size, top_beams, <= decode_length]
"scores": None
"logits": `Tensor` of shape [batch_size, time, 1, 1, vocab_size].
"losses": a dictionary: {loss-name (string): floating point `Scalar`}
}
def _greedy_infer(self, features, decode_length, use_tpu=False):
  """A greedy inference method.

  Models should ideally implement a more efficient version of this function.

  Args:
    features: a map of string to `Tensor`
    decode_length: an integer. How many additional timesteps to decode.
    use_tpu: A bool, whether to build the inference graph for TPU.

  Returns:
    A dict of decoding results {
        "outputs": integer `Tensor` of decoded ids of shape
            [batch_size, <= decode_length] if beam_size == 1 or
            [batch_size, top_beams, <= decode_length]
        "scores": None
        "logits": `Tensor` of shape [batch_size, time, 1, 1, vocab_size].
        "losses": a dictionary: {loss-name (string): floating point `Scalar`}
    }
  """
  # Select the device-appropriate slow implementation and delegate to it.
  infer_fn = self._slow_greedy_infer_tpu if use_tpu else self._slow_greedy_infer
  return infer_fn(features, decode_length)
A slow greedy inference method on TPU.
Quadratic time in decode_length.
Args:
features: A map of string to `Tensor`.
decode_length: An integer, how many additional timesteps to decode.
Returns:
A dict of decoding results {
"outputs": integer `Tensor` of decoded ids of shape
[batch_size, <= decode_length] if beam_size == 1 or
[batch_size, top_beams, <= decode_length]
"scores": None
"logits": `Tensor` of shape [batch_size, time, 1, 1, vocab_size].
"losses": a dictionary: {loss-name (string): floating point `Scalar`}
}
def _slow_greedy_infer_tpu(self, features, decode_length):
  """A slow greedy inference method on TPU.

  Quadratic time in decode_length: the model is re-run over the whole prefix
  for every generated timestep.

  Args:
    features: A map of string to `Tensor`.
    decode_length: An integer, how many additional timesteps to decode.

  Returns:
    A dict of decoding results {
        "outputs": integer `Tensor` of decoded ids of shape
            [batch_size, <= decode_length] if beam_size == 1 or
            [batch_size, top_beams, <= decode_length]
        "scores": None
        "logits": `Tensor` of shape [batch_size, time, 1, 1, vocab_size].
        "losses": a dictionary: {loss-name (string): floating point `Scalar`}
    }
  """
  if not features:
    features = {}
  inputs_old = None
  # Inputs are expected rank-4 ([batch, length, 1, 1]); expand if needed and
  # remember the original so it can be restored for the Estimator afterwards.
  if "inputs" in features and len(features["inputs"].shape) < 4:
    inputs_old = features["inputs"]
    features["inputs"] = tf.expand_dims(features["inputs"], 2)
  if not self.has_input:
    # Prepare partial targets.
    # In either features["inputs"] or features["targets"].
    # We force the outputs to begin with these sequences.
    partial_targets = features.get("inputs")
    if partial_targets is None:
      partial_targets = features["targets"]
    features["partial_targets"] = tf.to_int64(partial_targets)
  # Save the targets in a var and reassign it after the tf.while loop to avoid
  # having targets being in a 'while' frame. This ensures targets when used
  # in metric functions stays in the same frame as other vars.
  targets_old = features.get("targets", None)

  target_modality = self._problem_hparams.modality["targets"]

  def infer_step(i, recent_output, recent_logits, unused_loss):
    """Inference step: append the token sampled at position i."""
    if not tf.executing_eagerly():
      recent_output.set_shape([None, None, None, 1])
    padded = tf.pad(recent_output, [[0, 0], [0, 1], [0, 0], [0, 0]])
    features["targets"] = padded
    # This is inefficient in that it generates samples at all timesteps,
    # not just the last one, except if target_modality is pointwise.
    features["decode_loop_step"] = i
    samples, logits, losses = self.sample(features)
    # Concatenate the already-generated recent_output with last timestep
    # of the newly-generated samples.
    top = self._hparams.top.get("targets",
                                modalities.get_top(target_modality))
    if getattr(top, "pointwise", False):
      cur_sample = samples[:, -1, :, :]
    else:
      cur_sample = samples[:, i, :, :]
    # alias_inplace_update writes along the leading axis, so transpose time
    # to the front, update row i, and transpose back.
    samples = tf.transpose(recent_output, perm=[1, 0, 2, 3])
    samples = inplace_ops.alias_inplace_update(samples, i,
                                               tf.to_int64(cur_sample))
    samples = tf.transpose(samples, perm=[1, 0, 2, 3])
    if not tf.executing_eagerly():
      samples.set_shape([None, None, None, 1])

    # Assuming we have one shard for logits.
    recent_logits = tf.transpose(recent_logits, perm=[1, 0, 2, 3, 4])
    recent_logits = inplace_ops.alias_inplace_update(
        recent_logits, i, tf.squeeze(logits[:, -1:], axis=1))
    logits = tf.transpose(recent_logits, perm=[1, 0, 2, 3, 4])
    loss = sum([l for l in losses.values() if l is not None])
    return i + 1, samples, logits, loss

  # Create an initial output tensor. This will be passed
  # to the infer_step, which adds one timestep at every iteration.
  if "partial_targets" in features:
    initial_output = tf.to_int64(features["partial_targets"])
    while len(initial_output.get_shape().as_list()) < 4:
      initial_output = tf.expand_dims(initial_output, 2)
    batch_size = common_layers.shape_list(initial_output)[0]
  else:
    batch_size = common_layers.shape_list(features["inputs"])[0]
    initial_output = tf.zeros((batch_size, 0, 1, 1), dtype=tf.int64)
  # Hack: foldl complains when the output shape is less specified than the
  # input shape, so we confuse it about the input shape.
  initial_output = tf.slice(initial_output, [0, 0, 0, 0],
                            common_layers.shape_list(initial_output))
  target_modality = self._problem_hparams.modality["targets"]
  # Class-label targets need exactly one decoding step; otherwise decode the
  # requested extra length beyond the prefix.
  if target_modality == modalities.ModalityType.CLASS_LABEL:
    decode_length = 1
  else:
    if "partial_targets" in features:
      prefix_length = common_layers.shape_list(features["partial_targets"])[1]
    else:
      prefix_length = common_layers.shape_list(features["inputs"])[1]
    decode_length = prefix_length + decode_length

  # Initial values of result, logits and loss.
  result = tf.concat(
      [initial_output,
       tf.zeros([batch_size, decode_length, 1, 1], tf.int64)],
      axis=1)
  # tensor padded to [batch_size, decode_length, 1, 1, vocab_size]
  vocab_size = self._problem_hparams.vocab_size["targets"]
  if vocab_size is not None and hasattr(self._hparams, "vocab_divisor"):
    vocab_size += (-vocab_size) % self._hparams.vocab_divisor
  logits = tf.zeros((batch_size, decode_length, 1, 1, vocab_size))
  if not tf.executing_eagerly():
    logits.set_shape([None, None, None, None, None])
  loss = 0.0

  def while_exit_cond(i, result, logits, loss):  # pylint: disable=unused-argument
    """Exit the loop either if reach decode_length or EOS."""
    not_overflow = i < decode_length

    if self._problem_hparams.stop_at_eos:

      def fn_not_eos():
        # Check if the last predicted element is a EOS
        return tf.reduce_any(
            tf.not_equal(
                tf.squeeze(result[:, -1, :, :]), text_encoder.EOS_ID))

      not_eos = tf.cond(
          # We only check for early stopping if there is at least 1 element (
          # otherwise not_eos will crash).
          tf.not_equal(i, 0),
          fn_not_eos,
          lambda: True,
      )

      return tf.cond(
          tf.equal(batch_size, 1),
          # If batch_size == 1, we check EOS for early stopping.
          lambda: tf.logical_and(not_overflow, not_eos),
          # Else, just wait for max length
          lambda: not_overflow)
    return not_overflow

  _, result, logits, loss = tf.while_loop(
      while_exit_cond,
      infer_step, [tf.constant(0), result, logits, loss],
      shape_invariants=[
          tf.TensorShape([]),
          tf.TensorShape([batch_size, decode_length, 1, 1]),
          tf.TensorShape([batch_size, decode_length, 1, 1, vocab_size]),
          tf.TensorShape([]),
      ],
      back_prop=False,
      parallel_iterations=1)
  if inputs_old is not None:  # Restore to not confuse Estimator.
    features["inputs"] = inputs_old
  # Reassign targets back to the previous value.
  if targets_old is not None:
    features["targets"] = targets_old
  losses = {"training": loss}
  # Strip the forced prefix off the result so only new tokens are returned.
  if "partial_targets" in features:
    partial_target_length = common_layers.shape_list(
        features["partial_targets"])[1]
    result = tf.slice(result, [0, partial_target_length, 0, 0],
                      [-1, -1, -1, -1])
  return {
      "outputs": result,
      "scores": None,
      "logits": logits,
      "losses": losses,
  }
Run the model and extract samples.
Args:
features: a map of string to `Tensor`.
Returns:
samples: an integer `Tensor`.
logits: a list of `Tensor`s, one per datashard.
losses: a dictionary: {loss-name (string): floating point `Scalar`}.
def sample(self, features):
  """Run the model and extract samples.

  Args:
    features: a map of string to `Tensor`.

  Returns:
    samples: an integer `Tensor`.
    logits: a list of `Tensor`s, one per datashard.
    losses: a dictionary: {loss-name (string): floating point `Scalar`}.
  """
  logits, losses = self(features)  # pylint: disable=not-callable
  if self._target_modality_is_real:
    # Real-valued modality: the raw model outputs are the samples.
    return logits, logits, losses
  sampling_method = self.hparams.sampling_method
  if sampling_method == "argmax":
    samples = tf.argmax(logits, axis=-1)
  else:
    assert sampling_method == "random"
    # Temperature-scaled multinomial sampling over the vocab dimension:
    # flatten to [batch*..., vocab], draw one id per row, restore the shape.
    temperature = self.hparams.sampling_temp
    logits_shape = common_layers.shape_list(logits)
    flat_logits = tf.reshape(logits, [-1, logits_shape[-1]]) / temperature
    flat_choices = tf.multinomial(flat_logits, 1)
    samples = tf.reshape(flat_choices, logits_shape[:-1])
  return samples, logits, losses
Model fn for Estimator.
Args:
hparams: HParams, model hyperparameters
features: dict<str name, Tensor feature>
labels: Tensor
mode: tf.estimator.ModeKeys
config: RunConfig, possibly with data_parallelism attribute
params: dict, may include batch_size, use_tpu
decode_hparams: HParams, used when mode == PREDICT.
use_tpu: A bool, whether to build the inference graph for TPU.
Returns:
TPUEstimatorSpec if use tpu else EstimatorSpec
def estimator_model_fn(cls,
                       hparams,
                       features,
                       labels,
                       mode,
                       config=None,
                       params=None,
                       decode_hparams=None,
                       use_tpu=False):
  """Model fn for Estimator.

  Args:
    hparams: HParams, model hyperparameters
    features: dict<str name, Tensor feature>
    labels: Tensor
    mode: tf.estimator.ModeKeys
    config: RunConfig, possibly with data_parallelism attribute
    params: dict, may include batch_size, use_tpu
    decode_hparams: HParams, used when mode == PREDICT.
    use_tpu: A bool, whether to build the inference graph for TPU.

  Returns:
    TPUEstimatorSpec if use tpu else EstimatorSpec
  """
  if mode == tf.estimator.ModeKeys.TRAIN:
    create_dummy_vars()
  # Copy so that per-run mutations don't leak into the caller's hparams.
  hparams = hparams_lib.copy_hparams(hparams)

  # Instantiate model
  data_parallelism = None
  if not use_tpu and config:
    data_parallelism = config.data_parallelism
  reuse = tf.get_variable_scope().reuse
  model = cls(
      hparams,
      mode,
      data_parallelism=data_parallelism,
      decode_hparams=decode_hparams,
      _reuse=reuse)

  # PREDICT mode
  if mode == tf.estimator.ModeKeys.PREDICT:
    # On TPU shapes must be fully known; pin any unknown batch/length dims.
    if use_tpu:
      inputs = features.get("inputs")
      if inputs is None:
        inputs = features["targets"]
      shape = inputs.get_shape().as_list()
      if shape[0] is None:
        shape[0] = decode_hparams.batch_size or hparams.batch_size
      if shape[1] is None:
        shape[1] = hparams.max_input_seq_length or hparams.max_length
      inputs.set_shape(shape)
    return model.estimator_spec_predict(features, use_tpu=use_tpu)

  # TRAIN and EVAL modes
  if hparams.eval_run_autoregressive and mode == tf.estimator.ModeKeys.EVAL:
    logits, losses_dict = model.eval_autoregressive(features)
  else:
    logits, losses_dict = model(features)  # pylint: disable=not-callable

    # Support model-generated labels by overriding features["targets"] with
    # logits["self_generated_targets"].
    if isinstance(logits, dict) and "self_generated_targets" in logits:
      # Overwrite 'features["targets"]' and 'labels'
      # by logits["self_generated_targets"].
      tf.logging.info("Replacing targets with model-provided targets.")
      features["targets"] = labels = logits.pop("self_generated_targets")
      assert list(logits.keys()) == ["logits"], (
          # See "Returns" in the "top" method docstring for the expected
          # "logits" format when targets are generated at training time.
          "Expect only key 'logits' when there is 'self_generated_targets'. "
          "Found {}".format(logits.keys())
      )
      # Recover the original logits tensor from the logits dict.
      logits = logits["logits"]  # Can be a tf.Tensor or a dict.

  # Set known shapes: XLA requires static batch/length dimensions.
  if common_layers.is_xla_compiled():
    if isinstance(logits, dict):
      for k, v in sorted(six.iteritems(logits)):
        if "scalar/" in k:
          continue
        shape = v.get_shape().as_list()
        if shape[0] is None:
          shape[0] = params["batch_size"]
        if shape[1] is None:
          shape[1] = hparams.max_length
        v.set_shape(shape)
    else:
      shape = logits.get_shape().as_list()
      if shape[0] is None:
        shape[0] = params["batch_size"]
      if shape[1] is None:
        shape[1] = hparams.max_length
      logits.set_shape(shape)

  assert "training" in losses_dict

  # Attack mode
  if mode == "attack":
    return logits

  # Summarize losses
  model._summarize_losses(losses_dict)  # pylint: disable=protected-access

  # Accumulate losses
  loss = sum(losses_dict[key] for key in sorted(losses_dict.keys()))

  # EVAL mode
  if mode == tf.estimator.ModeKeys.EVAL:
    return model.estimator_spec_eval(features, logits, labels, loss,
                                     losses_dict)

  # TRAIN mode
  assert mode == tf.estimator.ModeKeys.TRAIN
  num_async_replicas = 1
  if config and not use_tpu:
    num_async_replicas = config.t2t_device_info["num_async_replicas"]
  return model.estimator_spec_train(
      loss, num_async_replicas=num_async_replicas, use_tpu=use_tpu)
Constructs `tf.estimator.EstimatorSpec` for TRAIN (training) mode.
def estimator_spec_train(self, loss, num_async_replicas=1, use_tpu=False):
  """Constructs `tf.estimator.EstimatorSpec` for TRAIN (training) mode."""
  train_op = self.optimize(
      loss, num_async_replicas=num_async_replicas, use_tpu=use_tpu)

  if not use_tpu:
    # CPU/GPU path: warm-start eagerly, then build a plain EstimatorSpec.
    if self._hparams.warm_start_from:
      self.initialize_from_ckpt(self._hparams.warm_start_from)
    # When loading weights from a pre-trained model, you want to be able to
    # load separate weights into the encoder and decoder.
    if self._hparams.warm_start_from_second:
      self.initialize_from_ckpt(self._hparams.warm_start_from_second)
    return tf.estimator.EstimatorSpec(
        tf.estimator.ModeKeys.TRAIN, loss=loss, train_op=train_op)

  # TPU path: warm-starting happens lazily inside a scaffold function.
  scaffold_fn = None
  if self._hparams.warm_start_from:

    def scaffold_fn():
      self.initialize_from_ckpt(self._hparams.warm_start_from)
      return tf.train.Scaffold()

  # Note: important to call this before remove_summaries()
  host_call = (self.create_train_host_call()
               if self.hparams.tpu_enable_host_call else None)
  remove_summaries()
  return tf.contrib.tpu.TPUEstimatorSpec(
      tf.estimator.ModeKeys.TRAIN,
      loss=loss,
      train_op=train_op,
      host_call=host_call,
      scaffold_fn=scaffold_fn)
Constructs `tf.estimator.EstimatorSpec` for EVAL (evaluation) mode.
def estimator_spec_eval(self, features, logits, labels, loss, losses_dict):
  """Constructs `tf.estimator.EstimatorSpec` for EVAL (evaluation) mode."""
  del losses_dict
  hparams = self.hparams

  if not hasattr(hparams, "problem"):
    raise NotImplementedError(_no_problem_err("estimator_spec_eval"))

  problem = hparams.problem
  if common_layers.is_xla_compiled():
    # TPU/XLA path: metrics are passed as a (fn, flat-args) pair.
    # Note: important to call this before remove_summaries()
    if self.hparams.tpu_enable_host_call:
      host_call = self.create_eval_host_call()
    else:
      host_call = None
    remove_summaries()

    eval_metrics_fn = create_tpu_eval_metrics_fn(problem, hparams)
    # Infer batch size from the first feature that has a rank > 0.
    batch_size = [feature.shape.as_list()[0] for _, feature
                  in features.items() if feature.shape.ndims][0]

    # Add batch dimension to all features since tpu requires the batch
    # dimension on all tensors.
    for name, feature in features.items():
      if not feature.shape.as_list():
        # All features must have a batch dimension
        feature = tf.tile(tf.expand_dims(feature, 0), [batch_size])
        features[name] = feature

    eval_metrics_fn_args = dict(
        logits=logits,  # possibly a dict
        labels=labels,
        features=features,  # dict
    )
    # TPUEstimator requires a flat argument structure for eval_metrics.
    eval_metrics_fn_flat_args = _flatten_dict(eval_metrics_fn_args)
    return tf.contrib.tpu.TPUEstimatorSpec(
        tf.estimator.ModeKeys.EVAL,
        eval_metrics=(eval_metrics_fn, eval_metrics_fn_flat_args),
        host_call=host_call,
        loss=loss)
  else:
    # CPU/GPU path: build standard eval_metric_ops.
    task_list = [problem]
    if hasattr(problem, "task_list"):
      task_list = problem.task_list

    eval_metrics_fns = metrics.create_evaluation_metrics(task_list, hparams)
    eval_metrics = {}
    for metric_name, metric_fn in six.iteritems(eval_metrics_fns):
      if isinstance(logits, dict):
        # the key is located in the center of metric_name: "metrics-%s/%s/%s"
        k = metric_name.split("/")[1]
        if k in logits:
          eval_metrics[metric_name] = metric_fn(logits[k], features,
                                                features[k])
        else:
          # We do not make it an error because we sometimes run models that
          # predict only parts of the targets defined by the Problem class.
          # For example, an autoencoder or pure-video model can run on a gym
          # problem even if another model is also predicting other things,
          # like actions or rewards.
          tf.logging.warning("No key %s in logits for evaluation." % k)
      else:
        eval_metrics[metric_name] = metric_fn(logits, features,
                                              features["targets"])
    if isinstance(logits, dict):
      predictions = logits
    else:
      predictions = {"predictions": logits}

    evaluation_hooks = []
    # Create a SummarySaverHook
    eval_dir = os.path.join(
        self.hparams.model_dir,
        self.hparams.get("eval_dir_name", "eval"))
    eval_summary_hook = tf.train.SummarySaverHook(
        save_steps=1,
        output_dir=eval_dir,
        summary_op=tf.summary.merge_all())
    evaluation_hooks.append(eval_summary_hook)

    # Let the problem contribute its own evaluation hooks.
    evaluation_hooks += problem.eval_hooks(features, logits, hparams)

    return tf.estimator.EstimatorSpec(
        tf.estimator.ModeKeys.EVAL,
        predictions=predictions,
        eval_metric_ops=eval_metrics,
        evaluation_hooks=evaluation_hooks,
        loss=loss)
Constructs `tf.estimator.EstimatorSpec` for PREDICT (inference) mode.
def estimator_spec_predict(self, features, use_tpu=False):
  """Constructs `tf.estimator.EstimatorSpec` for PREDICT (inference) mode."""
  decode_hparams = self._decode_hparams
  top_beams = decode_hparams.beam_size if decode_hparams.return_beams else 1
  infer_out = self.infer(
      features,
      beam_size=decode_hparams.beam_size,
      top_beams=top_beams,
      alpha=decode_hparams.alpha,
      decode_length=decode_hparams.extra_length,
      use_tpu=use_tpu)
  # infer() may return either a results dict or a bare outputs tensor.
  if isinstance(infer_out, dict):
    outputs = infer_out["outputs"]
    scores = infer_out["scores"]
  else:
    outputs = infer_out
    scores = None

  inputs = features.get("inputs")
  if inputs is None:
    inputs = features["targets"]

  predictions = {
      "outputs": outputs,
      "scores": scores,
      "inputs": inputs,
      "targets": features.get("infer_targets"),
  }

  # Pass through remaining features
  for name, feature in features.items():
    if name not in list(predictions.keys()) + ["infer_targets"]:
      if name == "decode_loop_step":
        continue
      if not feature.shape.as_list():
        # All features must have a batch dimension
        batch_size = common_layers.shape_list(outputs)[0]
        feature = tf.tile(tf.expand_dims(feature, 0), [batch_size])
      predictions[name] = feature

  _del_dict_non_tensors(predictions)

  export_out = {"outputs": predictions["outputs"]}
  if "scores" in predictions:
    export_out["scores"] = predictions["scores"]

  # Necessary to rejoin examples in the correct order with the Cloud ML Engine
  # batch prediction API.
  if "batch_prediction_key" in predictions:
    export_out["batch_prediction_key"] = predictions["batch_prediction_key"]

  export_outputs = {
      tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
          tf.estimator.export.PredictOutput(export_out)
  }
  if use_tpu:
    # Note: important to call this before remove_summaries()
    if self.hparams.tpu_enable_host_call:
      host_call = self.create_eval_host_call()
    else:
      host_call = None
    remove_summaries()
    return tf.contrib.tpu.TPUEstimatorSpec(
        tf.estimator.ModeKeys.PREDICT,
        predictions=predictions,
        host_call=host_call,
        export_outputs=export_outputs)
  else:
    return tf.estimator.EstimatorSpec(
        tf.estimator.ModeKeys.PREDICT,
        predictions=predictions,
        export_outputs=export_outputs)
Adds `tf.summary`s to all terms in the losses dictionary.
def _summarize_losses(self, losses_dict):
  """Adds `tf.summary`s to all terms in the losses dictionary."""
  if not common_layers.should_generate_summaries():
    return
  with tf.name_scope("losses"):
    # Sorted iteration keeps summary creation order deterministic.
    for loss_name in sorted(losses_dict):
      tf.summary.scalar(loss_name, losses_dict[loss_name])
Scheduled sampling.
Performs forward inference again with "targets" feature replaced with values
sampled from the model.
This is the identity unless self.hparams.scheduled_sampling_prob > 0
(default).
**WARNING**: This is not a faithful implementation of scheduled sampling.
This implementation samples tokens for timestep t conditioned on gold tokens
1...t-1. A proper implementation must condition on a mix of gold and
sampled tokens. Doing so is not efficient for models such as Transformer.
Args:
features: {str: Tensor}. Features sharded along batch dimension.
logits: Tensor. Logits for each shard of data.
losses: 0-D Tensor or (num: 0-D Tensor, denom: 0-D Tensor). Loss Tensor
Returns:
new_logits: Tensor.
new_losses: {str: loss} where loss is one of (i) a 0-D Tensor or
(ii) a (num: 0-D Tensor, denom: 0-D Tensor) pair to be used in a
weighted average.
def maybe_scheduled_sampling(self, features, logits, losses):
  """Scheduled sampling.

  Performs forward inference again with "targets" feature replaced with values
  sampled from the model.

  This is the identity unless self.hparams.scheduled_sampling_prob > 0
  (default).

  **WARNING**: This is not a faithful implementation of scheduled sampling.
  This implementation samples tokens for timestep t conditioned on gold tokens
  1...t-1. A proper implementation must condition on a mix of gold and
  sampled tokens. Doing so is not efficient for models such as Transformer.

  Args:
    features: {str: Tensor}. Features sharded along batch dimension.
    logits: Tensor. Logits for each shard of data.
    losses: 0-D Tensor or (num: 0-D Tensor, denom: 0-D Tensor). Loss Tensor

  Returns:
    new_logits: Tensor.
    new_losses: {str: loss} where loss is one of (i) a 0-D Tensor or
    (ii) a (num: 0-D Tensor, denom: 0-D Tensor) pair to be used in a
    weighted average.
  """
  hparams = self.hparams
  problem_hparams = self._problem_hparams

  # Only do scheduled sampling if requested.
  if hparams.scheduled_sampling_prob == 0.0:
    return (logits, losses)

  # Only do scheduled sampling on language tasks.
  modality = problem_hparams.modality["targets"]
  if modality != modalities.ModalityType.SYMBOL:
    assert hparams.scheduled_sampling_prob == 0, (
        "Scheduled sampling only applies to ModalityType.SYMBOL. Set "
        "hparams.scheduled_sampling_prob == 0.0.")
    return (logits, losses)

  # Only do scheduled sampling when training.
  is_training = (hparams.mode == tf.estimator.ModeKeys.TRAIN)
  if not is_training:
    tf.logging.info("Running in %s mode. Not using scheduled sampling.",
                    hparams.mode)
    return (logits, losses)

  # Pad vocabulary if vocab size must be evenly divisible by vocab_divisor.
  vocab_size = problem_hparams.vocab_size["targets"]
  assert vocab_size is not None
  assert hparams.vocab_divisor == 1

  def sample(x):
    """Multinomial sampling from a n-dimensional tensor."""
    samples = tf.multinomial(tf.reshape(x, [-1, vocab_size]), 1)
    reshaped_samples = tf.reshape(samples, common_layers.shape_list(x)[:-1])
    return tf.to_int32(reshaped_samples)

  def mix_gold_sampled(gold_targets, sampled_targets, mixin_prob):
    """Interleave sampled and gold tokens randomly."""
    return tf.where(
        tf.less(
            tf.random_uniform(common_layers.shape_list(sampled_targets)),
            mixin_prob),
        sampled_targets,
        gold_targets)

  def sampled_results(features, logits, mixin_prob):
    """Generate scheduled sampling results."""
    sampled_targets = sample(logits)
    new_targets = mix_gold_sampled(features["targets"],
                                   sampled_targets,
                                   mixin_prob)
    new_targets = tf.stop_gradient(new_targets)  # Treat new_targets as given.
    new_features = copy.copy(features)
    new_features["targets"] = new_targets
    # Re-run the model on the mixed targets, reusing existing variables.
    with tf.variable_scope(tf.get_variable_scope(), reuse=True):
      # Compute bottom() for new_targets.
      #
      # TODO(duckworthd): Only apply bottom to 'new_targets'.
      new_transformed_features = self.bottom(new_features)

      # Compute body.
      with tf.variable_scope("body"):
        new_body_outputs, new_losses = self._normalize_body_output(
            self.body(new_transformed_features))
      assert "training" not in new_losses

      # Compute top.
      new_logits = self.top(new_body_outputs, new_features)

      # Compute loss. Use original features (== labels).
      if (hparams.mode != tf.estimator.ModeKeys.PREDICT and
          hparams.mode != "attack"):
        new_losses["training"] = self.loss(new_logits, features)
      else:
        new_losses["training"] = 0.0

    return new_logits, new_losses

  tf.logging.info("Using scheduled sampling.")
  assert hparams.scheduled_sampling_prob == 1.0, (
      "hparams.scheduled_sampling_prob must be 0 or 1.")

  # Gradually increase over a warmup period. Lower numbers mean more gold
  # tokens.
  mixin_prob = (
      hparams.scheduled_sampling_gold_mixin_prob *
      common_layers.inverse_exp_decay(
          hparams.scheduled_sampling_warmup_steps,
          min_value=0.001)
  )

  # Apply scheduled sampling over N passes. The logits from the (n-1)-th pass
  # will be mixed with gold tokens for conditioning in the n-th pass.
  scheduled_sampling_num_passes = getattr(
      hparams, "scheduled_sampling_num_passes", 1)
  assert scheduled_sampling_num_passes > 0, (
      "hparams.scheduled_sampling_num_passes must be > 0 if "
      "hparams.scheduled_sampling_prob > 0.0")
  new_logits = logits
  new_losses = losses
  for _ in range(scheduled_sampling_num_passes):
    new_logits, new_losses = sampled_results(features, new_logits, mixin_prob)
  return new_logits, new_losses
Prepare one shard of the model for the decoder.
Args:
targets: a Tensor.
hparams: run hyperparameters
Returns:
decoder_input: a Tensor, bottom of decoder stack
decoder_self_attention_bias: a Tensor, containing large negative values
to implement masked attention and possibly biases for diagonal alignments
pad_remover (expert_utils.PadRemover): an util object to remove padding
def attention_lm_moe_prepare_decoder(targets, hparams):
  """Prepare one shard of the model for the decoder.

  Args:
    targets: a Tensor.
    hparams: run hyperparameters

  Returns:
    decoder_input: a Tensor, bottom of decoder stack
    decoder_self_attention_bias: a Tensor, containing large negative values
    to implement masked attention and possibly biases for diagonal alignments
    pad_remover (expert_utils.PadRemover): an util object to remove padding
  """
  targets_pad_mask = common_attention.embedding_to_padding(targets)
  with tf.name_scope("pad_remover"):
    # Because of the shift_right, the <eos> token will be considered as
    # padding. In practice, it doesn't really matter, due to the triangular
    # mask, this token should never be attended.
    pad_remover = expert_utils.PadRemover(targets_pad_mask)

  if hparams.prepend_mode == "prepend_inputs_full_attention":
    self_attention_bias = (
        common_attention.attention_bias_prepend_inputs_full_attention(
            targets_pad_mask))
  else:
    self_attention_bias = common_attention.attention_bias_lower_triangle(
        tf.shape(targets)[1])

  shifted_targets = common_layers.shift_right_3d(targets)
  if hparams.pos == "timing":
    shifted_targets = common_attention.add_timing_signal_1d(shifted_targets)
  return (shifted_targets, self_attention_bias, pad_remover)
Return a flat int32 tensor of shape [1, batch_size*length, 1].
def get_batch_coordinate(x, axis=0):
  """Return a flat int32 tensor of shape [1, batch_size*length, 1]."""
  # Compute the batch coordinate before flattening all batches.
  coordinates = common_attention.coordinate_tensor(tf.shape(x)[:-1], axis=axis)
  return tf.expand_dims(coordinates, axis=-1)
Duplicate elements of bc by length_factor.
Args:
bc (tf.Tensor): int32 tensor of shape [1, length, 1]
length_factor (int):
Returns:
tf.Tensor: of shape [1, length*length_factor, 1] where every elements has
been duplicated length_factor times.
def expand_batch_coordinates(bc, length_factor):
  """Duplicate elements of bc by length_factor.

  Args:
    bc (tf.Tensor): int32 tensor of shape [1, length, 1]
    length_factor (int):

  Returns:
    tf.Tensor: of shape [1, length*length_factor, 1] where every elements has
      been duplicated length_factor times.
  """
  assert bc.get_shape().as_list() == [1, None, 1]
  # Broadcast-multiply [1, length, 1] by [1, length_factor] to get
  # [1, length, length_factor]: each coordinate repeated length_factor times.
  bc = bc * tf.constant([[1] * length_factor])
  # Flatten the repeats back into the length axis: [1, length*length_factor, 1].
  return tf.reshape(bc, [1, -1, 1])
Remove padding by concatenating all dimension into one.
Args:
x (tf.Tensor): input of shape [batch_size, length, depth]
pad_remover (obj): a PadRemover object
mode (ModeKeys): infer, train or eval. If inference, the padding remover is
not applied
Returns:
tf.Tensor of shape [1,length_nonpad,depth] where
length_nonpad <= batch_size*length
def remove_pad(x, pad_remover, mode):
  """Remove padding by concatenating all dimension into one.

  Args:
    x (tf.Tensor): input of shape [batch_size, length, depth]
    pad_remover (obj): a PadRemover object
    mode (ModeKeys): infer, train or eval. If inference, the padding remover is
      not applied

  Returns:
    tf.Tensor of shape [1,length_nonpad,depth] where
      length_nonpad <= batch_size*length
  """
  # Concatenate all tokens (without padding)
  flat = expert_utils.flatten_all_but_last(x)
  # Remove padding for training and eval
  if mode != ModeKeys.PREDICT:
    # This is a hack to allows inference when the <go> token
    # is detected as padding and removed. This works for now because there is
    # no padding at inference.
    flat = pad_remover.remove(flat)
  return tf.expand_dims(flat, axis=0)  # Now batch_size=1
Set of hyperparameters.
suitable for 1 gpu.
on lm1b_32k:
~229M params
0.9 steps/sec on [GeForce GTX TITAN X]
Returns:
a hparams object
def attention_lm_moe_base():
  """Set of hyperparameters.

  suitable for 1 gpu.
  on lm1b_32k:
     ~229M params
     0.9 steps/sec on  [GeForce GTX TITAN X]

  Returns:
    a hparams object
  """
  hparams = common_hparams.basic_params1()

  # Overrides of hyperparameters already present in basic_params1.
  hparams.hidden_size = 1024
  hparams.batch_size = 8192
  hparams.max_length = 256
  hparams.dropout = 0.0
  hparams.clip_grad_norm = 0.  # i.e. no gradient clipping
  hparams.optimizer_adam_epsilon = 1e-9
  hparams.learning_rate_decay_scheme = "noam"
  hparams.learning_rate = 0.1
  hparams.learning_rate_warmup_steps = 2000
  hparams.initializer_gain = 1.0
  hparams.num_hidden_layers = 4
  hparams.initializer = "uniform_unit_scaling"
  hparams.weight_decay = 0.0
  hparams.optimizer_adam_beta1 = 0.9
  hparams.optimizer_adam_beta2 = 0.98
  hparams.num_sampled_classes = 0
  hparams.label_smoothing = 0.0
  hparams.shared_embedding_and_softmax_weights = False
  hparams.moe_num_experts = 32

  # Hyperparameters new to this model, registered via add_hparam.
  new_hparams = [
      ("filter_size", 2048),
      # attention-related flags
      ("num_heads", 8),
      ("attention_key_channels", 0),
      ("attention_value_channels", 0),
      # All hyperparameters ending in "dropout" are automatically set to 0.0
      # when not in training mode.
      ("attention_dropout", 0.0),
      ("relu_dropout", 0.0),
      ("pos", "timing"),  # timing, none
      ("moe_layers", "2"),  # comma separated list of layer numbers
      # moe params. local attention moe.
      # If attention_layers is set, the num_hidden_layers parameter will be
      # ignored and each caracter of the string will correspond to one
      # attention layer type
      ("attention_layers", ""),
      ("attention_type", AttentionType.MULTIHEAD),
      ("attention_local", False),
      ("attention_moe_k", 2),
      ("attention_num_head", 1),
      ("attention_num_experts", 16),
      ("attention_split_batch", False),
      ("attention_red_factor", 3),
      ("attention_block_length", 128),
      ("attention_reduction_type", "conv"),
      # Non linearity for the attention reduction. Either "none", or "silu" (
      # Sigmoid Linear-Unit described in https://arxiv.org/abs/1710.05941)
      ("attention_nonlinearity", "none"),
      # If attention_exp_factor is set, each input to local_expert_attention
      # (of dimensionality hidden size) is projected into attention_exp_factor
      # smaller inputs, each of dimensionality attention_exp_inputdim.
      # (otherwise attention_exp_inputdim is ignored)
      ("attention_exp_factor", 0),
      ("attention_exp_inputdim", 128),
      # Key, query and value dimensions for the attention
      ("attention_kq_size", 128),
      ("attention_v_size", 256),
      # Loss coef for load balancing
      ("attention_load_balance", 2e-2),
      # Locality-sensitive hashing params
      ("lsh_num_hyperplanes", 4),
      ("lsh_use_map_fn", False),
      ("use_sepconv", False),
      ("diet_experts", False),
      ("memory_efficient_ffn", False),
      # if True, we learn a non-autoregressive model from "inputs" to
      # "targets". if False, we learn an autoregressive model to generate
      # "targets"
      ("use_inputs", False),
  ]
  for name, value in new_hparams:
    hparams.add_hparam(name, value)
  return hparams
Hyper parameters specifics for long sequence generation.
def attention_lm_moe_base_long_seq():
  """Hyper parameters specifics for long sequence generation."""
  hparams = attention_lm_moe_base()
  # Use separable convolutions in the feed-forward layers.
  hparams.use_sepconv = True
  # Drop over-long sequences at eval time instead of failing.
  hparams.eval_drop_long_sequences = True
  # Avoid cyclic problems for big batches.
  hparams.min_length_bucket = 256
  # A value of 0 ties the maximum length to the batch size.
  hparams.max_length = 0
  return hparams
Base model with attention expert.
def attention_lm_moe_base_ae():
  """Base model with attention expert."""
  hparams = attention_lm_moe_base_long_seq()
  # Replace multihead attention with local expert attention.
  hparams.attention_type = AttentionType.LOCAL_EXPERTS
  hparams.learning_rate_warmup_steps = 10000
  hparams.learning_rate = 0.05
  # According to noam, ("n", "da") seems better for harder-to-learn models
  # hparams.layer_preprocess_sequence = "n"
  # hparams.layer_postprocess_sequence = "da"
  return hparams
Experiment with the exp_factor params.
def attention_lm_ae_extended():
  """Experiment with the exp_factor params."""
  hparams = attention_lm_moe_base_long_seq()
  # Four expert-attention layers, all local.
  hparams.attention_layers = "eeee"
  hparams.attention_local = True
  hparams.attention_moe_k = 2
  # Split each expert-attention input into 4 smaller inputs.
  hparams.attention_exp_factor = 4
  # hparams.attention_exp_inputdim = 128
  # hparams.factored_logits=1  # Necessary when the number of expert grow bigger
  hparams.layer_preprocess_sequence = "n"
  hparams.layer_postprocess_sequence = "da"
  return hparams
Base model with attention expert.
def attention_lm_moe_base_memeff():
  """Base model with attention expert."""
  hparams = attention_lm_moe_base_long_seq()
  hparams.use_sepconv = False
  # Memory-efficient configuration: diet experts plus memory-efficient
  # attention and feed-forward layers.
  hparams.attention_type = AttentionType.MEMORY_EFFICIENT
  hparams.memory_efficient_ffn = True
  hparams.diet_experts = True
  hparams.num_heads = 8
  hparams.factored_logits = True
  # ("n", "da") pre/post-processing with no dropout.
  hparams.layer_preprocess_sequence = "n"
  hparams.layer_postprocess_sequence = "da"
  hparams.layer_prepostprocess_dropout = 0.0
  return hparams
Cheap model for single-gpu training.
on lm1b_32k:
~312M params
1.6 steps/sec on [GeForce GTX TITAN X]
After 50K steps on 8 GPUs (synchronous):
eval_log_ppl_per_token = 3.31
Returns:
an hparams object.
def attention_lm_moe_small():
  """Cheap model for single-gpu training.

  on lm1b_32k:
     ~312M params
     1.6 steps/sec on  [GeForce GTX TITAN X]
  After 50K steps on 8 GPUs (synchronous):
     eval_log_ppl_per_token = 3.31

  Returns:
    an hparams object.
  """
  hparams = attention_lm_moe_base()
  # Shrink the network: 4 layers of width 512, a single MoE layer at depth 2.
  hparams.hidden_size = 512
  hparams.filter_size = 2048
  hparams.num_hidden_layers = 4
  hparams.moe_layers = "2"
  hparams.moe_num_experts = 128
  return hparams
Cheap model for debugging.
Returns:
an hparams object.
def attention_lm_attention_moe_tiny():
  """Cheap model for debugging.

  Returns:
    an hparams object.
  """
  hparams = attention_lm_moe_small()
  # Disable feed-forward MoE layers; use local expert attention instead.
  hparams.moe_layers = ""
  hparams.attention_type = AttentionType.LOCAL_EXPERTS
  hparams.attention_num_experts = 128
  hparams.filter_size = 8192
  return hparams
Large model for distributed training.
Over 1B parameters, so requires multi-gpu training due to memory
requirements.
on lm1b_32k:
After 45K steps on 8 GPUs (synchronous):
eval_log_ppl_per_token = 3.18
eval_ppl_per_word = exp(1.107893 * eval_log_ppl_per_token) = 33.9
Returns:
an hparams object.
def attention_lm_moe_large():
  """Large model for distributed training.

  Over 1B parameters, so requires multi-gpu training due to memory
  requirements.

  on lm1b_32k:
    After 45K steps on 8 GPUs (synchronous):
      eval_log_ppl_per_token = 3.18
      eval_ppl_per_word = exp(1.107893 * eval_log_ppl_per_token) = 33.9

  Returns:
    an hparams object.
  """
  hparams = attention_lm_moe_base()
  # Wider and deeper network.
  hparams.hidden_size = 1024
  hparams.filter_size = 4096
  hparams.num_hidden_layers = 5
  hparams.num_heads = 16
  # One 128-expert MoE layer at depth 3.
  hparams.moe_layers = "3"
  hparams.moe_num_experts = 128
  hparams.moe_hidden_sizes = "4096"
  hparams.layer_prepostprocess_dropout = 0.2
  return hparams
Memory-efficient version.
def attention_lm_moe_memory_efficient():
  """Memory-efficient version."""
  hparams = attention_lm_moe_large()
  # Memory savings: diet experts plus memory-efficient attention and FFN.
  hparams.diet_experts = True
  hparams.memory_efficient_ffn = True
  hparams.attention_type = AttentionType.MEMORY_EFFICIENT
  hparams.num_heads = 8
  hparams.factored_logits = True
  # ("n", "da") pre/post-processing with no dropout.
  hparams.layer_preprocess_sequence = "n"
  hparams.layer_postprocess_sequence = "da"
  hparams.layer_prepostprocess_dropout = 0.0
  return hparams
Unnecessarily large model with 24B params - because we can.
def attention_lm_moe_24b_diet():
  """Unnecessarily large model with 24B params - because we can."""
  hparams = attention_lm_moe_large_diet()
  # Scale up the mixture: 1024 experts, each with a 12288-wide hidden layer.
  hparams.moe_num_experts = 1024
  hparams.moe_hidden_sizes = "12288"
  hparams.batch_size = 4096
  return hparams
Version to use for seq2seq.
def attention_lm_moe_translation():
  """Version to use for seq2seq."""
  hparams = attention_lm_moe_base()
  # Feed the source sentence via masked-attention prepending.
  hparams.prepend_mode = "prepend_inputs_masked_attention"
  hparams.max_length = 512
  hparams.learning_rate = 0.4
  hparams.label_smoothing = 0.1
  hparams.shared_embedding_and_softmax_weights = True
  # 6 layers, all of them MoE.
  hparams.num_hidden_layers = 6
  hparams.moe_layers = "0,1,2,3,4,5"
  hparams.layer_preprocess_sequence = "n"
  hparams.layer_postprocess_sequence = "da"
  hparams.layer_prepostprocess_dropout = 0.2
  return hparams
Version to use with languagemodel_wiki_scramble1k50.
def attention_lm_moe_unscramble_base():
  """Version to use with languagemodel_wiki_scramble1k50."""
  hparams = attention_lm_no_moe_small()
  # Condition on "inputs" (non-autoregressive) rather than pure LM.
  hparams.use_inputs = True
  # Fixed-length 1024-token sequences.
  hparams.min_length_bucket = 1024
  hparams.max_length = 1024
  hparams.batch_size = 5000
  hparams.layer_preprocess_sequence = "n"
  hparams.layer_postprocess_sequence = "da"
  hparams.layer_prepostprocess_dropout = 0.0
  return hparams
Transform input from data space to model space.
Args:
x: A Tensor with shape [batch, ...]
model_hparams: HParams, model hyperparmeters.
vocab_size: int, vocabulary size.
Returns:
body_input: A Tensor with shape [batch, ?, ?,
model_hparams.hidden_size].
def audio_bottom(x, model_hparams, vocab_size):
  """Transform input from data space to model space.

  Rescales raw audio to [0, 1] and compresses it through a stack of
  Xception-style residual blocks, each halving the spatial dimensions.

  Args:
    x: A Tensor with shape [batch, ...]
    model_hparams: HParams, model hyperparmeters.
    vocab_size: int, vocabulary size.

  Returns:
    body_input: A Tensor with shape [batch, ?, ?,
      model_hparams.hidden_size].
  """
  del vocab_size  # unused arg
  inputs = x
  with tf.variable_scope("audio_modality"):
    # TODO(aidangomez): Will need to sort out a better audio pipeline
    def xnet_resblock(x, filters, res_relu, name):
      """Xception block: separable convs + max-pool, with a strided residual."""
      with tf.variable_scope(name):
        # Typically audio samples are >100k samples in length and have a width
        # of 2 or 4. Mono audio has a single channel while stereo has 2.
        y = common_layers.separable_conv_block(
            x,
            filters, [((1, 1), (3, 3)), ((1, 1), (3, 3))],
            first_relu=True,
            padding="SAME",
            force2d=True,
            name="sep_conv_block")
        y = common_layers.pool(y, (3, 3), "MAX", "SAME", strides=(2, 2))
        # Strided 1x1 residual branch so both paths are downsampled by 2.
        return y + common_layers.conv_block(
            x,
            filters, [((1, 1), (1, 1))],
            padding="SAME",
            strides=(2, 2),
            first_relu=res_relu,
            force2d=True,
            name="res_conv0")

    # NOTE(review): assumes 8-bit sample values in [0, 255] -- TODO confirm.
    x = tf.to_float(inputs) / 255.
    x.set_shape([None, None, None, 1])
    # Each compression block doubles the filter count and halves the
    # spatial dimensions; the final block projects to hidden_size.
    for i in range(model_hparams.audio_compression):
      x = xnet_resblock(x, 2**(i + 1), True, "compress_block_%d" % i)
    return xnet_resblock(x,
                         model_hparams.hidden_size,
                         False,
                         "compress_block_final")
Bottom transformation for target images.
def image_targets_bottom(x, model_hparams, vocab_size):
  """Bottom transformation for target images.

  Embeds each integer channel value of the image and merges the per-channel
  embeddings of every pixel into one hidden_size vector.

  Args:
    x: int Tensor with shape [batch, height, width, channels].
    model_hparams: HParams, model hyperparmeters.
    vocab_size: int, number of distinct pixel values (e.g. 256).

  Returns:
    float Tensor with shape [batch, height, width,
      model_hparams.hidden_size].

  Raises:
    ValueError: if the input is not rank 4.
  """
  pixel_embedding_size = 64
  inputs = x
  with tf.variable_scope("image_modality"):
    if not tf.executing_eagerly():
      tf.summary.image(
          "targets_bottom",
          common_layers.tpu_safe_image_summary(inputs),
          max_outputs=1)
    inputs_shape = common_layers.shape_list(inputs)
    if len(inputs_shape) != 4:
      raise ValueError("Assuming images given as int tensors in the format "
                       "[batch, height, width, channels] (256 values).")
    # We embed each of 256=vocab_size possible pixel values.
    embedding_var = tf.get_variable(
        "pixel_embedding",
        [vocab_size, pixel_embedding_size])
    # one_hot + matmul rather than gather (presumably for TPU friendliness).
    hot_inputs = tf.one_hot(tf.to_int32(inputs), vocab_size)
    hot_inputs = tf.reshape(hot_inputs, [-1, vocab_size])
    embedded = tf.matmul(hot_inputs, embedding_var)
    # Let's now merge all channels that were embedded into a single vector.
    merged_size = pixel_embedding_size * inputs_shape[3]
    embedded = tf.reshape(embedded, inputs_shape[:3] + [merged_size])
    merged = tf.layers.dense(
        embedded,
        model_hparams.hidden_size,
        name="merge_pixel_embedded_channels")
    return merged
Compresses channel-wise input pixels into whole pixel representions.
Perform conversion of RGB pixel values to a real number in the range -1 to
1. This combines pixel channels to form a representation of shape
[img_len, img_len].
Args:
inputs: Tensor representing RGB pixel intensities as integers, of shape
[batch, img_len, img_len, channels].
model_hparams: HParams, model hyperparmeters.
name: string, scope.
Returns:
body_input: Tensor of shape
[batch, img_len, img_len, model_hparams.hidden_size].
def _image_channel_compress_bottom(inputs, model_hparams, name="bottom"):
  """Compresses channel-wise input pixels into whole pixel representions.

  Perform conversion of RGB pixel values to a real number in the range -1 to
  1. This combines pixel channels to form a representation of shape
  [img_len, img_len].

  Args:
    inputs: Tensor representing RGB pixel intensities as integers, of shape
      [batch, img_len, img_len, channels].
    model_hparams: HParams, model hyperparmeters.
    name: string, scope.

  Returns:
    body_input: Tensor of shape
      [batch, img_len, img_len, model_hparams.hidden_size].
  """
  num_channels = 3
  with tf.variable_scope(name):
    inputs = tf.to_float(inputs)
    hp = model_hparams
    # Image summaries are skipped in PREDICT mode.
    if hp.mode != tf.estimator.ModeKeys.PREDICT:
      tf.summary.image(
          "inputs",
          common_layers.tpu_safe_image_summary(inputs),
          max_outputs=2)
    inputs = common_layers.convert_rgb_to_symmetric_real(inputs)

    # Reshape inputs to apply convolutions across [img_len, img_len*channels].
    inputs_shape = common_layers.shape_list(inputs)
    inputs = tf.reshape(
        inputs, [-1, inputs_shape[1], inputs_shape[2] * inputs_shape[3], 1])

    # Compress RGB intensities for each pixel using a convolution.
    # Kernel and stride of (1, num_channels) map each group of channel
    # values back to a single per-pixel vector of size hidden_size.
    outputs = tf.layers.conv2d(
        inputs,
        model_hparams.hidden_size,
        kernel_size=(1, num_channels),
        padding="VALID",
        strides=(1, num_channels),
        activation=tf.nn.relu,
        name="conv_input")
    return outputs
Bottom transformation for image targets.
def image_channel_embeddings_bottom(x, model_hparams, vocab_size):
  """Bottom transformation for image targets.

  Embeds each of the io_depth channels separately and lays the channel
  embeddings out along the width axis.

  Args:
    x: int Tensor with shape [batch, height, width, io_depth].
    model_hparams: HParams, model hyperparmeters.
    vocab_size: int, unused.

  Returns:
    Tensor of shape [batch, height, width * io_depth, hidden_size].
  """
  del vocab_size  # unused arg
  inputs = tf.to_int32(x)
  io_depth = model_hparams.num_channels
  tshape = common_layers.shape_list(inputs)
  hidden_size = model_hparams.hidden_size
  target_embeddings = cia.get_channel_embeddings(
      io_depth, inputs, hidden_size, "input_bottom")
  # Fold the channel dimension into the width dimension.
  return tf.reshape(target_embeddings,
                    [tshape[0], tshape[1], tshape[2] * io_depth, hidden_size])
Use batchnorm instead of CMVN and shorten the stft with strided convs.
Args:
x: float32 tensor with shape [batch_size, len, 1, freqs * channels]
model_hparams: HParams, model hyperparmeters.
vocab_size: int, vocabulary size.
Returns:
float32 tensor with shape [batch_size, shorter_len, 1, hidden_size]
def speech_recognition_bottom(x, model_hparams, vocab_size):
  """Use batchnorm instead of CMVN and shorten the stft with strided convs.

  Args:
    x: float32 tensor with shape [batch_size, len, 1, freqs * channels]
    model_hparams: HParams, model hyperparmeters.
    vocab_size: int, vocabulary size.

  Returns:
    float32 tensor with shape [batch_size, shorter_len, 1, hidden_size]
  """
  del vocab_size  # unused arg
  inputs = x
  p = model_hparams

  num_mel_bins = p.audio_num_mel_bins
  # Delta and delta-delta features triple the channel count when enabled.
  num_channels = 3 if p.audio_add_delta_deltas else 1

  with tf.variable_scope("speech_recognition_modality"):
    if p.audio_preproc_in_bottom:
      # Compute filterbanks
      with tf.variable_scope("fbanks"):
        waveforms = tf.squeeze(inputs, [2, 3])
        mel_fbanks = common_audio.compute_mel_filterbank_features(
            waveforms,
            sample_rate=p.audio_sample_rate,
            dither=p.audio_dither,
            preemphasis=p.audio_preemphasis,
            frame_length=p.audio_frame_length,
            frame_step=p.audio_frame_step,
            lower_edge_hertz=p.audio_lower_edge_hertz,
            upper_edge_hertz=p.audio_upper_edge_hertz,
            num_mel_bins=p.audio_num_mel_bins,
            apply_mask=True)
        if p.audio_add_delta_deltas:
          mel_fbanks = common_audio.add_delta_deltas(mel_fbanks)
        x = tf.reshape(mel_fbanks,
                       common_layers.shape_list(mel_fbanks)[:2] +
                       [num_mel_bins, num_channels])

        nonpadding_mask = 1. - common_attention.embedding_to_padding(x)
        num_of_nonpadding_elements = tf.reduce_sum(
            nonpadding_mask) * num_mel_bins * num_channels

        # This replaces CMVN estimation on data: normalize each feature
        # using mean/variance computed over the non-padding frames only.
        var_epsilon = 1e-09
        mean = tf.reduce_sum(
            x, axis=[1], keepdims=True) / num_of_nonpadding_elements
        variance = (num_of_nonpadding_elements * mean**2. -
                    2. * mean * tf.reduce_sum(x, axis=[1], keepdims=True) +
                    tf.reduce_sum(x**2, axis=[1], keepdims=True)
                   ) / num_of_nonpadding_elements
        # Re-apply the mask so padding frames stay exactly zero.
        x = (x - mean) * tf.rsqrt(variance + var_epsilon) * tf.expand_dims(
            nonpadding_mask, -1)
    else:
      # Features were precomputed upstream; use them as-is.
      x = inputs

    # The convention is that the models are flattened along the spatial,
    # dimensions, thus the speech preprocessor treats frequencies and
    # channels as image colors (last axis)
    x.set_shape([None, None, num_mel_bins, num_channels])

    # TODO(chorowski): how to specify bottom's hparams and avoid hardcoding?
    # Pad the time axis so the two stride-2 VALID convs below don't drop
    # frames at the end.
    x = tf.pad(x, [[0, 0], [0, 8], [0, 0], [0, 0]])
    for _ in range(2):
      x = tf.layers.conv2d(
          x, 128, (3, 3), (2, 2), use_bias=False)
      x = common_layers.layer_norm(x)
      x = tf.nn.relu(x)

    xshape = common_layers.shape_list(x)
    # apply a conv that will remove all frequencies and at the same time
    # project the output into desired hidden_size
    x = tf.pad(x, [[0, 0], [0, 2], [0, 0], [0, 0]])
    x = tf.layers.conv2d(x, p.hidden_size, (3, xshape[2]), use_bias=False)

    # Frequency axis must have been fully collapsed to size 1.
    assert common_layers.shape_list(x)[2] == 1
    x = common_layers.layer_norm(x)
    x = tf.nn.relu(x)
  return x
Create or get concatenated embedding or softmax variable.
Args:
model_hparams: HParams, model hyperparmeters.
vocab_size: int, vocabulary size.
hidden_dim: dim of the variable. Defaults to _model_hparams' hidden_size
Returns:
a list of num_shards Tensors.
def get_weights(model_hparams, vocab_size, hidden_dim=None):
  """Create or get concatenated embedding or softmax variable.

  The vocabulary is sharded across `symbol_modality_num_shards` variables
  (named "weights_0" .. "weights_{n-1}") and concatenated for use.

  Args:
    model_hparams: HParams, model hyperparmeters.
    vocab_size: int, vocabulary size.
    hidden_dim: dim of the variable. Defaults to _model_hparams' hidden_size

  Returns:
    a list of num_shards Tensors.
  """
  if hidden_dim is None:
    hidden_dim = model_hparams.hidden_size
  num_shards = model_hparams.symbol_modality_num_shards
  shards = []
  for i in range(num_shards):
    # Distribute the remainder of vocab_size / num_shards over the first
    # (vocab_size % num_shards) shards, one extra row each.
    shard_size = (vocab_size // num_shards) + (
        1 if i < vocab_size % num_shards else 0)
    var_name = "weights_%d" % i
    shards.append(
        tf.get_variable(
            var_name, [shard_size, hidden_dim],
            initializer=tf.random_normal_initializer(0.0, hidden_dim**-0.5)))
  if num_shards == 1:
    ret = shards[0]
  else:
    ret = tf.concat(shards, 0)
  # Convert ret to tensor.
  if not tf.executing_eagerly():
    ret = common_layers.convert_gradient_to_tensor(ret)
  return ret
Bottom transformation for symbols.
def _symbol_bottom_simple(x, model_hparams, vocab_size, name, reuse):
  """Bottom transformation for symbols.

  Args:
    x: int Tensor of symbol ids; rank 2-4, normalized to rank 3 internally.
    model_hparams: HParams, model hyperparmeters.
    vocab_size: int, vocabulary size.
    name: string, variable scope name for the embedding variable.
    reuse: passed to tf.variable_scope; whether to reuse existing variables.

  Returns:
    float Tensor of embeddings with trailing dimension hidden_size;
    embeddings at padding (id 0) positions are zeroed out.
  """
  with tf.variable_scope(name, reuse=reuse):
    # Ensure the inputs are 3-D
    if len(x.get_shape()) == 4:
      x = tf.squeeze(x, axis=3)
    while len(x.get_shape()) < 3:
      x = tf.expand_dims(x, axis=-1)

    var = get_weights(model_hparams, vocab_size)
    # NOTE(review): presumably drops whole symbol ids to 0 (padding) with
    # probability symbol_dropout -- confirm against dropout_no_scaling.
    x = common_layers.dropout_no_scaling(
        x, 1.0 - model_hparams.symbol_dropout)
    ret = common_layers.gather(var, x)
    if model_hparams.multiply_embedding_mode == "sqrt_depth":
      ret *= model_hparams.hidden_size**0.5
    # Zero out the embeddings of padding (id 0) positions.
    ret *= tf.expand_dims(
        common_layers.cast_like(tf.not_equal(x, 0), ret), -1)
    return ret
Bottom transformation for target symbols.
def symbol_targets_bottom(x, model_hparams, vocab_size):
  """Bottom transformation for target symbols."""
  shared = (model_hparams.shared_embedding_and_softmax_weights or
            model_hparams.get("shared_embedding"))
  if not shared:
    # Targets get their own embedding table.
    return _symbol_bottom_simple(
        x, model_hparams, vocab_size, "target_emb", reuse=None)
  # Weights are shared with the input embedding / softmax; first try to
  # reuse the existing variable.
  try:
    return _symbol_bottom_simple(
        x, model_hparams, vocab_size, "shared", reuse=True)
  except ValueError:
    # perhaps there were no inputs, and this is a new variable.
    return _symbol_bottom_simple(
        x, model_hparams, vocab_size, "shared", reuse=None)
Bottom transformation for embedding video bitwise.
def video_bitwise_bottom(x, model_hparams, vocab_size):
  """Bottom transformation for embedding video bitwise.

  Embeds each 8-bit pixel value through its binary representation and then
  projects to hidden_size.

  Args:
    x: int Tensor of pixel values.
    model_hparams: HParams, model hyperparmeters.
    vocab_size: int, must be 256 (8-bit pixels).

  Returns:
    float Tensor with trailing dimension model_hparams.hidden_size.
  """
  pixel_embedding_size = 64
  inputs = x
  with tf.variable_scope("video_modality_bitwise", reuse=tf.AUTO_REUSE):
    common_layers.summarize_video(inputs, "bottom")
    # Embed bitwise: 8 bits per pixel value.
    assert vocab_size == 256
    embedded = discretization.int_to_bit_embed(inputs, 8,
                                               pixel_embedding_size)
    # Project.
    return tf.layers.dense(
        embedded,
        model_hparams.hidden_size,
        name="merge_pixel_embedded_frames")
Bottom transformation for video.
def video_pixel_noise_bottom(x, model_hparams, vocab_size):
  """Bottom transformation for video.

  During training, replaces a random fraction (video_modality_input_noise)
  of input pixels with the per-pixel median "background" value before
  delegating to video_bottom. Outside training the input is unchanged.

  Args:
    x: int Tensor of pixel values, shape [batch, time, height, width, channels].
    model_hparams: HParams, model hyperparmeters.
    vocab_size: int, vocabulary size.

  Returns:
    the result of video_bottom on the (possibly noised) input.
  """
  input_noise = getattr(model_hparams, "video_modality_input_noise", 0.25)
  inputs = x
  if model_hparams.mode == tf.estimator.ModeKeys.TRAIN:
    # Median over all but the channel axis serves as the background value.
    background = tfp.stats.percentile(inputs, 50., axis=[0, 1, 2, 3])
    input_shape = common_layers.shape_list(inputs)
    input_size = tf.reduce_prod(input_shape[:-1])
    # Sample a keep/replace mask: 1 keeps the pixel, 0 replaces it.
    input_mask = tf.multinomial(
        tf.log([[input_noise, 1.-input_noise]]), input_size)
    input_mask = tf.reshape(tf.cast(input_mask, tf.int32),
                            input_shape[:-1]+[1])
    inputs = inputs * input_mask + background * (1 - input_mask)
  return video_bottom(inputs, model_hparams, vocab_size)
Convert prediction and target from rgb to real.
def convert_rgb_to_real(prediction, targets):
  """Convert prediction and target from rgb to real."""
  # Drop the trailing singleton dimension of the prediction before converting.
  squeezed_prediction = tf.squeeze(prediction, axis=-1)
  prediction = common_layers.convert_rgb_to_real(squeezed_prediction)
  targets = common_layers.convert_rgb_to_real(targets)
  return prediction, targets
Compute the CTC loss.
def ctc_symbol_loss(top_out, targets, model_hparams, vocab_size, weight_fn):
  """Compute the CTC loss.

  Args:
    top_out: logits Tensor of shape [batch, length, 1, 1, vocab] (squeezed
      to [batch, length, vocab] before the CTC call).
    targets: int Tensor of shape [batch, length, 1, 1]; id 0 is padding.
    model_hparams: HParams, unused.
    vocab_size: int, unused.
    weight_fn: callable mapping targets to per-position weights.

  Returns:
    (loss numerator, loss denominator) for this shard.
  """
  del model_hparams, vocab_size  # unused arg
  logits = top_out
  with tf.name_scope("ctc_loss", values=[logits, targets]):
    # For CTC we assume targets are 1d, [batch, length, 1, 1] here.
    targets_shape = targets.get_shape().as_list()
    assert len(targets_shape) == 4
    assert targets_shape[2] == 1
    assert targets_shape[3] == 1
    targets = tf.squeeze(targets, axis=[2, 3])
    logits = tf.squeeze(logits, axis=[2, 3])
    # Non-padding positions (id != 0) determine each example's length.
    targets_mask = 1 - tf.to_int32(tf.equal(targets, 0))
    targets_lengths = tf.reduce_sum(targets_mask, axis=1)
    sparse_targets = tf.keras.backend.ctc_label_dense_to_sparse(
        targets, targets_lengths)
    xent = tf.nn.ctc_loss(
        sparse_targets,
        logits,
        targets_lengths,
        time_major=False,
        preprocess_collapse_repeated=False,
        ctc_merge_repeated=False)
    weights = weight_fn(targets)
    return tf.reduce_sum(xent), tf.reduce_sum(weights)
Compute loss numerator and denominator for one shard of output.
def generic_loss(top_out, targets, model_hparams, vocab_size, weights_fn):
  """Compute loss numerator and denominator for one shard of output."""
  del vocab_size  # unused arg
  # Upcast logits if the model runs in reduced precision.
  logits = common_attention.maybe_upcast(top_out, hparams=model_hparams)
  cutoff = getattr(model_hparams, "video_modality_loss_cutoff", 0.0)
  return common_layers.padded_cross_entropy(
      logits,
      targets,
      model_hparams.label_smoothing,
      cutoff=cutoff,
      weights_fn=weights_fn)
Average loss over the labels.
def multi_label_loss(top_out, targets, model_hparams, vocab_size, weights_fn):
  """Average loss over the labels.

  Each example has a variable number of labels along axis 1 of `targets`;
  the same logits are scored against every label and the per-label losses
  are averaged.

  Args:
    top_out: logits Tensor, broadcast (tiled) across the label axis.
    targets: int Tensor with labels along axis 1.
    model_hparams: HParams, model hyperparmeters.
    vocab_size: int, unused.
    weights_fn: callable mapping targets to per-position weights.

  Returns:
    (loss numerator, loss denominator) for this shard.
  """
  del vocab_size  # unused arg
  logits = top_out
  num_labels = tf.shape(targets)[1]
  # Repeat the logits once per label so each label is scored independently.
  logits = tf.tile(logits, [1, num_labels, 1, 1, 1])

  xent, weights = common_layers.padded_cross_entropy(
      logits,
      targets,
      model_hparams.label_smoothing,
      weights_fn=weights_fn,
      reduce_sum=False,
  )
  xent = tf.squeeze(xent, [2, 3])
  weights = tf.squeeze(weights, [2, 3])
  # average loss over all labels
  loss = tf.reduce_sum(xent, axis=1)
  weights = tf.reduce_sum(weights, axis=1)
  # Epsilon guards against division by zero for all-padding examples.
  loss /= (weights + 1e-8)
  # Each example with at least one label counts once in the denominator.
  weights = tf.to_float(tf.greater(weights, 0.))

  return tf.reduce_sum(loss*weights), tf.reduce_sum(weights)
Apply softmax cross-entropy between outputs and targets.
Args:
top_out: logits Tensor with shape [batch, ?, ?, num_classes]
targets: one-hot encoding Tensor with shape [batch, ?, ?, num_classes]
model_hparams: HParams, model hyperparmeters.
vocab_size: int, vocabulary size.
weights_fn:
Returns:
loss_scale (cross-entropy), loss_denom
def one_hot_class_label_loss(top_out,
                             targets,
                             model_hparams,
                             vocab_size,
                             weights_fn):
  """Apply softmax cross-entropy between outputs and targets.

  Args:
    top_out: logits Tensor with shape [batch, ?, ?, num_classes]
    targets: one-hot encoding Tensor with shape [batch, ?, ?, num_classes]
    model_hparams: HParams, model hyperparmeters.
    vocab_size: int, vocabulary size.
    weights_fn:

  Returns:
    loss_scale (cross-entropy), loss_denom
  """
  del model_hparams, vocab_size  # unused arg
  xent = tf.losses.softmax_cross_entropy(
      onehot_labels=targets, logits=top_out)
  denom = tf.reduce_sum(weights_fn(targets))
  return xent, denom
Poisson loss for real.
def real_log_poisson_loss(top_out,
                          targets,
                          model_hparams,
                          vocab_size,
                          weights_fn):
  """Poisson loss for real.

  Args:
    top_out: predictions Tensor; a trailing singleton dim is squeezed if its
      rank exceeds that of `targets`.
    targets: float Tensor of target counts.
    model_hparams: HParams, unused.
    vocab_size: int, unused.
    weights_fn: callable mapping targets to per-position weights.

  Returns:
    (loss numerator, loss denominator) for this shard.
  """
  del model_hparams, vocab_size  # unused arg
  predictions = top_out
  if (len(common_layers.shape_list(top_out)) != len(
      common_layers.shape_list(targets))):
    predictions = tf.squeeze(top_out, axis=[-1])
  # NOTE(review): "log_possion" is a typo but it is a graph scope name;
  # renaming it would change op names, so it is kept as-is.
  with tf.name_scope("log_possion"):
    weights = weights_fn(targets)

    lp_loss = tf.nn.log_poisson_loss(targets, predictions)
    return tf.reduce_sum(lp_loss * weights), tf.reduce_sum(weights)
Loss for class label.
def sigmoid_class_label_loss(top_out,
                             targets,
                             model_hparams,
                             vocab_size,
                             weights_fn):
  """Loss for class label."""
  # Inputs are [batch-size, timesteps, 1, num-classes]; the last dimension
  # holds logits for independent binary labels.
  del model_hparams, vocab_size  # unused arg
  xent = tf.losses.sigmoid_cross_entropy(
      multi_class_labels=targets, logits=top_out)
  denom = tf.reduce_sum(weights_fn(targets))
  return xent, denom
Compute loss numerator and denominator for one shard of output.
def video_loss(top_out, targets, model_hparams, vocab_size, weights_fn):
  """Compute loss numerator and denominator for one shard of output."""
  del vocab_size  # unused arg

  def _fold_batch_and_time(tensor):
    # Merge the first two dimensions so frames are scored independently.
    return tf.reshape(tensor, [-1] + common_layers.shape_list(tensor)[2:])

  logits = _fold_batch_and_time(top_out)
  targets = _fold_batch_and_time(targets)
  cutoff = getattr(model_hparams, "video_modality_loss_cutoff", 0.01)
  return common_layers.padded_cross_entropy(
      logits,
      targets,
      model_hparams.label_smoothing,
      cutoff=cutoff,
      weights_fn=weights_fn)
Compute loss numerator and denominator for one shard of output.
def video_l1_loss(top_out, targets, model_hparams, vocab_size, weights_fn):
  """Compute loss numerator and denominator for one shard of output.

  L1 regression loss on raw pixel values; see video_l1_internal_loss.
  """
  del vocab_size  # unused arg
  logits = top_out
  # Fold batch and time dims together; also drop logits' trailing dim.
  logits = tf.reshape(logits, [-1] + common_layers.shape_list(logits)[2:-1])
  targets = tf.reshape(targets, [-1] + common_layers.shape_list(targets)[2:])
  weights = weights_fn(targets)
  # Shift targets by 0.5 so later just casting to int gives the prediction.
  # So for int targets, say 0 and 7, we actually train to predict 0.5 and 7.5.
  # Later (in metrics or infer) this is cast to int anyway. Also, we have no
  # loss beyond cutoff = 0.2 as these are already correct predictions.
  targets = tf.to_float(targets) + 0.5
  loss = video_l1_internal_loss(logits, targets, model_hparams)
  return tf.reduce_sum(loss * weights), tf.reduce_sum(weights)
Compute loss numerator and denominator for one shard of output.
def video_l2_loss(top_out, targets, model_hparams, vocab_size, weights_fn):
  """Compute loss numerator and denominator for one shard of output.

  L2 regression loss on raw pixel values; see video_l2_internal_loss.
  """
  del vocab_size  # unused arg
  logits = top_out
  # Fold batch and time dims together; also drop logits' trailing dim.
  logits = tf.reshape(logits, [-1] + common_layers.shape_list(logits)[2:-1])
  targets = tf.reshape(targets, [-1] + common_layers.shape_list(targets)[2:])
  weights = weights_fn(targets)
  # Shift targets by 0.5 so later just casting to int gives the prediction.
  # So for int targets, say 0 and 7, we actually train to predict 0.5 and 7.5.
  # Later (in metrics or infer) this is cast to int anyway. Also, we have no
  # loss beyond cutoff = 0.2 as these are already correct predictions.
  targets = tf.to_float(targets) + 0.5
  loss = video_l2_internal_loss(logits, targets, model_hparams)
  return tf.reduce_sum(loss * weights), tf.reduce_sum(weights)
Transform inputs from model space to target space.
Average over inner dims and a linear layer to logits.
Args:
body_output: A Tensor with shape [batch, ?, ?, body_output_size].
targets:
model_hparams: HParams, model hyperparmeters.
vocab_size: int, vocabulary size.
Returns:
a Tensors, each with shape [batch_size, 1, 1, 1, vocab_size]
def class_label_top(body_output, targets, model_hparams, vocab_size):
  """Transform inputs from model space to target space.

  Average over inner dims and a linear layer to logits.

  Args:
    body_output: A Tensor with shape [batch, ?, ?, body_output_size].
    targets:
    model_hparams: HParams, model hyperparmeters.
    vocab_size: int, vocabulary size.

  Returns:
    a Tensors, each with shape [batch_size, 1, 1, 1, vocab_size]
  """
  del targets  # unused arg
  with tf.variable_scope("class_label_modality_%d_%d" % (
      vocab_size, model_hparams.hidden_size)):
    x = body_output
    # Global average pooling over the spatial dimensions.
    x = tf.reduce_mean(x, axis=[1, 2], keepdims=True)
    res = tf.layers.dense(x, vocab_size)
    return tf.expand_dims(res, 3)
Top transformation for images.
def image_top(body_output, targets, model_hparams, vocab_size):
  """Top transformation for images.

  Produces per-channel logits over the vocab_size pixel values.

  Args:
    body_output: Tensor of shape [batch, ?, ?, body_output_size].
    targets: unused.
    model_hparams: HParams, model hyperparmeters.
    vocab_size: int, number of distinct pixel values.

  Returns:
    logits Tensor of shape [batch, ?, ?, num_channels, vocab_size].
  """
  del targets  # unused arg
  # TODO(lukaszkaiser): is this a universal enough way to get channels?
  num_channels = model_hparams.problem.num_channels
  with tf.variable_scope("rgb_softmax"):
    body_output_shape = common_layers.shape_list(body_output)
    reshape_shape = body_output_shape[:3]
    reshape_shape.extend([num_channels, vocab_size])
    res = tf.layers.dense(body_output, vocab_size * num_channels)
    res = tf.reshape(res, reshape_shape)
    # Only emit the image summary once (skip reused scopes).
    if not tf.get_variable_scope().reuse:
      res_argmax = tf.argmax(res, axis=-1)
      tf.summary.image(
          "result",
          common_layers.tpu_safe_image_summary(res_argmax),
          max_outputs=1)
    return res
Transforms body output to return logits.
Args:
body_output: Tensor of shape [batch, img_len, img_len, depth].
targets:
model_hparams: HParams, model hyperparmeters.
vocab_size: int, vocabulary size.
Returns:
Tensor of shape [batch, img_len, img_len, channels, vocab_size].
def image_channel_compress_top(body_output, targets, model_hparams, vocab_size):
  """Transforms body output to return logits.

  Args:
    body_output: Tensor of shape [batch, img_len, img_len, depth].
    targets:
    model_hparams: HParams, model hyperparmeters.
    vocab_size: int, vocabulary size.

  Returns:
    Tensor of shape [batch, img_len, img_len, channels, vocab_size].
  """
  del targets  # unused arg
  with tf.variable_scope("image_channel_compress_modality"):
    hidden_size = model_hparams.hidden_size
    img_len = model_hparams.img_len
    channels = 3  # RGB
    batch = common_layers.shape_list(body_output)[0]
    # Decompress: expand each pixel back into `channels` hidden vectors.
    x = tf.layers.conv2d(
        body_output,
        hidden_size * channels,
        kernel_size=(1, 1),
        strides=(1, 1),
        padding="VALID",
        activation=tf.nn.relu,
        name="decompress_conv")
    x = tf.reshape(x, [batch, img_len, img_len * channels, hidden_size])
    x = common_layers.layer_preprocess(x, model_hparams)
    x = tf.layers.dense(x,
                        vocab_size,
                        use_bias=True,
                        activation=None,
                        name="output_conv")
    x = tf.reshape(
        x, [batch, img_len, img_len, channels, vocab_size])
    return x
Top transformation for images.
def image_channel_embeddings_top(body_output,
                                 targets,
                                 model_hparams,
                                 vocab_size):
  """Top transformation for images.

  Projects the body output to 256 logits per position and reshapes to
  per-channel logits.

  Args:
    body_output: Tensor of shape [batch, img_len, img_len * channels, depth].
    targets: unused.
    model_hparams: HParams, model hyperparmeters.
    vocab_size: int, vocabulary size.

  Returns:
    Tensor of shape [batch, img_len, img_len, channels, vocab_size].
  """
  del targets  # unused arg
  with tf.variable_scope("image_channel_embeddings_bottom"):
    img_len = model_hparams.img_len
    channels = model_hparams.num_channels
    # NOTE(review): the dense layer is hardcoded to 256 outputs but the
    # reshape uses vocab_size; shapes only line up when vocab_size == 256.
    # TODO confirm vocab_size is always 256 here.
    x = tf.layers.dense(
        body_output, 256, use_bias=True, activation=None, name="output_conv")
    x = tf.reshape(x,
                   [-1, img_len, img_len, channels, vocab_size])
    return x
Loss for class label.
def softmax_average_pooling_class_label_top(body_output,
                                            targets,
                                            model_hparams,
                                            vocab_size):
  """Loss for class label."""
  del targets  # unused arg
  scope_name = (
      "softmax_average_pooling_onehot_class_label_modality_%d_%d" % (
          vocab_size, model_hparams.hidden_size))
  with tf.variable_scope(scope_name):
    # Average-pool over the time axis, then project to class logits.
    pooled = tf.reduce_mean(body_output, axis=1, keepdims=True)
    return tf.layers.dense(pooled, vocab_size)
Loss for class label.
def softmax_last_timestep_class_label_top(body_output,
                                          targets,
                                          model_hparams,
                                          vocab_size):
  """Loss for class label."""
  del targets  # unused arg
  scope_name = (
      "softmax_last_timestep_onehot_class_label_modality_%d_%d" % (
          vocab_size, model_hparams.hidden_size))
  with tf.variable_scope(scope_name):
    # Keep only the final timestep, then project to class logits.
    last_step = tf.expand_dims(body_output[:, -1], 1)
    return tf.layers.dense(last_step, vocab_size)
Loss for class label.
def softmax_max_pooling_class_label_top(body_output,
                                        targets,
                                        model_hparams,
                                        vocab_size):
  """Loss for class label."""
  del targets  # unused arg
  scope_name = (
      "softmax_max_pooling_onehot_class_label_modality_%d_%d" % (
          vocab_size, model_hparams.hidden_size))
  with tf.variable_scope(scope_name):
    # Max-pool over the time axis, then project to class logits.
    pooled = tf.reduce_max(body_output, axis=1, keepdims=True)
    return tf.layers.dense(pooled, vocab_size)
Generate logits.
Args:
body_output: A Tensor with shape
[batch, p0, p1, model_hparams.hidden_size].
targets: Unused.
model_hparams: HParams, model hyperparmeters.
vocab_size: int, vocabulary size.
Returns:
logits: A Tensor with shape [batch, p0, p1, ?, vocab_size].
def symbol_top(body_output, targets, model_hparams, vocab_size):
  """Generate logits.

  Args:
    body_output: A Tensor with shape
      [batch, p0, p1, model_hparams.hidden_size].
    targets: Unused.
    model_hparams: HParams, model hyperparmeters.
    vocab_size: int, vocabulary size.

  Returns:
    logits: A Tensor with shape [batch, p0, p1, ?, vocab_size].
  """
  del targets  # unused arg
  if model_hparams.shared_embedding_and_softmax_weights:
    # Reuse the embedding table created by the shared bottom transformation.
    scope_name = "shared"
    reuse = tf.AUTO_REUSE
  else:
    scope_name = "softmax"
    reuse = False
  with tf.variable_scope(scope_name, reuse=reuse):
    body_output_shape = common_layers.shape_list(body_output)
    var = get_weights(model_hparams, vocab_size, body_output_shape[-1])
    if (model_hparams.factored_logits and
        model_hparams.mode == tf.estimator.ModeKeys.TRAIN):
      # Defer the full logits matmul (training-only memory optimization).
      # insert channels dimension
      body_output = tf.expand_dims(body_output, 3)
      return common_layers.FactoredTensor(body_output, var)
    else:
      body_output = tf.reshape(body_output, [-1, body_output_shape[-1]])
      logits = tf.matmul(body_output, var, transpose_b=True)
      return tf.reshape(logits,
                        body_output_shape[:-1] + [1, vocab_size])
Top transformation for video.
def video_top(body_output, targets, model_hparams, vocab_size):
  """Top transformation for video.

  Args:
    body_output: Tensor whose last dim is num_channels * vocab_size.
    targets: unused.
    model_hparams: HParams, model hyperparmeters.
    vocab_size: int, number of distinct pixel values.

  Returns:
    logits Tensor of shape [..., num_channels, vocab_size].
  """
  del targets  # unused arg
  num_channels = model_hparams.problem.num_channels
  shape = common_layers.shape_list(body_output)
  # Split the last dim into (channels, vocab) to obtain per-channel logits.
  reshape_shape = shape[:-1] + [num_channels, vocab_size]
  res = tf.reshape(body_output, reshape_shape)
  # Calculate argmax so as to have a summary with the produced images.
  x = tf.argmax(tf.reshape(res, [-1, vocab_size]), axis=-1)
  x = tf.reshape(x, shape[:-1] + [num_channels])
  common_video.gif_summary("results", x, max_outputs=1)
  return res
Top transformation for video.
def video_l1_top(body_output, targets, model_hparams, vocab_size):
  """Top transformation for video.

  Regression head: predicts all target frames' raw pixel values with a
  single dense layer (no per-value softmax).

  Args:
    body_output: Tensor of shape [batch, ?, ?, body_output_size].
    targets: unused.
    model_hparams: HParams, model hyperparmeters.
    vocab_size: unused.

  Returns:
    Tensor of shape [batch, num_frames, ?, ?, num_channels, 1].
  """
  del targets, vocab_size  # unused arg
  num_channels = model_hparams.problem.num_channels
  num_frames = model_hparams.video_num_target_frames
  with tf.variable_scope("rgb"):
    body_output_shape = common_layers.shape_list(body_output)
    res = tf.layers.dense(body_output, num_channels * num_frames, name="cast")
    res = tf.reshape(res, body_output_shape[:3] + [num_channels, num_frames])
    res = tf.transpose(res, [0, 4, 1, 2, 3])  # Move frames next to batch.
    # Only emit the image summary once (skip reused scopes).
    if not tf.get_variable_scope().reuse:
      res_argmax = res[:, -1, :, :, :]
      tf.summary.image(
          "result",
          common_layers.tpu_safe_image_summary(res_argmax),
          max_outputs=1)
    # Trailing singleton dim stands in for the vocab axis of softmax tops.
    return tf.expand_dims(res, axis=-1)
Gets default bottom transformation; if none available, return value.
def get_bottom(modality_type, value=None):
  """Gets default bottom transformation; if none available, return value."""
  # Modalities with their own dedicated bottom transformation.
  bottoms = {
      ModalityType.AUDIO: audio_bottom,
      ModalityType.AUDIO_SPECTRAL: audio_spectral_bottom,
      ModalityType.IMAGE: image_bottom,
      ModalityType.SPEECH_RECOGNITION: speech_recognition_bottom,
      ModalityType.SYMBOL_ONE_HOT: symbol_one_hot_bottom,
      ModalityType.VIDEO_BITWISE: video_bitwise_bottom,
      ModalityType.VIDEO_IDENTITY: video_identity_bottom,
      ModalityType.VIDEO_PIXEL_NOISE: video_pixel_noise_bottom,
  }
  # Families of modalities sharing one bottom transformation.
  shared = (
      ((ModalityType.CLASS_LABEL,
        ModalityType.MULTI_LABEL,
        ModalityType.ONE_HOT_CLASS_LABEL,
        ModalityType.SIGMOID_CLASS_LABEL,
        ModalityType.SIGMOID_MAX_POOLING_CLASS_LABEL,
        ModalityType.SOFTMAX_AVERAGE_POOLING_CLASS_LABEL,
        ModalityType.SOFTMAX_LAST_TIMESTEP_CLASS_LABEL,
        ModalityType.SOFTMAX_MAX_POOLING_CLASS_LABEL),
       class_label_bottom),
      ((ModalityType.CTC_SYMBOL,
        ModalityType.SYMBOL,
        ModalityType.SYMBOL_WEIGHTS_ALL),
       symbol_bottom),
      ((ModalityType.GENERIC_L2_LOSS,
        ModalityType.IDENTITY,
        ModalityType.IDENTITY_SYMBOL,
        ModalityType.IMAGE_CHANNEL_EMBEDDINGS_BOTTOM),
       identity_bottom),
      ((ModalityType.IMAGE_CHANNEL_BOTTOM_IDENTITY,
        ModalityType.IMAGE_CHANNEL_COMPRESS),
       image_channel_compress_bottom),
      ((ModalityType.REAL,
        ModalityType.REAL_L2_LOSS,
        ModalityType.REAL_LOG_POISSON_LOSS),
       real_bottom),
      ((ModalityType.VIDEO,
        ModalityType.VIDEO_L1,
        ModalityType.VIDEO_L2),
       video_bottom),
      ((ModalityType.VIDEO_L1_RAW,
        ModalityType.VIDEO_L2_RAW),
       video_raw_bottom),
  )
  for types, bottom in shared:
    for t in types:
      bottoms[t] = bottom
  return bottoms.get(modality_type, value)
Gets default loss transformation; if none available, return value.
def get_loss(modality_type, value=None):
  """Gets default loss transformation; if none available, return value."""
  # (modality types, loss fn) pairs; first matching group wins.
  loss_groups = (
      ((ModalityType.AUDIO,
        ModalityType.AUDIO_SPECTRAL,
        ModalityType.CLASS_LABEL,
        ModalityType.IDENTITY,
        ModalityType.IDENTITY_SYMBOL,
        ModalityType.IMAGE,
        ModalityType.IMAGE_CHANNEL_BOTTOM_IDENTITY,
        ModalityType.IMAGE_CHANNEL_COMPRESS,
        ModalityType.IMAGE_CHANNEL_EMBEDDINGS_BOTTOM,
        ModalityType.REAL,
        ModalityType.SPEECH_RECOGNITION,
        ModalityType.SYMBOL,
        ModalityType.SYMBOL_WEIGHTS_ALL),
       generic_loss),
      ((ModalityType.CTC_SYMBOL,), ctc_symbol_loss),
      ((ModalityType.GENERIC_L2_LOSS,), generic_l2_loss),
      ((ModalityType.MULTI_LABEL,), multi_label_loss),
      ((ModalityType.ONE_HOT_CLASS_LABEL,
        ModalityType.SOFTMAX_AVERAGE_POOLING_CLASS_LABEL,
        ModalityType.SOFTMAX_LAST_TIMESTEP_CLASS_LABEL,
        ModalityType.SOFTMAX_MAX_POOLING_CLASS_LABEL),
       one_hot_class_label_loss),
      ((ModalityType.REAL_L2_LOSS,), real_l2_loss),
      ((ModalityType.REAL_LOG_POISSON_LOSS,), real_log_poisson_loss),
      ((ModalityType.SIGMOID_CLASS_LABEL,), sigmoid_class_label_loss),
      ((ModalityType.SIGMOID_MAX_POOLING_CLASS_LABEL,),
       sigmoid_max_pooling_class_label_loss),
      ((ModalityType.SYMBOL_ONE_HOT,), symbol_one_hot_loss),
      ((ModalityType.VIDEO,
        ModalityType.VIDEO_BITWISE,
        ModalityType.VIDEO_PIXEL_NOISE),
       video_loss),
      ((ModalityType.VIDEO_IDENTITY,), video_identity_loss),
      ((ModalityType.VIDEO_L1,), video_l1_loss),
      ((ModalityType.VIDEO_L1_RAW,), video_l1_raw_loss),
      ((ModalityType.VIDEO_L2,), video_l2_loss),
      ((ModalityType.VIDEO_L2_RAW,), video_l2_raw_loss),
  )
  for types, loss_fn in loss_groups:
    if modality_type in types:
      return loss_fn
  return value
Gets default name for transformations; if none available, return value.
def get_name(modality_type, value=None):
  """Gets default name for transformations; if none available, return value."""
  # For legacy reasons, modalities vary in their naming scheme. Future plans are
  # to remove any need for get_name. We do not recommend using it.
  # Names that do not depend on hparams or vocab size.
  fixed_names = {
      ModalityType.AUDIO: "audio_modality",
      ModalityType.AUDIO_SPECTRAL: "audio_spectral_modality",
      ModalityType.GENERIC_L2_LOSS: "generic_l2_loss_modality",
      ModalityType.IDENTITY: "identity_modality",
      ModalityType.IMAGE: "image_modality",
      ModalityType.IMAGE_CHANNEL_BOTTOM_IDENTITY:
          "image_channel_bottom_identity_modality",
      ModalityType.IMAGE_CHANNEL_COMPRESS: "image_channel_compress_modality",
      ModalityType.IMAGE_CHANNEL_EMBEDDINGS_BOTTOM:
          "image_channel_embeddings_bottom",
      ModalityType.REAL: "real_modality",
      ModalityType.REAL_L2_LOSS: "real_l2_loss_modality",
      ModalityType.REAL_LOG_POISSON_LOSS: "real_log_poisson_loss_modality",
      ModalityType.SPEECH_RECOGNITION: "speech_recognition_modality",
      ModalityType.VIDEO: "video_modality",
      ModalityType.VIDEO_BITWISE: "video_modality_bitwise",
      ModalityType.VIDEO_IDENTITY: "video_modality_identity",
      ModalityType.VIDEO_L1: "video_modality_l1",
      ModalityType.VIDEO_L1_RAW: "video_modality_l1_raw",
      ModalityType.VIDEO_L2: "video_modality_l2",
      ModalityType.VIDEO_L2_RAW: "video_modality_l2_raw",
      ModalityType.VIDEO_PIXEL_NOISE: "video_modality_pixel_noise",
  }
  if modality_type in fixed_names:
    static_name = fixed_names[modality_type]
    return lambda model_hparams, vocab_size: static_name
  # Remaining modalities embed "%d_%d" % (vocab_size, hidden_size) in the name.
  templates = {}
  for t in (ModalityType.CLASS_LABEL,
            ModalityType.MULTI_LABEL,
            ModalityType.ONE_HOT_CLASS_LABEL):
    templates[t] = "class_label_modality_%d_%d"
  for t in (ModalityType.CTC_SYMBOL,
            ModalityType.IDENTITY_SYMBOL,
            ModalityType.SYMBOL,
            ModalityType.SYMBOL_WEIGHTS_ALL,
            ModalityType.SYMBOL_ONE_HOT):
    templates[t] = "symbol_modality_%d_%d"
  templates[ModalityType.SIGMOID_CLASS_LABEL] = (
      "sigmoid_class_symbol_modality_%d_%d")
  templates[ModalityType.SIGMOID_MAX_POOLING_CLASS_LABEL] = (
      "sigmoid_max_pooling_class_symbol_modality_%d_%d")
  templates[ModalityType.SOFTMAX_AVERAGE_POOLING_CLASS_LABEL] = (
      "softmax_average_pooling_onehot_class_label_modality_%d_%d")
  templates[ModalityType.SOFTMAX_LAST_TIMESTEP_CLASS_LABEL] = (
      "softmax_last_timestep_onehot_class_label_modality_%d_%d")
  templates[ModalityType.SOFTMAX_MAX_POOLING_CLASS_LABEL] = (
      "softmax_max_pooling_onehot_class_label_modality_%d_%d")
  if modality_type in templates:
    template = templates[modality_type]
    def name(model_hparams, vocab_size):
      return template % (vocab_size, model_hparams.hidden_size)
    return name
  return value
Gets default bottom transformation for targets; if none, return value.
def get_targets_bottom(modality_type, value=None):
  """Gets default bottom transformation for targets; if none, return value."""
  # NOTE: make_targets_bottom wrappers are built eagerly when this table is
  # constructed; this assumes make_targets_bottom only wraps the given
  # function without side effects.
  table = {
      ModalityType.AUDIO: make_targets_bottom(audio_bottom),
      ModalityType.AUDIO_SPECTRAL: make_targets_bottom(audio_spectral_bottom),
      ModalityType.IDENTITY: make_targets_bottom(identity_bottom),
      ModalityType.IMAGE: image_targets_bottom,
      ModalityType.IMAGE_CHANNEL_EMBEDDINGS_BOTTOM:
          image_channel_embeddings_bottom,
      ModalityType.SPEECH_RECOGNITION:
          make_targets_bottom(speech_recognition_bottom),
      ModalityType.SYMBOL_ONE_HOT: symbol_one_hot_bottom,
      ModalityType.VIDEO_BITWISE: video_bitwise_targets_bottom,
      ModalityType.VIDEO_IDENTITY: video_identity_targets_bottom,
      ModalityType.VIDEO_PIXEL_NOISE:
          make_targets_bottom(video_pixel_noise_bottom),
  }
  table.update(dict.fromkeys(
      (ModalityType.CLASS_LABEL,
       ModalityType.MULTI_LABEL,
       ModalityType.ONE_HOT_CLASS_LABEL,
       ModalityType.SIGMOID_CLASS_LABEL,
       ModalityType.SIGMOID_MAX_POOLING_CLASS_LABEL,
       ModalityType.SOFTMAX_AVERAGE_POOLING_CLASS_LABEL,
       ModalityType.SOFTMAX_LAST_TIMESTEP_CLASS_LABEL,
       ModalityType.SOFTMAX_MAX_POOLING_CLASS_LABEL),
      class_label_targets_bottom))
  table.update(dict.fromkeys(
      (ModalityType.CTC_SYMBOL,
       ModalityType.SYMBOL,
       ModalityType.SYMBOL_WEIGHTS_ALL),
      symbol_targets_bottom))
  table.update(dict.fromkeys(
      (ModalityType.GENERIC_L2_LOSS, ModalityType.IDENTITY_SYMBOL),
      identity_bottom))
  table.update(dict.fromkeys(
      (ModalityType.IMAGE_CHANNEL_BOTTOM_IDENTITY,
       ModalityType.IMAGE_CHANNEL_COMPRESS),
      image_channel_compress_targets_bottom))
  table.update(dict.fromkeys(
      (ModalityType.REAL,
       ModalityType.REAL_L2_LOSS,
       ModalityType.REAL_LOG_POISSON_LOSS),
      make_targets_bottom(real_bottom)))
  table.update(dict.fromkeys(
      (ModalityType.VIDEO,
       ModalityType.VIDEO_L1,
       ModalityType.VIDEO_L2),
      video_targets_bottom))
  table.update(dict.fromkeys(
      (ModalityType.VIDEO_L1_RAW, ModalityType.VIDEO_L2_RAW),
      video_raw_targets_bottom))
  return table.get(modality_type, value)
Gets default top transformation; if none available, return value.
def get_top(modality_type, value=None):
  """Gets default top transformation; if none available, return value."""
  # (modality types, top fn) pairs; first matching group wins.
  top_groups = (
      ((ModalityType.AUDIO,
        ModalityType.AUDIO_SPECTRAL,
        ModalityType.GENERIC_L2_LOSS,
        ModalityType.IDENTITY,
        ModalityType.IDENTITY_SYMBOL,
        ModalityType.IMAGE_CHANNEL_BOTTOM_IDENTITY,
        ModalityType.SPEECH_RECOGNITION,
        ModalityType.VIDEO_IDENTITY),
       identity_top),
      ((ModalityType.CLASS_LABEL,
        ModalityType.MULTI_LABEL,
        ModalityType.ONE_HOT_CLASS_LABEL,
        ModalityType.SIGMOID_CLASS_LABEL),
       class_label_top),
      ((ModalityType.CTC_SYMBOL,
        ModalityType.SYMBOL,
        ModalityType.SYMBOL_WEIGHTS_ALL),
       symbol_top),
      ((ModalityType.IMAGE,), image_top),
      ((ModalityType.IMAGE_CHANNEL_COMPRESS,), image_channel_compress_top),
      ((ModalityType.IMAGE_CHANNEL_EMBEDDINGS_BOTTOM,),
       image_channel_embeddings_top),
      ((ModalityType.REAL,
        ModalityType.REAL_L2_LOSS,
        ModalityType.REAL_LOG_POISSON_LOSS),
       real_top),
      ((ModalityType.SIGMOID_MAX_POOLING_CLASS_LABEL,),
       sigmoid_max_pooling_class_label_top),
      ((ModalityType.SOFTMAX_AVERAGE_POOLING_CLASS_LABEL,),
       softmax_average_pooling_class_label_top),
      ((ModalityType.SOFTMAX_LAST_TIMESTEP_CLASS_LABEL,),
       softmax_last_timestep_class_label_top),
      ((ModalityType.SOFTMAX_MAX_POOLING_CLASS_LABEL,),
       softmax_max_pooling_class_label_top),
      ((ModalityType.SYMBOL_ONE_HOT,), symbol_one_hot_top),
      ((ModalityType.VIDEO,
        ModalityType.VIDEO_BITWISE,
        ModalityType.VIDEO_PIXEL_NOISE),
       video_top),
      ((ModalityType.VIDEO_L1, ModalityType.VIDEO_L2), video_l1_top),
      ((ModalityType.VIDEO_L1_RAW, ModalityType.VIDEO_L2_RAW), video_raw_top),
  )
  for types, top in top_groups:
    if modality_type in types:
      return top
  return value
Gets default weights function; if none available, return value.
def get_weights_fn(modality_type, value=None):
  """Gets default weights function; if none available, return value."""
  # Symbol-like modalities use weights_nonzero; every other known modality
  # falls back to weights_all.
  nonzero_weight_types = {
      ModalityType.CTC_SYMBOL,
      ModalityType.IDENTITY_SYMBOL,
      ModalityType.MULTI_LABEL,
      ModalityType.SYMBOL,
      ModalityType.SYMBOL_ONE_HOT,
  }
  if modality_type in nonzero_weight_types:
    return common_layers.weights_nonzero
  if modality_type in ModalityType.get_choices():
    return common_layers.weights_all
  return value
Generates all possible pair combinations for the input list of sentences.
For example:
input = ["paraphrase1", "paraphrase2", "paraphrase3"]
output = [("paraphrase1", "paraphrase2"),
("paraphrase1", "paraphrase3"),
("paraphrase2", "paraphrase3")]
Args:
list_of_sentences: the list of input sentences.
Returns:
the list of all possible sentence pairs.
def create_combination(list_of_sentences):
  """Generates all possible pair combinations for the input list of sentences.

  For example:
    input = ["paraphrase1", "paraphrase2", "paraphrase3"]
    output = [("paraphrase1", "paraphrase2"),
              ("paraphrase1", "paraphrase3"),
              ("paraphrase2", "paraphrase3")]

  Args:
    list_of_sentences: the list of input sentences.

  Returns:
    the list of all possible sentence pairs, in the same order the previous
    hand-rolled nested loop produced (lexicographic by index).
  """
  # itertools.combinations(xs, 2) emits exactly the pairs (xs[i], xs[j]) for
  # i < j in the order the original loop built them, so this is a drop-in
  # replacement for the manual zip/populate implementation.
  import itertools
  return list(itertools.combinations(list_of_sentences, 2))
Set of hyperparameters.
def image_transformer2d_base():
  """Base set of hyperparameters for 2d image transformer models.

  Returns:
    an hparams object extending common_hparams.basic_params1 with
    transformer-style optimizer, attention, and image-shape settings.
  """
  hparams = common_hparams.basic_params1()
  hparams.hidden_size = 512
  hparams.batch_size = 1
  hparams.max_length = 256
  hparams.dropout = 0.0
  hparams.clip_grad_norm = 0.  # i.e. no gradient clipping
  hparams.optimizer_adam_epsilon = 1e-9
  hparams.learning_rate_decay_scheme = "noam"
  hparams.learning_rate = 0.1
  hparams.learning_rate_warmup_steps = 4000
  hparams.initializer_gain = 0.2
  hparams.initializer = "uniform_unit_scaling"
  hparams.weight_decay = 0.0
  hparams.optimizer_adam_beta1 = 0.9
  hparams.optimizer_adam_beta2 = 0.98
  hparams.label_smoothing = 0.0
  # Modality wiring: targets are embedded per image channel, and the body
  # output is passed through unchanged at the top.
  hparams.bottom["targets"] = modalities.make_targets_bottom(
      modalities.image_channel_embeddings_bottom)
  hparams.top["targets"] = modalities.identity_top
  hparams.norm_type = "layer"
  hparams.layer_prepostprocess_dropout = 0.0
  hparams.add_hparam("filter_size", 512)  # Add new ones like this.
  # attention-related flags
  hparams.add_hparam("num_heads", 8)
  hparams.add_hparam("attention_key_channels", 0)
  hparams.add_hparam("attention_value_channels", 0)
  hparams.add_hparam("ffn_layer", "conv_hidden_relu")
  # All hyperparameters ending in "dropout" are automatically set to 0.0
  # when not in training mode.
  hparams.add_hparam("attention_dropout", 0.0)
  hparams.add_hparam("relu_dropout", 0.0)
  hparams.add_hparam("pos", "timing")  # timing, none
  hparams.add_hparam("nbr_decoder_problems", 1)
  hparams.add_hparam("num_output_layers", 3)
  hparams.add_hparam("block_size", 1)
  # image size related flags
  # assuming that the image has same height and width
  hparams.add_hparam("img_len", 32)
  hparams.add_hparam("num_channels", 3)
  # Local attention params
  hparams.add_hparam("local_and_global_att", False)
  hparams.add_hparam("block_length", 256)
  hparams.add_hparam("block_width", 128)
  # Local 2D attention params
  hparams.add_hparam("query_shape", (16, 16))
  hparams.add_hparam("memory_flange", (16, 32))
  hparams.add_hparam("num_encoder_layers", 4)
  hparams.add_hparam("num_decoder_layers", 8)
  # attention type related params
  hparams.add_hparam("enc_attention_type", cia.AttentionType.GLOBAL)
  hparams.add_hparam("dec_attention_type", cia.AttentionType.LOCAL_2D)
  hparams.add_hparam("block_raster_scan", False)
  # multipos attention params
  hparams.add_hparam("q_filter_width", 1)
  hparams.add_hparam("kv_filter_width", 1)
  hparams.add_hparam("unconditional", False)  # unconditional generation
  # relative embedding hparams
  hparams.add_hparam("shared_rel", False)
  return hparams
hparams fo 8 layer big 2d model for cifar 10.
def imagetransformer2d_base_8l_8_32_big():
  """hparams for 8 layer big 2d model for cifar 10.

  Returns:
    an hparams object.
  """
  hparams = image_transformer2d_base()
  hparams.num_heads = 16
  hparams.hidden_size = 1024
  hparams.filter_size = 2048
  hparams.num_decoder_layers = 8
  hparams.batch_size = 1
  hparams.layer_prepostprocess_dropout = 0.3
  hparams.query_shape = (8, 16)
  hparams.memory_flange = (0, 32)
  # The base registers "unconditional" as a bool; the previous assignment of
  # int(False) silently changed the stored value's type to int 0. Use False
  # directly to keep the registered bool type (same truth value).
  hparams.unconditional = False
  return hparams
big 1d model for unconditional generation on imagenet.
def imagetransformer_base_10l_8h_big_uncond_dr03_dan_64_2d():
  """big 1d model for unconditional generation on imagenet.

  Returns:
    an hparams object.
  """
  hparams = image_transformer2d_base()
  hparams.unconditional = True
  hparams.hidden_size = 512
  hparams.batch_size = 1
  hparams.img_len = 64
  hparams.num_heads = 8
  hparams.filter_size = 2048
  # The original set max_length to 3075 and then immediately to 14000 (and
  # batch_size to 1 twice); only the final values ever took effect, so the
  # dead assignments are dropped here.
  hparams.max_length = 14000
  hparams.layer_preprocess_sequence = "none"
  hparams.layer_postprocess_sequence = "dan"
  hparams.layer_prepostprocess_dropout = 0.1
  hparams.dec_attention_type = cia.AttentionType.LOCAL_2D
  hparams.query_shape = (16, 16)
  hparams.memory_flange = (8, 8)
  return hparams
Base params for img2img 2d attention.
def img2img_transformer2d_base():
  """Base params for img2img 2d attention."""
  hparams = image_transformer2d_base()
  # Learning-rate schedule and layer plumbing overrides; this version seems
  # to benefit from a higher learning rate.
  overrides = {
      "layer_preprocess_sequence": "n",
      "layer_postprocess_sequence": "da",
      "learning_rate": 0.2,
      "layer_prepostprocess_dropout": 0.1,
      "learning_rate_warmup_steps": 12000,
      "filter_size": 2048,
      "num_encoder_layers": 4,
      "num_decoder_layers": 8,
      "dec_attention_type": cia.AttentionType.LOCAL_2D,
      "block_raster_scan": True,
  }
  for key, val in overrides.items():
    setattr(hparams, key, val)
  # Inputs are embedded per image channel.
  hparams.bottom["inputs"] = modalities.image_channel_embeddings_bottom
  return hparams
Current best hparams for local 2d.
def img2img_transformer2d_q3():
  """Current best hparams for local 2d."""
  hp = img2img_transformer2d_q1()
  hp.batch_size = 2
  # Wider memory flange than q1 with the same 8x16 query blocks.
  hp.query_shape = (8, 16)
  hp.memory_flange = (8, 32)
  return hp
Base params for local1d attention.
def img2img_transformer_base():
  """Base params for local1d attention."""
  hparams = image_transformer2d_base()
  # Learning-rate and local-1d attention overrides; this version seems to
  # benefit from a higher learning rate.
  overrides = {
      "layer_preprocess_sequence": "n",
      "layer_postprocess_sequence": "da",
      "learning_rate": 0.2,
      "layer_prepostprocess_dropout": 0.1,
      "learning_rate_warmup_steps": 12000,
      "filter_size": 2048,
      "num_encoder_layers": 4,
      "num_decoder_layers": 8,
      "block_length": 256,
      "block_width": 256,
      "dec_attention_type": cia.AttentionType.LOCAL_1D,
      "block_raster_scan": False,
  }
  for key, val in overrides.items():
    setattr(hparams, key, val)
  return hparams
Current best hparams for local 1d.
def img2img_transformer_b3():
  """Current best hparams for local 1d."""
  hp = img2img_transformer_base()
  hp.batch_size = 2
  hp.layer_preprocess_sequence = "none"
  hp.layer_postprocess_sequence = "dan"
  hp.block_length = 128
  hp.sampling_temp = 0.9
  return hp
Try dilated.
def img2img_transformer_dilated():
  """Try dilated.

  Returns:
    an hparams object using cia.AttentionType.DILATED decoder attention.
  """
  hparams = img2img_transformer_base()
  hparams.add_hparam("num_memory_blocks", 1)
  hparams.num_heads = 8
  hparams.attention_key_channels = hparams.attention_value_channels = 0
  hparams.hidden_size = 512
  hparams.filter_size = 2048
  hparams.num_decoder_layers = 8
  hparams.sampling_method = "random"
  # NOTE(review): gap_sizes is set by plain attribute assignment rather than
  # add_hparam (unlike num_memory_blocks above), so it is presumably not
  # registered/serialized like the other hparams -- confirm this is
  # intentional. The 8 entries likely correspond to the 8 decoder layers,
  # but that is unverified from here.
  hparams.gap_sizes = [0, 16, 64, 0, 16, 64, 128, 0]
  hparams.dec_attention_type = cia.AttentionType.DILATED
  hparams.img_len = 64
  hparams.block_length = 128
  hparams.block_width = 128
  return hparams
Hparams for training img2img_transformer on tpu.
def img2img_transformer_base_tpu():
  """Hparams for training img2img_transformer on tpu."""
  hp = img2img_transformer_base()
  update_hparams_for_tpu(hp)
  hp.batch_size = 2
  hp.num_heads = 4  # heads are expensive on tpu
  hp.num_decoder_layers = 8
  hp.num_encoder_layers = 4
  hp.shared_embedding_and_softmax_weights = False
  return hp
Set of hyperparameters.
def img2img_transformer2d_n31():
  """Set of hyperparameters."""
  hparams = img2img_transformer2d_base()
  # Deeper encoder/decoder with larger 2d attention windows.
  for key, val in [("batch_size", 1),
                   ("num_encoder_layers", 6),
                   ("num_decoder_layers", 12),
                   ("num_heads", 8),
                   ("query_shape", (16, 32)),
                   ("memory_flange", (16, 32))]:
    setattr(hparams, key, val)
  return hparams
Set of hyperparameters.
def img2img_transformer2d_n24():
  """Set of hyperparameters."""
  hp = img2img_transformer2d_base()
  hp.batch_size = 1
  hp.hidden_size = 1024
  hp.filter_size = 2048
  hp.layer_prepostprocess_dropout = 0.2
  hp.num_decoder_layers = 8
  hp.query_shape = (8, 16)
  hp.memory_flange = (8, 32)
  return hp
Tiny params.
def img2img_transformer2d_tiny():
  """Tiny params."""
  hp = img2img_transformer2d_base()
  # Shrink everything for quick tests.
  hp.num_decoder_layers = 2
  hp.hidden_size = 128
  hp.batch_size = 4
  hp.max_length = 128
  hp.attention_key_channels = hp.attention_value_channels = 0
  hp.filter_size = 128
  hp.num_heads = 4
  hp.pos = "timing"
  hp.img_len = 32
  return hp
Tiny params.
def img2img_transformer_tiny():
  """Tiny params."""
  hparams = img2img_transformer2d_base()
  # Shrink everything for quick tests.
  for key, val in [("num_hidden_layers", 2),
                   ("hidden_size", 128),
                   ("batch_size", 4),
                   ("max_length", 128),
                   ("attention_key_channels", 0),
                   ("attention_value_channels", 0),
                   ("filter_size", 128),
                   ("num_heads", 1),
                   ("pos", "timing")]:
    setattr(hparams, key, val)
  return hparams
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.